repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
fredwilliam/PMO | apps/hq/middleware/hq.py | 3 | 2738 | from __future__ import absolute_import
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from hq.authentication import get_username_password
from hq.utils import get_dates
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
# This keeps a thread-local cache. We store the HQ user and domain in it so
# that code which doesn't have a handle to the request object can still get
# at the current user and domain.
_thread_locals = local()
def get_current_user():
"""Get the current (thread-specific) user"""
return getattr(_thread_locals, 'user', None)
def get_current_domain():
"""Get the current (thread-specific) user"""
return getattr(_thread_locals, 'domain', None)
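# Illustrative sketch only (not part of the original module): code that has no
# handle on the request object can still read the values the middleware below
# puts in the thread locals. The function name and message format here are
# made up for the example.
def _example_log_prefix():
    user = get_current_user()
    domain = get_current_domain()
    return "[user=%s domain=%s]" % (user, domain)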
class HqMiddleware(object):
    '''Middleware for CommCare HQ. Currently this sets the user and domain
    in the thread locals (when available), performs custom authentication
    for unsalted passwords, and adds convenience accessors for dates passed
    in as URL parameters.'''
def process_request(self, request):
_thread_locals.user = getattr(request, 'user', None)
if request.user and not request.user.is_anonymous():
self._set_local_vars(request, request.user)
else:
# attempt our custom authentication only if regular auth fails
            # (and request.user == AnonymousUser)
username, password = get_username_password(request)
if username and password:
user = authenticate(username=username, password=password)
if user is not None:
request.user = user
self._set_local_vars(request, user)
# do the same for start and end dates. at some point our views
# can just start accessing these properties on the request assuming
# our middleware is running
try:
            startdate, enddate = get_dates(request)
request.startdate = startdate
request.enddate = enddate
except Exception:
request.startdate = None
request.enddate = None
return None
def _set_local_vars(self, request, user):
"""Sets the User and Domain objects in the threadlocals, if
they exist"""
try:
# set the domain in the thread locals
# so it can be accessed in places other
# than views.
_thread_locals.domain = request.user.selected_domain
except Exception:
# likely means that there's no selected user or
# domain, just let it go.
pass
| bsd-3-clause | -5,733,568,763,307,876,000 | 38.695652 | 75 | 0.643901 | false |
sebalix/OpenUpgrade | openerp/tools/float_utils.py | 312 | 10296 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
the tie-breaking rule selected with ``rounding_method``, by default
HALF-UP (away from zero).
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
    :param rounding_method: the rounding method used: 'HALF-UP' or 'UP'. The
        former rounds to the closest number, with ties (>= 0.5) rounded away
        from zero; the latter always rounds up.
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP (for normal rounding)
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
    # Due to IEEE 754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
    # order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
if rounding_method == 'HALF-UP':
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
# TIE-BREAKING: UP (for ceiling operations)
# When rounding the value up, we instead subtract the epsilon value
    # as the approximation of the real value may be slightly *above* the
    # tie limit, which would result in incorrectly rounding up to the next number.
# The math.ceil operation is applied on the absolute value in order to
# round "away from zero" and not "towards infinity", then the sign is
# restored.
elif rounding_method == 'UP':
sign = cmp(normalized_value, 0)
normalized_value -= sign*epsilon
rounded_value = math.ceil(abs(normalized_value))*sign # ceil to integer
result = rounded_value * rounding_factor # de-normalize
return result
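# Illustrative usage sketch, not part of the original module; it only mirrors
# the docstring examples and the HALF-UP/UP behaviour described above.
def _float_round_usage_sketch():
    # 2.675 is stored as 2.6749999999999998, but the epsilon correction still
    # rounds it HALF-UP to 2.68.
    assert abs(float_round(2.675, precision_digits=2) - 2.68) < 1e-9
    # Arbitrary rounding "steps" (e.g. coin values): snap to the nearest 0.5.
    assert float_round(1.3, precision_rounding=0.5) == 1.5
    # 'UP' always rounds away from zero to the next step.
    assert float_round(2.1, precision_rounding=0.5, rounding_method='UP') == 2.5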
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
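# Illustrative sketch, not part of the original module; it demonstrates the
# warning repeated in the docstrings above about rounding before vs. after
# taking the difference.
def _compare_vs_is_zero_sketch():
    # 0.006 and 0.002 round to 0.01 and 0.0, so they compare as different...
    assert float_compare(0.006, 0.002, precision_digits=2) == 1
    # ...yet their difference (0.004) rounds to zero at the same precision.
    assert float_is_zero(0.006 - 0.002, precision_digits=2)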
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
    given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
    # Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 | -2,727,074,082,039,679,000 | 49.22439 | 98 | 0.650447 | false |
imk1/IMKTFBindingCode | getCellTypeSpecificDomainBoundaries.py | 1 | 2787 | import sys
import argparse
import gzip
import math
def parseArgument():
# Parse the input
parser =\
argparse.ArgumentParser(description = "Get domain boundaries that are present in the 1st cell type but not in the 2nd")
parser.add_argument("--cellTypeOneDomainsFileName", required=True,
help='Domain boundaries from 1st cell type, contains header, gzipped, chromosomes do not start with chr')
parser.add_argument("--cellTypeTwoDomainsFileName", required=True,
help='Domain boundaries from 2nd cell type, contains header, gzipped, chromosomes do not start with chr')
parser.add_argument("--outputFileName", required=True, help='Name of file where cell-type-specific domains will be recorded')
options = parser.parse_args();
return options
def getTopCorner(coordinateLine):
# Get the top corner of a domain
coordinateLineElements = coordinateLine.split("\t")
domainTopCorner = (coordinateLineElements[0], int(coordinateLineElements[2]), int(coordinateLineElements[5]))
return domainTopCorner
def getDomainPreserved(topCorner, topCornerTwo):
# Determine whether two domains are close enough
if topCorner[0] != topCornerTwo[0]:
		# The domains are on different chromosomes, so they cannot be close enough
return False
else:
domainDistance = math.sqrt(math.pow((topCorner[1] - topCornerTwo[1]), 2) + math.pow((topCorner[2] - topCornerTwo[2]), 2))
distanceBound = 50000
if distanceBound > 0.2 * abs(topCorner[1] - topCorner[2]):
# Make the upper bound on the distance 1/5 of the domain size
distanceBound = 0.2 * abs(topCorner[1] - topCorner[2])
if domainDistance <= distanceBound:
# The domain is preserved
return True
return False
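# Illustrative sketch only (not part of the original script); the coordinates
# are made up. Corners on the same chromosome that are ~14kb apart fall under
# the 50kb cap for a 500kb domain, so that domain counts as preserved; corners
# on different chromosomes never do.
def getDomainPreservedSketch():
    assert getDomainPreserved(("1", 1000000, 1500000), ("1", 1010000, 1510000))
    assert not getDomainPreserved(("1", 1000000, 1500000), ("2", 1000000, 1500000))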
def getCellTypeSpecificDomainBoundaries(options):
# Get domain boundaries that are present in the 1st cell type but not in the 2nd
cellTypeOneDomainsFile = gzip.open(options.cellTypeOneDomainsFileName)
cellTypeOneDomainsFile.readline() # Remove the header
cellTypeTwoDomainsFile = gzip.open(options.cellTypeTwoDomainsFileName)
cellTypeTwoDomainsFile.readline() # Remove the header
cellTypeTwoDomainTopCorners = [getTopCorner(line.strip()) for line in cellTypeTwoDomainsFile.readlines()]
cellTypeTwoDomainsFile.close()
outputFile = open(options.outputFileName, 'w+')
for line in cellTypeOneDomainsFile:
# Iterate through the domains of the 1st cell type and record the domains that are not in the 2nd cell type
topCorner = getTopCorner(line.strip())
domainPreservedList = [getDomainPreserved(topCorner, topCornerTwo) for topCornerTwo in cellTypeTwoDomainTopCorners]
if True not in domainPreservedList:
# The domain is cell-type-specific, so record it
outputFile.write(line)
cellTypeOneDomainsFile.close()
outputFile.close()
if __name__=="__main__":
options = parseArgument()
getCellTypeSpecificDomainBoundaries(options)
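# Example invocation (illustrative only; the file names below are made up):
# python getCellTypeSpecificDomainBoundaries.py \
#     --cellTypeOneDomainsFileName cellTypeA.domains.txt.gz \
#     --cellTypeTwoDomainsFileName cellTypeB.domains.txt.gz \
#     --outputFileName cellTypeA.specificDomains.txt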
| mit | 8,918,017,699,157,882,000 | 44.688525 | 126 | 0.775386 | false |
guorendong/iridium-browser-ubuntu | native_client/pnacl/driver/shelltools.py | 8 | 2070 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from driver_log import Log
import types
######################################################################
#
# Shell Utilities
#
######################################################################
class shell(object):
@staticmethod
def unescape(s):
w = shell.split(s)
if len(w) == 0:
return ''
if len(w) == 1:
return w[0]
# String was not properly escaped in the first place?
assert(False)
# TODO(pdox): Simplify this function by moving more of it into unescape
@staticmethod
def split(s):
"""Split a shell-style string up into a list of distinct arguments.
For example: split('cmd -arg1 -arg2="a b c"')
Returns ['cmd', '-arg1', '-arg2=a b c']
"""
assert(isinstance(s, types.StringTypes))
out = []
inspace = True
inquote = False
buf = ''
i = 0
while i < len(s):
if s[i] == '"':
inspace = False
inquote = not inquote
elif s[i] == ' ' and not inquote:
if not inspace:
out.append(buf)
buf = ''
inspace = True
elif s[i] == '\\':
if not i+1 < len(s):
Log.Fatal('Unterminated \\ escape sequence')
inspace = False
i += 1
buf += s[i]
else:
inspace = False
buf += s[i]
i += 1
if inquote:
Log.Fatal('Unterminated quote')
if not inspace:
out.append(buf)
return out
@staticmethod
def join(args):
"""Turn a list into a shell-style string For example:
shell.join([ 'a', 'b', 'c d e' ]) = 'a b "c d e"'
"""
return ' '.join([ shell.escape(a) for a in args ])
@staticmethod
def escape(s):
"""Shell-escape special characters in a string
Surround with quotes if necessary
"""
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
if ' ' in s:
s = '"' + s + '"'
return s
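# Illustrative round trip, not part of the original module; it simply mirrors
# the docstring examples above.
def _shell_roundtrip_sketch():
  assert shell.split('cmd -arg1 -arg2="a b c"') == ['cmd', '-arg1', '-arg2=a b c']
  assert shell.join(['a', 'b', 'c d e']) == 'a b "c d e"'
  assert shell.split(shell.join(['a', 'b', 'c d e'])) == ['a', 'b', 'c d e']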
| bsd-3-clause | 1,363,600,135,209,868,800 | 23.939759 | 73 | 0.511594 | false |
Drooids/odoo | addons/account_asset/wizard/account_asset_change_duration.py | 258 | 5021 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class asset_modify(osv.osv_memory):
_name = 'asset.modify'
_description = 'Modify Asset'
def _get_asset_method_time(self, cr, uid, ids, field_name, arg, context=None):
        if ids and len(ids) == 1 and context and context.get('active_id'):
asset = self.pool['account.asset.asset'].browse(cr, uid, context.get('active_id'), context=context)
return {ids[0]: asset.method_time}
else:
return dict.fromkeys(ids, False)
_columns = {
'name': fields.char('Reason', required=True),
'method_number': fields.integer('Number of Depreciations', required=True),
'method_period': fields.integer('Period Length'),
'method_end': fields.date('Ending date'),
'note': fields.text('Notes'),
'asset_method_time': fields.function(_get_asset_method_time, type='char', string='Asset Method Time', readonly=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
res = super(asset_modify, self).default_get(cr, uid, fields, context=context)
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
if 'name' in fields:
res.update({'name': asset.name})
if 'method_number' in fields and asset.method_time == 'number':
res.update({'method_number': asset.method_number})
if 'method_period' in fields:
res.update({'method_period': asset.method_period})
if 'method_end' in fields and asset.method_time == 'end':
res.update({'method_end': asset.method_end})
if context.get('active_id'):
res['asset_method_time'] = self._get_asset_method_time(cr, uid, [0], 'asset_method_time', [], context=context)[0]
return res
def modify(self, cr, uid, ids, context=None):
""" Modifies the duration of asset for calculating depreciation
and maintains the history of old values.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of Ids
@param context: A standard dictionary
@return: Close the wizard.
"""
if not context:
context = {}
asset_obj = self.pool.get('account.asset.asset')
history_obj = self.pool.get('account.asset.history')
asset_id = context.get('active_id', False)
asset = asset_obj.browse(cr, uid, asset_id, context=context)
data = self.browse(cr, uid, ids[0], context=context)
history_vals = {
'asset_id': asset_id,
'name': data.name,
'method_time': asset.method_time,
'method_number': asset.method_number,
'method_period': asset.method_period,
'method_end': asset.method_end,
'user_id': uid,
'date': time.strftime('%Y-%m-%d'),
'note': data.note,
}
history_obj.create(cr, uid, history_vals, context=context)
asset_vals = {
'method_number': data.method_number,
'method_period': data.method_period,
'method_end': data.method_end,
}
asset_obj.write(cr, uid, [asset_id], asset_vals, context=context)
asset_obj.compute_depreciation_board(cr, uid, [asset_id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,193,045,290,903,390,700 | 43.830357 | 125 | 0.597889 | false |
if1live/pelican-plugins | extract_toc/extract_toc.py | 28 | 1805 | # -*- coding: utf-8 -*-
"""
Extract Table of Content
========================
A Pelican plugin to extract table of contents (ToC) from `article.content` and
place it in its own `article.toc` variable for use in templates.
"""
from os import path
from bs4 import BeautifulSoup
from pelican import signals, readers, contents
try:
from pandoc_reader import PandocReader
except ImportError:
PandocReader = False
def extract_toc(content):
if isinstance(content, contents.Static):
return
soup = BeautifulSoup(content._content,'html.parser')
filename = content.source_path
extension = path.splitext(filename)[1][1:]
toc = None
# default Markdown reader
if not toc and readers.MarkdownReader.enabled and extension in readers.MarkdownReader.file_extensions:
toc = soup.find('div', class_='toc')
if toc: toc.extract()
# default reStructuredText reader
if not toc and readers.RstReader.enabled and extension in readers.RstReader.file_extensions:
toc = soup.find('div', class_='contents topic')
if toc: toc.extract()
if toc:
tag=BeautifulSoup(str(toc), 'html.parser')
tag.div['class']='toc'
tag.div['id']=''
p=tag.find('p', class_='topic-title first')
if p:p.extract()
toc=tag
# Pandoc reader (markdown and other formats)
if not toc and PandocReader and PandocReader.enabled and extension in PandocReader.file_extensions:
toc = soup.find('nav', id='TOC')
if toc:
toc.extract()
content._content = soup.decode()
content.toc = toc.decode()
if content.toc.startswith('<html>'):
content.toc = content.toc[12:-14]
def register():
signals.content_object_init.connect(extract_toc)
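# Illustrative theme usage (not part of the plugin). The Jinja2 markup below is
# an assumption about how a template might consume the `article.toc` variable
# this plugin sets:
#
# {% if article.toc %}
# <nav class="toc">{{ article.toc }}</nav>
# {% endif %}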
| agpl-3.0 | 280,944,805,733,843,230 | 29.59322 | 106 | 0.643767 | false |
hujiajie/pa-chromium | chrome/test/functional/omnibox.py | 65 | 15428 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import re
import shutil
import tempfile
import urlparse
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class OmniboxTest(pyauto.PyUITest):
"""Test cases for the omnibox."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
import time
while True:
self.pprint(self.GetOmniboxInfo().omniboxdict)
time.sleep(1)
def testFocusOnStartup(self):
"""Verify that the omnibox has focus on startup."""
self.WaitUntilOmniboxReadyHack()
self.assertTrue(self.GetOmniboxInfo().Properties('has_focus'))
def testHistoryResult(self):
"""Verify that the omnibox can fetch items from the history."""
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
self.AppendTab(pyauto.GURL(url))
def _VerifyHistoryResult(query_list, description, windex=0):
"""Verify result matching given description for given list of queries."""
for query_text in query_list:
matches = test_utils.GetOmniboxMatchesFor(
self, query_text, windex=windex,
attr_dict={'description': description})
self.assertTrue(matches)
self.assertEqual(1, len(matches))
item = matches[0]
self.assertEqual(url, item['destination_url'])
# Query using URL & title.
_VerifyHistoryResult([url, title], title)
# Verify results in another tab.
self.AppendTab(pyauto.GURL())
_VerifyHistoryResult([url, title], title)
# Verify results in another window.
self.OpenNewBrowserWindow(True)
self.WaitUntilOmniboxReadyHack(windex=1)
_VerifyHistoryResult([url, title], title, windex=1)
# Verify results in an incognito window.
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.WaitUntilOmniboxReadyHack(windex=2)
_VerifyHistoryResult([url, title], title, windex=2)
def _VerifyOmniboxURLMatches(self, url, description, windex=0):
"""Verify URL match results from the omnibox.
Args:
url: The URL to use.
description: The string description within the history page and Google
search to match against.
windex: The window index to work on. Defaults to 0 (first window).
"""
matches_description = test_utils.GetOmniboxMatchesFor(
self, url, windex=windex, attr_dict={'description': description})
self.assertEqual(1, len(matches_description))
if description == 'Google Search':
self.assertTrue(re.match('http://www.google.com/search.+',
matches_description[0]['destination_url']))
else:
self.assertEqual(url, matches_description[0]['destination_url'])
def testFetchHistoryResultItems(self):
"""Verify omnibox fetches history items in 2nd tab, window and incognito."""
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
desc = 'Google Search'
# Fetch history page item in the second tab.
self.AppendTab(pyauto.GURL(url))
self._VerifyOmniboxURLMatches(url, title)
# Fetch history page items in the second window.
self.OpenNewBrowserWindow(True)
self.NavigateToURL(url, 1, 0)
self._VerifyOmniboxURLMatches(url, title, windex=1)
# Fetch google search items in incognito window.
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 2, 0)
self._VerifyOmniboxURLMatches(url, desc, windex=2)
def testSelect(self):
"""Verify omnibox popup selection."""
url1 = self.GetFileURLForDataPath('title2.html')
url2 = self.GetFileURLForDataPath('title1.html')
title1 = 'Title Of Awesomeness'
self.NavigateToURL(url1)
self.NavigateToURL(url2)
matches = test_utils.GetOmniboxMatchesFor(self, 'file://')
self.assertTrue(matches)
# Find the index of match for |url1|.
index = None
for i, match in enumerate(matches):
if match['description'] == title1:
index = i
self.assertTrue(index is not None)
self.OmniboxMovePopupSelection(index) # Select |url1| line in popup.
self.assertEqual(url1, self.GetOmniboxInfo().Text())
self.OmniboxAcceptInput()
self.assertEqual(title1, self.GetActiveTabTitle())
def testInlineAutoComplete(self):
"""Verify inline autocomplete for a pre-visited URL."""
self.NavigateToURL('http://www.google.com')
matches = test_utils.GetOmniboxMatchesFor(self, 'goog')
self.assertTrue(matches)
# Omnibox should suggest auto completed URL as the first item.
matches_description = matches[0]
self.assertTrue('www.google.com' in matches_description['contents'])
self.assertEqual('history-url', matches_description['type'])
# The URL should be inline-autocompleted in the omnibox.
self.assertTrue('google.com' in self.GetOmniboxInfo().Text())
def testCrazyFilenames(self):
"""Test omnibox query with filenames containing special chars.
The files are created on the fly and cleaned after use.
"""
filename = os.path.join(self.DataDir(), 'downloads', 'crazy_filenames.txt')
zip_names = self.EvalDataFrom(filename)
# We got .zip filenames. Change them to .html.
crazy_filenames = [x.replace('.zip', '.html') for x in zip_names]
title = 'given title'
def _CreateFile(name):
"""Create the given html file."""
fp = open(name, 'w') # |name| could be unicode.
print >>fp, '<html><title>%s</title><body>' % title
print >>fp, 'This is a junk file named <h2>%s</h2>' % repr(name)
print >>fp, '</body></html>'
fp.close()
crazy_fileurls = []
# Temp dir for hosting crazy filenames.
temp_dir = tempfile.mkdtemp(prefix='omnibox')
# Windows has a dual nature dealing with unicode filenames.
# While the files are internally saved as unicode, there's a non-unicode
# aware API that returns a locale-dependent coding on the true unicode
# filenames. This messes up things.
# Filesystem-interfacing functions like os.listdir() need to
# be given unicode strings to "do the right thing" on win.
# Ref: http://boodebr.org/main/python/all-about-python-and-unicode
try:
for filename in crazy_filenames: # |filename| is unicode.
file_path = os.path.join(temp_dir, filename.encode('utf-8'))
_CreateFile(os.path.join(temp_dir, filename))
file_url = self.GetFileURLForPath(file_path)
crazy_fileurls.append(file_url)
self.NavigateToURL(file_url)
# Verify omnibox queries.
for file_url in crazy_fileurls:
matches = test_utils.GetOmniboxMatchesFor(self,
file_url, attr_dict={'type': 'url-what-you-typed',
'description': title})
self.assertTrue(matches)
self.assertEqual(1, len(matches))
self.assertTrue(os.path.basename(file_url) in
matches[0]['destination_url'])
finally:
shutil.rmtree(unicode(temp_dir)) # Unicode so that Win treats nicely.
def testSuggest(self):
"""Verify suggested results in omnibox."""
matches = test_utils.GetOmniboxMatchesFor(self, 'apple')
self.assertTrue(matches)
self.assertTrue([x for x in matches if x['type'] == 'search-suggest'])
def testDifferentTypesOfResults(self):
"""Verify different types of results from omnibox.
This includes history result, bookmark result, suggest results.
"""
url = 'http://www.google.com/'
title = 'Google'
search_string = 'google'
self.AddBookmarkURL( # Add a bookmark.
self.GetBookmarkModel().BookmarkBar()['id'], 0, title, url)
self.NavigateToURL(url) # Build up history.
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(matches)
# Verify starred result (indicating bookmarked url).
self.assertTrue([x for x in matches if x['starred'] == True])
for item_type in ('history-url', 'search-what-you-typed',
'search-suggest',):
self.assertTrue([x for x in matches if x['type'] == item_type])
def testSuggestPref(self):
"""Verify no suggests for omnibox when suggested-services disabled."""
search_string = 'apple'
self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kSearchSuggestEnabled))
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(matches)
self.assertTrue([x for x in matches if x['type'] == 'search-suggest'])
# Disable suggest-service.
self.SetPrefs(pyauto.kSearchSuggestEnabled, False)
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kSearchSuggestEnabled))
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(matches)
# Verify there are no suggest results.
self.assertFalse([x for x in matches if x['type'] == 'search-suggest'])
def testAutoCompleteForSearch(self):
"""Verify omnibox autocomplete for search."""
search_string = 'youtu'
verify_string = 'youtube'
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
# Retrieve last contents element.
matches_description = matches[-1]['contents'].split()
self.assertEqual(verify_string, matches_description[0])
def _GotContentHistory(self, search_text, url):
"""Check if omnibox returns a previously-visited page for given search text.
Args:
search_text: The string search text.
url: The string URL to look for in the omnibox matches.
Returns:
True, if the omnibox returns the previously-visited page for the given
search text, or False otherwise.
"""
# Omnibox doesn't change results if searching the same text repeatedly.
# So setting '' in omnibox before the next repeated search.
self.SetOmniboxText('')
matches = test_utils.GetOmniboxMatchesFor(self, search_text)
matches_description = [x for x in matches if x['destination_url'] == url]
return 1 == len(matches_description)
def testContentHistory(self):
"""Verify omnibox results when entering page content.
Test verifies that visited page shows up in omnibox on entering page
content.
"""
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'find_in_page', 'largepage.html'))
self.NavigateToURL(url)
self.assertTrue(self.WaitUntil(
lambda: self._GotContentHistory('British throne', url)))
def testOmniboxSearchHistory(self):
"""Verify page navigation/search from omnibox are added to the history."""
url = self.GetFileURLForDataPath('title2.html')
self.NavigateToURL(url)
self.AppendTab(pyauto.GURL('about:blank'))
self.SetOmniboxText('java')
self.WaitUntilOmniboxQueryDone()
self.OmniboxAcceptInput()
history = self.GetHistoryInfo().History()
self.assertEqual(2, len(history))
self.assertEqual(url, history[1]['url'])
self.assertEqual('java - Google Search', history[0]['title'])
def _VerifyHasBookmarkResult(self, matches):
"""Verify that we have a bookmark result.
Args:
matches: A list of match items, as returned by
test_utils.GetOmniboxMatchesFor().
"""
matches_starred = [result for result in matches if result['starred']]
self.assertTrue(matches_starred)
self.assertEqual(1, len(matches_starred))
def _CheckBookmarkResultForVariousInputs(self, url, title, windex=0):
"""Check if we get the bookmark for complete and partial inputs.
Args:
url: A string URL.
title: A string title for the given URL.
windex: The window index to use. Defaults to 0 (first window).
"""
# Check if the complete URL would get the bookmark.
url_matches = test_utils.GetOmniboxMatchesFor(self, url, windex=windex)
self._VerifyHasBookmarkResult(url_matches)
# Check if the complete title would get the bookmark.
title_matches = test_utils.GetOmniboxMatchesFor(self, title, windex=windex)
self._VerifyHasBookmarkResult(title_matches)
# Check if the partial URL would get the bookmark.
split_url = urlparse.urlsplit(url)
partial_url = test_utils.GetOmniboxMatchesFor(
self, split_url.scheme, windex=windex)
self._VerifyHasBookmarkResult(partial_url)
# Check if the partial title would get the bookmark.
split_title = title.split()
search_term = split_title[len(split_title) - 1]
partial_title = test_utils.GetOmniboxMatchesFor(
self, search_term, windex=windex)
self._VerifyHasBookmarkResult(partial_title)
def testBookmarkResultInNewTabAndWindow(self):
"""Verify omnibox finds bookmarks in search options of new tabs/windows."""
url = self.GetFileURLForDataPath('title2.html')
self.NavigateToURL(url)
title = 'This is Awesomeness'
bookmarks = self.GetBookmarkModel()
bar_id = bookmarks.BookmarkBar()['id']
self.AddBookmarkURL(bar_id, 0, title, url)
bookmarks = self.GetBookmarkModel()
nodes = bookmarks.FindByTitle(title)
self.AppendTab(pyauto.GURL(url))
self._CheckBookmarkResultForVariousInputs(url, title)
self.OpenNewBrowserWindow(True)
self.assertEqual(2, self.GetBrowserWindowCount())
self.NavigateToURL(url, 1, 0)
self._CheckBookmarkResultForVariousInputs(url, title, windex=1)
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.assertEqual(3, self.GetBrowserWindowCount())
self.NavigateToURL(url, 2, 0)
self._CheckBookmarkResultForVariousInputs(url, title, windex=2)
def testAutoCompleteForNonAsciiSearch(self):
"""Verify can search/autocomplete with non-ASCII incomplete keywords."""
search_string = u'\u767e'
verify_string = u'\u767e\u5ea6\u4e00\u4e0b'
matches = test_utils.GetOmniboxMatchesFor(self, search_string)
self.assertTrue(verify_string in matches[-1]['contents'])
class OmniboxLiveTest(pyauto.PyUITest):
"""Test cases for the omnibox that hit live servers (such as Google)."""
def ExtraChromeFlags(self):
"""Override default list of extra flags used in pyauto tests."""
# Force the suggest field trial group. This doesn't guarantee that there
# will be no experimental behaviour, but there's no other way to disable
# all suggest field trials at the moment. TODO(mpearson): Consider allowing
# the suggest_url to be overridden using a flag (so that we can omit the
# "sugexp=chrome,mod=<n>" CGI param), or provide some other way to turn off
# all suggest field trials.
return ['--force-fieldtrials=OmniboxSearchSuggest/10/']
def testGoogleSearch(self):
"""Verify Google search item in omnibox results."""
search_text = 'hello world'
verify_str = 'Google Search'
url_re = 'http://www.google.com/search\?.*q=hello\+world.*'
matches_description = test_utils.GetOmniboxMatchesFor(
self, search_text, attr_dict={'description': verify_str})
self.assertTrue(matches_description)
# There should be a least one entry with the description Google. Suggest
# results may end up having 'Google Search' in them, so use >=.
self.assertTrue(len(matches_description) >= 1)
item = matches_description[0]
self.assertTrue(re.search(url_re, item['destination_url']))
self.assertEqual('search-what-you-typed', item['type'])
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause | 1,675,963,879,764,035,300 | 40.810298 | 80 | 0.694581 | false |
aaiyer/bugseverywhere | libbe/command/list.py | 5 | 10809 | # Copyright (C) 2005-2012 Aaron Bentley <[email protected]>
# Chris Ball <[email protected]>
# Gianluca Montecchi <[email protected]>
# Oleg Romanyshyn <[email protected]>
# Robert Lehmann <[email protected]>
# W. Trevor King <[email protected]>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import itertools
import os
import re
import libbe
import libbe.bug
import libbe.command
import libbe.command.depend
from libbe.command.depend import Filter, parse_status, parse_severity
import libbe.command.tag
import libbe.command.target
import libbe.command.util
# get a list of * for cmp_*() comparing two bugs.
AVAILABLE_CMPS = [fn[4:] for fn in dir(libbe.bug) if fn[:4] == 'cmp_']
AVAILABLE_CMPS.remove('attr') # a cmp_* template.
class List (libbe.command.Command):
"""List bugs
>>> import sys
>>> import libbe.bugdir
>>> bd = libbe.bugdir.SimpleBugDir(memory=False)
>>> io = libbe.command.StringInputOutput()
>>> io.stdout = sys.stdout
>>> ui = libbe.command.UserInterface(io=io)
>>> ui.storage_callbacks.set_storage(bd.storage)
>>> cmd = List(ui=ui)
>>> ret = ui.run(cmd)
abc/a:om: Bug A
>>> ret = ui.run(cmd, {'status':'closed'})
abc/b:cm: Bug B
>>> ret = ui.run(cmd, {'status':'all', 'sort':'time'})
abc/a:om: Bug A
abc/b:cm: Bug B
>>> bd.storage.writeable
True
>>> ui.cleanup()
>>> bd.cleanup()
"""
name = 'list'
def __init__(self, *args, **kwargs):
libbe.command.Command.__init__(self, *args, **kwargs)
self.options.extend([
libbe.command.Option(name='status',
help='Only show bugs matching the STATUS specifier',
arg=libbe.command.Argument(
name='status', metavar='STATUS', default='active',
completion_callback=libbe.command.util.complete_status)),
libbe.command.Option(name='severity',
help='Only show bugs matching the SEVERITY specifier',
arg=libbe.command.Argument(
name='severity', metavar='SEVERITY', default='all',
completion_callback=libbe.command.util.complete_severity)),
libbe.command.Option(name='important',
help='List bugs with >= "serious" severity'),
libbe.command.Option(name='assigned', short_name='a',
help='Only show bugs matching ASSIGNED',
arg=libbe.command.Argument(
name='assigned', metavar='ASSIGNED', default=None,
completion_callback=libbe.command.util.complete_assigned)),
libbe.command.Option(name='mine', short_name='m',
help='List bugs assigned to you'),
libbe.command.Option(name='extra-strings', short_name='e',
help='Only show bugs matching STRINGS, e.g. --extra-strings'
' TAG:working,TAG:xml',
arg=libbe.command.Argument(
name='extra-strings', metavar='STRINGS', default=None,
completion_callback=libbe.command.util.complete_extra_strings)),
libbe.command.Option(name='sort', short_name='S',
help='Adjust bug-sort criteria with comma-separated list '
'SORT. e.g. "--sort creator,time". '
'Available criteria: %s' % ','.join(AVAILABLE_CMPS),
arg=libbe.command.Argument(
name='sort', metavar='SORT', default=None,
completion_callback=libbe.command.util.Completer(AVAILABLE_CMPS))),
libbe.command.Option(name='tags', short_name='t',
help='Add TAGS: field to standard listing format.'),
libbe.command.Option(name='ids', short_name='i',
help='Only print the bug IDS'),
libbe.command.Option(name='xml', short_name='x',
help='Dump output in XML format'),
])
# parser.add_option("-S", "--sort", metavar="SORT-BY", dest="sort_by",
# help="Adjust bug-sort criteria with comma-separated list SORT-BY. e.g. \"--sort creator,time\". Available criteria: %s" % ','.join(AVAILABLE_CMPS), default=None)
# # boolean options. All but ids and xml are special cases of long forms
# ("w", "wishlist", "List bugs with 'wishlist' severity"),
# ("A", "active", "List all active bugs"),
# ("U", "unconfirmed", "List unconfirmed bugs"),
# ("o", "open", "List open bugs"),
# ("T", "test", "List bugs in testing"),
# for s in bools:
# attr = s[1].replace('-','_')
# short = "-%c" % s[0]
# long = "--%s" % s[1]
# help = s[2]
# parser.add_option(short, long, action="store_true",
# dest=attr, help=help, default=False)
# return parser
#
# ])
def _run(self, **params):
storage = self._get_storage()
bugdirs = self._get_bugdirs()
writeable = storage.writeable
storage.writeable = False
cmp_list, status, severity, assigned, extra_strings_regexps = \
self._parse_params(bugdirs, params)
filter = Filter(status, severity, assigned,
extra_strings_regexps=extra_strings_regexps)
bugs = list(itertools.chain(*list(
[bugdir.bug_from_uuid(uuid) for uuid in bugdir.uuids()]
for bugdir in bugdirs.values())))
bugs = [b for b in bugs if filter(bugdirs, b) == True]
self.result = bugs
if len(bugs) == 0 and params['xml'] == False:
print >> self.stdout, 'No matching bugs found'
# sort bugs
bugs = self._sort_bugs(bugs, cmp_list)
# print list of bugs
if params['ids'] == True:
for bug in bugs:
print >> self.stdout, bug.id.user()
else:
self._list_bugs(bugs, show_tags=params['tags'], xml=params['xml'])
storage.writeable = writeable
return 0
def _parse_params(self, bugdirs, params):
cmp_list = []
if params['sort'] != None:
for cmp in params['sort'].split(','):
if cmp not in AVAILABLE_CMPS:
raise libbe.command.UserError(
'Invalid sort on "%s".\nValid sorts:\n %s'
% (cmp, '\n '.join(AVAILABLE_CMPS)))
cmp_list.append(getattr(libbe.bug, 'cmp_%s' % cmp))
status = parse_status(params['status'])
severity = parse_severity(params['severity'],
important=params['important'])
# select assigned
if params['assigned'] == None:
if params['mine'] == True:
assigned = [self._get_user_id()]
else:
assigned = 'all'
else:
assigned = libbe.command.util.select_values(
params['assigned'], libbe.command.util.assignees(bugdirs))
for i in range(len(assigned)):
if assigned[i] == '-':
assigned[i] = params['user-id']
if params['extra-strings'] == None:
extra_strings_regexps = []
else:
extra_strings_regexps = [re.compile(x)
for x in params['extra-strings'].split(',')]
return (cmp_list, status, severity, assigned, extra_strings_regexps)
def _sort_bugs(self, bugs, cmp_list=None):
if cmp_list is None:
cmp_list = []
cmp_list.extend(libbe.bug.DEFAULT_CMP_FULL_CMP_LIST)
cmp_fn = libbe.bug.BugCompoundComparator(cmp_list=cmp_list)
bugs.sort(cmp_fn)
return bugs
def _list_bugs(self, bugs, show_tags=False, xml=False):
if xml == True:
print >> self.stdout, \
'<?xml version="1.0" encoding="%s" ?>' % self.stdout.encoding
print >> self.stdout, '<be-xml>'
if len(bugs) > 0:
for bug in bugs:
if xml == True:
print >> self.stdout, bug.xml(show_comments=True)
else:
bug_string = bug.string(shortlist=True)
if show_tags == True:
attrs,summary = bug_string.split(' ', 1)
bug_string = (
'%s%s: %s'
% (attrs,
','.join(libbe.command.tag.get_tags(bug)),
summary))
print >> self.stdout, bug_string
if xml == True:
print >> self.stdout, '</be-xml>'
def _long_help(self):
return """
This command lists bugs. Normally it prints a short string like
bea/576:om:[TAGS:] Allow attachments
Where
bea/576 the bug id
o the bug status is 'open' (first letter)
m the bug severity is 'minor' (first letter)
TAGS comma-separated list of bug tags (if --tags is set)
Allo... the bug summary string
You can optionally (-u) print only the bug ids.
There are several criteria that you can filter by:
* status
* severity
* assigned (who the bug is assigned to)
Allowed values for each criterion may be given in a comma separated
list. The special string "all" may be used with any of these options
to match all values of the criterion. As with the --status and
--severity options for `be depend`, starting the list with a minus
sign makes your selections a blacklist instead of the default
whitelist.
status
%s
severity
%s
assigned
free form, with the string '-' being a shortcut for yourself.
In addition, there are some shortcut options that set boolean flags.
The boolean options are ignored if the matching string option is used.
""" % (','.join(libbe.bug.status_values),
','.join(libbe.bug.severity_values))
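# Illustrative command lines (not part of the original module); they simply
# exercise the filtering and output options described in the help text above.
# The "be" executable is Bugs Everywhere's command-line front end.
#
#   be list --status open,test --severity serious
#   be list --assigned - --tags
#   be list --status all --sort creator,time --ids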
| gpl-2.0 | -3,966,256,480,652,344,000 | 41.892857 | 186 | 0.558794 | false |
alexforencich/python-ivi | ivi/tektronix/tektronixMSO5204B.py | 1 | 1557 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMSO5000 import *
class tektronixMSO5204B(tektronixMSO5000):
"Tektronix MSO5204B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO5204B')
super(tektronixMSO5204B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._bandwidth = 2e9
self._init_channels()
| mit | 3,777,026,336,843,986,400 | 36.97561 | 77 | 0.757225 | false |
simbha/mAngE-Gin | lib/Django 1.7/django/contrib/staticfiles/finders.py | 106 | 9852 | from collections import OrderedDict
import os
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.functional import empty, LazyObject
from django.utils.module_loading import import_string
from django.utils._os import safe_join
from django.utils import six, lru_cache
from django.contrib.staticfiles import utils
# To keep track on which directories the finder has searched the static files.
searched_locations = []
class BaseFinder(object):
"""
A base file finder to be used for custom staticfiles finder classes.
"""
def find(self, path, all=False):
"""
Given a relative file path this ought to find an
absolute file path.
If the ``all`` parameter is ``False`` (default) only
the first found file path will be returned; if set
to ``True`` a list of all found files paths is returned.
"""
raise NotImplementedError('subclasses of BaseFinder must provide a find() method')
def list(self, ignore_patterns):
"""
Given an optional list of paths to ignore, this should return
a two item iterable consisting of the relative path and storage
instance.
"""
raise NotImplementedError('subclasses of BaseFinder must provide a list() method')
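# A minimal custom finder sketch (illustrative only, not part of Django). It
# serves files from one hard-coded directory; the path below is an assumption
# made up for the example.
class ExampleDirectoryFinder(BaseFinder):
    root = '/srv/example-static'  # assumed location, adjust as needed
    def find(self, path, all=False):
        # Return the absolute path if the requested file exists under root.
        matched_path = safe_join(self.root, path)
        if os.path.exists(matched_path):
            return [matched_path] if all else matched_path
        return []
    def list(self, ignore_patterns):
        # Walk the directory through a FileSystemStorage, like the finders below.
        storage = FileSystemStorage(location=self.root)
        for path in utils.get_files(storage, ignore_patterns):
            yield path, storage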
class FileSystemFinder(BaseFinder):
"""
A static files finder that uses the ``STATICFILES_DIRS`` setting
to locate files.
"""
def __init__(self, app_names=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = OrderedDict()
if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
raise ImproperlyConfigured(
"Your STATICFILES_DIRS setting is not a tuple or list; "
"perhaps you forgot a trailing comma?")
for root in settings.STATICFILES_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
raise ImproperlyConfigured(
"The STATICFILES_DIRS setting should "
"not contain the STATIC_ROOT setting")
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super(FileSystemFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Finds a requested static file in a location, returning the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
class AppDirectoriesFinder(BaseFinder):
"""
A static files finder that looks in the directory of each app as
specified in the source_dir attribute.
"""
storage_class = FileSystemStorage
source_dir = 'static'
def __init__(self, app_names=None, *args, **kwargs):
# The list of apps that are handled
self.apps = []
# Mapping of app names to storage instances
self.storages = OrderedDict()
app_configs = apps.get_app_configs()
if app_names:
app_names = set(app_names)
app_configs = [ac for ac in app_configs if ac.name in app_names]
for app_config in app_configs:
app_storage = self.storage_class(
os.path.join(app_config.path, self.source_dir))
if os.path.isdir(app_storage.location):
self.storages[app_config.name] = app_storage
if app_config.name not in self.apps:
self.apps.append(app_config.name)
super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in six.itervalues(self.storages):
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
app_location = self.storages[app].location
if app_location not in searched_locations:
searched_locations.append(app_location)
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches
def find_in_app(self, app, path):
"""
Find a requested static file in an app's static locations.
"""
storage = self.storages.get(app, None)
if storage:
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path
class BaseStorageFinder(BaseFinder):
"""
A base static files finder to be used to extended
with an own storage class.
"""
storage = None
def __init__(self, storage=None, *args, **kwargs):
if storage is not None:
self.storage = storage
if self.storage is None:
raise ImproperlyConfigured("The staticfiles storage finder %r "
"doesn't have a storage class "
"assigned." % self.__class__)
        # Make sure we have a storage instance here.
if not isinstance(self.storage, (Storage, LazyObject)):
self.storage = self.storage()
super(BaseStorageFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the default file storage, if it's local.
"""
try:
self.storage.path('')
except NotImplementedError:
pass
else:
if self.storage.location not in searched_locations:
searched_locations.append(self.storage.location)
if self.storage.exists(path):
match = self.storage.path(path)
if all:
match = [match]
return match
return []
def list(self, ignore_patterns):
"""
List all files of the storage.
"""
for path in utils.get_files(self.storage, ignore_patterns):
yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
"""
A static files finder that uses the default storage backend.
"""
storage = default_storage
def __init__(self, *args, **kwargs):
super(DefaultStorageFinder, self).__init__(*args, **kwargs)
base_location = getattr(self.storage, 'base_location', empty)
if not base_location:
raise ImproperlyConfigured("The storage backend of the "
"staticfiles finder %r doesn't have "
"a valid location." % self.__class__)
def find(path, all=False):
"""
Find a static file with the given path using all enabled finders.
If ``all`` is ``False`` (default), return the first matching
absolute path (or ``None`` if no match). Otherwise return a list.
"""
searched_locations[:] = []
matches = []
for finder in get_finders():
result = finder.find(path, all=all)
if not all and result:
return result
if not isinstance(result, (list, tuple)):
result = [result]
matches.extend(result)
if matches:
return matches
# No match.
return [] if all else None
def get_finders():
for finder_path in settings.STATICFILES_FINDERS:
yield get_finder(finder_path)
@lru_cache.lru_cache(maxsize=None)
def get_finder(import_path):
"""
Imports the staticfiles finder class described by import_path, where
import_path is the full Python path to the class.
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder()
| mit | 723,407,337,129,858,400 | 34.695652 | 103 | 0.587901 | false |
Kmayankkr/robocomp | tools/rcmonitor/someTest.py | 5 | 1932 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
# Custom Template
import Ice
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.ic = Ice.initialize(sys.argv)
self.mods = modules
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompCamara'].CamaraPrx.checkedCast(self.prx)
self.measures = range(33)
self.job()
def job(self):
# Remote procedure call
output = self.proxy.getRGBPackedImage(5) # vector, head, bState
# Store image
self.image = output[0]
# Store pos measure
self.measures.pop(0)
self.measures.append(output[1].tilt.pos)
def paintEvent(self, event=None):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
# Draw image
qimage = QImage(self.image, 320, 240, QImage.Format_RGB888)
painter.drawImage(QPointF(0, 0), qimage)
# Draw signal
for idx in range(len(self.measures)-1):
painter.drawLine(idx*10, (self.height()/2)-(self.measures[idx]*100), (idx+1)*10, (self.height()/2)-(self.measures[idx+1]*100))
painter.end()
| gpl-3.0 | 6,752,904,597,666,087,000 | 32.310345 | 132 | 0.690994 | false |
binarytemple/ansible | plugins/inventory/rax.py | 24 | 9460 | #!/usr/bin/env python
# (c) 2013, Jesse Keating <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
inventory: rax
short_description: Rackspace Public Cloud external inventory script
description:
- Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
- |
When run against a specific host, this script returns the following
variables:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
    where some items can have nested structures.
- credentials are set in a credentials file
version_added: None
options:
creds_file:
description:
- File to find the Rackspace Public Cloud credentials in
required: true
default: null
region:
description:
- An optional value to narrow inventory scope, i.e. DFW, ORD, IAD, LON
required: false
default: null
authors:
- Jesse Keating <[email protected]>
- Paul Durivage <[email protected]>
- Matt Martz <[email protected]>
notes:
- RAX_CREDS_FILE is an optional environment variable that points to a
pyrax-compatible credentials file.
- If RAX_CREDS_FILE is not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials.
- See https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating
- RAX_REGION is an optional environment variable to narrow inventory search
scope
- RAX_REGION, if used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list
- RAX_ENV is an environment variable that will use an environment as
configured in ~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
- RAX_META_PREFIX is an environment variable that changes the prefix used
for meta key/value groups. For compatibility with ec2.py set to
RAX_META_PREFIX=tag
requirements: [ "pyrax" ]
examples:
- description: List server instances
code: RAX_CREDS_FILE=~/.raxpub rax.py --list
- description: List servers in ORD datacenter only
code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
- description: List servers in ORD and DFW datacenters
code: RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
- description: Get server details for server named "server.example.com"
code: RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
'''
import os
import re
import sys
import argparse
import collections
from types import NoneType
try:
import json
except:
import simplejson as json
try:
import pyrax
except ImportError:
print('pyrax is required for this module')
sys.exit(1)
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
def rax_slugify(value):
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
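# Worked examples (derived from the regex above): characters outside
# [A-Za-z0-9_-] become underscores, the result is lower-cased, and a "rax_"
# prefix is added.
#
#   rax_slugify('OS-EXT-STS:task_state')  # -> 'rax_os-ext-sts_task_state'
#   rax_slugify('accessIPv4')             # -> 'rax_accessipv4'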
def to_dict(obj):
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
key = rax_slugify(key)
instance[key] = value
return instance
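# Illustrative note: to_dict() keeps only public, non-callable attributes and
# slugifies their names, so an object exposing "accessIPv4" and "human_id"
# would yield the keys 'rax_accessipv4' and 'rax_human_id'.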
def host(regions, hostname):
hostvars = {}
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
if server.name == hostname:
for key, value in to_dict(server).items():
hostvars[key] = value
# And finally, add an IP address
hostvars['ansible_ssh_host'] = server.accessIPv4
print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list(regions):
groups = collections.defaultdict(list)
hostvars = collections.defaultdict(dict)
images = {}
# Go through all the regions looking for servers
for region in regions:
# Connect to the region
cs = pyrax.connect_to_cloudservers(region=region)
for server in cs.servers.list():
# Create a group on region
groups[region].append(server.name)
# Check if group metadata key in servers' metadata
group = server.metadata.get('group')
if group:
groups[group].append(server.name)
for extra_group in server.metadata.get('groups', '').split(','):
if extra_group:
groups[extra_group].append(server.name)
# Add host metadata
for key, value in to_dict(server).items():
hostvars[server.name][key] = value
hostvars[server.name]['rax_region'] = region
for key, value in server.metadata.iteritems():
prefix = os.getenv('RAX_META_PREFIX', 'meta')
groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
groups['instance-%s' % server.id].append(server.name)
groups['flavor-%s' % server.flavor['id']].append(server.name)
try:
imagegroup = 'image-%s' % images[server.image['id']]
groups[imagegroup].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
except KeyError:
try:
image = cs.images.get(server.image['id'])
except cs.exceptions.NotFound:
groups['image-%s' % server.image['id']].append(server.name)
else:
images[image.id] = image.human_id
groups['image-%s' % image.human_id].append(server.name)
groups['image-%s' % server.image['id']].append(server.name)
# And finally, add an IP address
hostvars[server.name]['ansible_ssh_host'] = server.accessIPv4
if hostvars:
groups['_meta'] = {'hostvars': hostvars}
print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
'inventory module')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def setup():
default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
env = os.getenv('RAX_ENV', None)
if env:
pyrax.set_environment(env)
keyring_username = pyrax.get_setting('keyring_username')
# Attempt to grab credentials from environment first
try:
creds_file = os.path.expanduser(os.environ['RAX_CREDS_FILE'])
except KeyError, e:
# But if that fails, use the default location of
# ~/.rackspace_cloud_credentials
if os.path.isfile(default_creds_file):
creds_file = default_creds_file
elif not keyring_username:
sys.stderr.write('No value in environment variable %s and/or no '
'credentials file at %s\n'
% (e.message, default_creds_file))
sys.exit(1)
identity_type = pyrax.get_setting('identity_type')
pyrax.set_setting('identity_type', identity_type or 'rackspace')
region = pyrax.get_setting('region')
try:
if keyring_username:
pyrax.keyring_auth(keyring_username, region=region)
else:
pyrax.set_credential_file(creds_file, region=region)
except Exception, e:
sys.stderr.write("%s: %s\n" % (e, e.message))
sys.exit(1)
regions = []
if region:
regions.append(region)
else:
for region in os.getenv('RAX_REGION', 'all').split(','):
region = region.strip().upper()
if region == 'ALL':
regions = pyrax.regions
break
elif region not in pyrax.regions:
sys.stderr.write('Unsupported region %s' % region)
sys.exit(1)
elif region not in regions:
regions.append(region)
return regions
def main():
args = parse_args()
regions = setup()
if args.list:
_list(regions)
elif args.host:
host(regions, args.host)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,296,031,224,833,735,700 | 32.309859 | 94 | 0.621459 | false |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/Paste-2.0.1/paste/gzipper.py | 50 | 3611 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware
Gzip-encodes the response.
"""
import gzip
from paste.response import header_value, remove_header
from paste.httpheaders import CONTENT_LENGTH
import six
class GzipOutput(object):
pass
class middleware(object):
def __init__(self, application, compress_level=6):
self.application = application
self.compress_level = int(compress_level)
def __call__(self, environ, start_response):
if 'gzip' not in environ.get('HTTP_ACCEPT_ENCODING', ''):
# nothing for us to do, so this middleware will
# be a no-op:
return self.application(environ, start_response)
response = GzipResponse(start_response, self.compress_level)
app_iter = self.application(environ,
response.gzip_start_response)
if app_iter is not None:
response.finish_response(app_iter)
return response.write()
class GzipResponse(object):
def __init__(self, start_response, compress_level):
self.start_response = start_response
self.compress_level = compress_level
self.buffer = six.BytesIO()
self.compressible = False
self.content_length = None
def gzip_start_response(self, status, headers, exc_info=None):
self.headers = headers
ct = header_value(headers,'content-type')
ce = header_value(headers,'content-encoding')
self.compressible = False
if ct and (ct.startswith('text/') or ct.startswith('application/')) \
and 'zip' not in ct:
self.compressible = True
if ce:
self.compressible = False
if self.compressible:
headers.append(('content-encoding', 'gzip'))
remove_header(headers, 'content-length')
self.headers = headers
self.status = status
return self.buffer.write
def write(self):
out = self.buffer
out.seek(0)
s = out.getvalue()
out.close()
return [s]
def finish_response(self, app_iter):
if self.compressible:
output = gzip.GzipFile(mode='wb', compresslevel=self.compress_level,
fileobj=self.buffer)
else:
output = self.buffer
try:
for s in app_iter:
output.write(s)
if self.compressible:
output.close()
finally:
if hasattr(app_iter, 'close'):
app_iter.close()
content_length = self.buffer.tell()
CONTENT_LENGTH.update(self.headers, content_length)
self.start_response(self.status, self.headers)
def filter_factory(application, **conf):
import warnings
warnings.warn(
'This function is deprecated; use make_gzip_middleware instead',
DeprecationWarning, 2)
def filter(application):
return middleware(application)
return filter
def make_gzip_middleware(app, global_conf, compress_level=6):
"""
Wrap the middleware, so that it applies gzipping to a response
when it is supported by the browser and the content is of
type ``text/*`` or ``application/*``
"""
compress_level = int(compress_level)
return middleware(app, compress_level=compress_level)
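if __name__ == '__main__':
    # Hedged demo, not part of the original module: serve a trivial WSGI app
    # through the gzip middleware using the standard-library server. The port
    # and response body are arbitrary illustration values; the response is only
    # compressed for clients that send "Accept-Encoding: gzip".
    from wsgiref.simple_server import make_server

    def demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello from behind the gzip middleware\n']

    make_server('127.0.0.1', 8000, middleware(demo_app)).serve_forever()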
| gpl-2.0 | 8,809,160,515,196,071,000 | 32.747664 | 84 | 0.629189 | false |
geertj/python-tdbus | examples/avahi.py | 1 | 1381 | #!/usr/bin/env python
#
# This file is part of python-tdbus. Python-tdbus is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
# complete list.
# This example shows how to access Avahi on the D-BUS.
import sys
from tdbus import *
CONN_AVAHI = 'org.freedesktop.Avahi'
PATH_SERVER = '/'
IFACE_SERVER = 'org.freedesktop.Avahi.Server'
conn = Connection(DBUS_BUS_SYSTEM)
dispatcher = BlockingDispatcher(conn)
try:
result = dispatcher.call_method(PATH_SERVER, 'GetVersionString',
interface=IFACE_SERVER, destination=CONN_AVAHI)
except Error:
print 'Avahi NOT available.'
raise
print 'Avahi is available at %s' % CONN_AVAHI
print 'Avahi version: %s' % result[0]
print
print 'Browsing service types on domain: local'
print 'Press CTRL-\\ to exit'
print
result = dispatcher.call_method('/', 'ServiceTypeBrowserNew', interface=IFACE_SERVER,
destination=CONN_AVAHI, format='iisu', args=(-1, 0, 'local', 0))
browser = result[0]
def item_new(message, dispatcher):
args = message.get_args()
print 'service %s exists on domain %s' % (args[2], args[3])
dispatcher.add_signal_handler(browser, 'ItemNew', item_new)
dispatcher.dispatch()
| mit | 6,137,892,896,904,960,000 | 29.688889 | 85 | 0.708182 | false |
yajnab/android_kernel_samsung_msm7x27 | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
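# Example invocations (illustrative, following the usage string defined below):
#   perf trace -s sctop.py              # all comms, refresh every 3 seconds
#   perf trace -s sctop.py firefox 5    # only syscalls made by "firefox", every 5 seconds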
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40d %10d\n" % (id, val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 | -7,423,571,370,482,485,000 | 23.820513 | 75 | 0.654442 | false |
sajuptpm/manila | manila/tests/cmd/test_manage.py | 2 | 15025 | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import code
import readline
import sys
import ddt
import mock
from oslo_config import cfg
import six
from manila.cmd import manage as manila_manage
from manila import context
from manila import db
from manila.db import migration
from manila import test
from manila import version
CONF = cfg.CONF
@ddt.ddt
class ManilaCmdManageTestCase(test.TestCase):
def setUp(self):
super(ManilaCmdManageTestCase, self).setUp()
sys.argv = ['manila-share']
CONF(sys.argv[1:], project='manila', version=version.version_string())
self.shell_commands = manila_manage.ShellCommands()
self.host_commands = manila_manage.HostCommands()
self.db_commands = manila_manage.DbCommands()
self.version_commands = manila_manage.VersionCommands()
self.config_commands = manila_manage.ConfigCommands()
self.get_log_cmds = manila_manage.GetLogCommands()
self.service_cmds = manila_manage.ServiceCommands()
def test_param2id_is_uuid_like(self):
obj_id = '12345678123456781234567812345678'
self.assertEqual(obj_id, manila_manage.param2id(obj_id))
def test_param2id_not_uuid_like_with_dash(self):
obj_id = '112-112-112'
self.assertIsNone(manila_manage.param2id(obj_id))
def test_param2id_not_uuid_like_without_dash(self):
obj_id = '123'
self.assertEqual(123, manila_manage.param2id(obj_id))
def test_param2id_not_uuid_like_value_error(self):
obj_id = 'invalidvalue'
self.assertRaises(ValueError, manila_manage.param2id, obj_id)
@mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock())
def test_shell_commands_bpython(self):
self.shell_commands.bpython()
manila_manage.ShellCommands.run.assert_called_once_with('bpython')
@mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock())
def test_shell_commands_ipython(self):
self.shell_commands.ipython()
manila_manage.ShellCommands.run.assert_called_once_with('ipython')
@mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock())
def test_shell_commands_python(self):
self.shell_commands.python()
manila_manage.ShellCommands.run.assert_called_once_with('python')
@ddt.data({}, {'shell': 'bpython'})
def test_run_bpython(self, kwargs):
try:
import bpython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(bpython, 'embed')
self.shell_commands.run(**kwargs)
bpython.embed.assert_called_once_with()
def test_run_bpython_import_error(self):
try:
import bpython
import IPython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(bpython, 'embed',
mock.Mock(side_effect=ImportError()))
self.mock_object(IPython, 'embed')
self.shell_commands.run(shell='bpython')
IPython.embed.assert_called_once_with()
def test_run(self):
try:
import bpython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(bpython, 'embed')
self.shell_commands.run()
bpython.embed.assert_called_once_with()
def test_run_ipython(self):
try:
import IPython
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(IPython, 'embed')
self.shell_commands.run(shell='ipython')
IPython.embed.assert_called_once_with()
def test_run_ipython_import_error(self):
try:
import IPython
if not hasattr(IPython, 'Shell'):
setattr(IPython, 'Shell', mock.Mock())
setattr(IPython.Shell, 'IPShell',
mock.Mock(side_effect=ImportError()))
except ImportError as e:
self.skipTest(six.text_type(e))
self.mock_object(IPython, 'embed',
mock.Mock(side_effect=ImportError()))
self.mock_object(readline, 'parse_and_bind')
self.mock_object(code, 'interact')
shell = IPython.embed.return_value
self.shell_commands.run(shell='ipython')
IPython.Shell.IPShell.assert_called_once_with(argv=[])
self.assertFalse(shell.mainloop.called)
self.assertTrue(readline.parse_and_bind.called)
code.interact.assert_called_once_with()
def test_run_python(self):
self.mock_object(readline, 'parse_and_bind')
self.mock_object(code, 'interact')
self.shell_commands.run(shell='python')
readline.parse_and_bind.assert_called_once_with("tab:complete")
code.interact.assert_called_once_with()
def test_run_python_import_error(self):
self.mock_object(readline, 'parse_and_bind')
self.mock_object(code, 'interact')
self.shell_commands.run(shell='python')
readline.parse_and_bind.assert_called_once_with("tab:complete")
code.interact.assert_called_once_with()
@mock.patch('six.moves.builtins.print')
def test_list(self, print_mock):
serv_1 = {
'host': 'fake_host1',
'availability_zone': 'avail_zone1',
}
serv_2 = {
'host': 'fake_host2',
'availability_zone': 'avail_zone2',
}
self.mock_object(db, 'service_get_all',
mock.Mock(return_value=[serv_1, serv_2]))
self.mock_object(context, 'get_admin_context',
mock.Mock(return_value='admin_ctxt'))
self.host_commands.list(zone='avail_zone1')
context.get_admin_context.assert_called_once_with()
db.service_get_all.assert_called_once_with('admin_ctxt')
print_mock.assert_has_calls([
mock.call(u'host \tzone '),
mock.call('fake_host1 \tavail_zone1 ')])
@mock.patch('six.moves.builtins.print')
def test_list_zone_is_none(self, print_mock):
serv_1 = {
'host': 'fake_host1',
'availability_zone': 'avail_zone1',
}
serv_2 = {
'host': 'fake_host2',
'availability_zone': 'avail_zone2',
}
self.mock_object(db, 'service_get_all',
mock.Mock(return_value=[serv_1, serv_2]))
self.mock_object(context, 'get_admin_context',
mock.Mock(return_value='admin_ctxt'))
self.host_commands.list()
context.get_admin_context.assert_called_once_with()
db.service_get_all.assert_called_once_with('admin_ctxt')
print_mock.assert_has_calls([
mock.call(u'host \tzone '),
mock.call('fake_host1 \tavail_zone1 '),
mock.call('fake_host2 \tavail_zone2 ')])
def test_sync(self):
self.mock_object(migration, 'upgrade')
self.db_commands.sync(version='123')
migration.upgrade.assert_called_once_with('123')
def test_version(self):
self.mock_object(migration, 'version')
self.db_commands.version()
migration.version.assert_called_once_with()
def test_downgrade(self):
self.mock_object(migration, 'downgrade')
self.db_commands.downgrade(version='123')
migration.downgrade.assert_called_once_with('123')
def test_revision(self):
self.mock_object(migration, 'revision')
self.db_commands.revision('message', True)
migration.revision.assert_called_once_with('message', True)
def test_stamp(self):
self.mock_object(migration, 'stamp')
self.db_commands.stamp(version='123')
migration.stamp.assert_called_once_with('123')
def test_version_commands_list(self):
self.mock_object(version, 'version_string',
mock.Mock(return_value='123'))
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
self.version_commands.list()
version.version_string.assert_called_once_with()
self.assertEqual('123\n', fake_out.getvalue())
def test_version_commands_call(self):
self.mock_object(version, 'version_string',
mock.Mock(return_value='123'))
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
self.version_commands()
version.version_string.assert_called_once_with()
self.assertEqual('123\n', fake_out.getvalue())
def test_get_log_commands_no_errors(self):
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
CONF.set_override('log_dir', None)
expected_out = 'No errors in logfiles!\n'
self.get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('six.moves.builtins.open')
@mock.patch('os.listdir')
def test_get_log_commands_errors(self, listdir, open):
CONF.set_override('log_dir', 'fake-dir')
listdir.return_value = ['fake-error.log']
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
open.return_value = six.StringIO(
'[ ERROR ] fake-error-message')
expected_out = ('fake-dir/fake-error.log:-\n'
'Line 1 : [ ERROR ] fake-error-message\n')
self.get_log_cmds.errors()
self.assertEqual(expected_out, fake_out.getvalue())
open.assert_called_once_with('fake-dir/fake-error.log', 'r')
listdir.assert_called_once_with(CONF.log_dir)
@mock.patch('six.moves.builtins.open')
@mock.patch('os.path.exists')
def test_get_log_commands_syslog_no_log_file(self, path_exists, open):
path_exists.return_value = False
exit = self.assertRaises(SystemExit, self.get_log_cmds.syslog)
self.assertEqual(exit.code, 1)
path_exists.assert_any_call('/var/log/syslog')
path_exists.assert_any_call('/var/log/messages')
@mock.patch('manila.utils.service_is_up')
@mock.patch('manila.db.service_get_all')
@mock.patch('manila.context.get_admin_context')
def test_service_commands_list(self, get_admin_context, service_get_all,
service_is_up):
ctxt = context.RequestContext('fake-user', 'fake-project')
get_admin_context.return_value = ctxt
service = {'binary': 'manila-binary',
'host': 'fake-host.fake-domain',
'availability_zone': 'fake-zone',
'updated_at': '2014-06-30 11:22:33',
'disabled': False}
service_get_all.return_value = [service]
service_is_up.return_value = True
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
format = "%-16s %-36s %-16s %-10s %-5s %-10s"
print_format = format % ('Binary',
'Host',
'Zone',
'Status',
'State',
'Updated At')
service_format = format % (service['binary'],
service['host'].partition('.')[0],
service['availability_zone'],
'enabled',
':-)',
service['updated_at'])
expected_out = print_format + '\n' + service_format + '\n'
self.service_cmds.list()
self.assertEqual(expected_out, fake_out.getvalue())
get_admin_context.assert_called_with()
service_get_all.assert_called_with(ctxt)
service_is_up.assert_called_with(service)
def test_methods_of(self):
obj = type('Fake', (object,),
{name: lambda: 'fake_' for name in ('_a', 'b', 'c')})
expected = [('b', obj.b), ('c', obj.c)]
self.assertEqual(expected, manila_manage.methods_of(obj))
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_argv_lt_2(self, register_cli_opt):
script_name = 'manila-manage'
sys.argv = [script_name]
CONF(sys.argv[1:], project='manila', version=version.version_string())
exit = self.assertRaises(SystemExit, manila_manage.main)
self.assertTrue(register_cli_opt.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_log.log.register_options')
@mock.patch('oslo_log.log.setup')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
def test_main_sudo_failed(self, register_cli_opt, log_setup,
register_log_opts, config_opts_call):
script_name = 'manila-manage'
sys.argv = [script_name, 'fake_category', 'fake_action']
config_opts_call.side_effect = cfg.ConfigFilesNotFoundError(
mock.sentinel._namespace)
exit = self.assertRaises(SystemExit, manila_manage.main)
self.assertTrue(register_cli_opt.called)
register_log_opts.assert_called_once_with(CONF)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='manila',
version=version.version_string())
self.assertFalse(log_setup.called)
self.assertEqual(exit.code, 2)
@mock.patch('oslo_config.cfg.ConfigOpts.__call__')
@mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt')
@mock.patch('oslo_log.log.register_options')
def test_main(self, register_log_opts, register_cli_opt, config_opts_call):
script_name = 'manila-manage'
sys.argv = [script_name, 'config', 'list']
action_fn = mock.MagicMock()
CONF.category = mock.MagicMock(action_fn=action_fn)
manila_manage.main()
self.assertTrue(register_cli_opt.called)
register_log_opts.assert_called_once_with(CONF)
config_opts_call.assert_called_once_with(
sys.argv[1:], project='manila', version=version.version_string())
self.assertTrue(action_fn.called)
@ddt.data('bar', '-bar', '--bar')
def test_get_arg_string(self, arg):
parsed_arg = manila_manage.get_arg_string(arg)
self.assertEqual('bar', parsed_arg)
| apache-2.0 | -6,347,929,367,467,317,000 | 38.748677 | 79 | 0.6 | false |
Jc11235/Kekulean_Program | GUI_Version/Ubuntu_Version/DriverMethods.py | 1 | 39406 | from PerfectMatchingData import *
from Face import *
from Vertex import *
from Graph import *
from VertexList import *
from Output import *
from KekuleanMethods import *
from Checkers import *
from RequiredEdgeMethods import *
from Tkinter import *
from AppInformation import *
from random import randint
import time
import os
import shutil
import multiprocessing as mp
import threading
Break = False
BreakLoop = False
#These methods are the main drivers of the program. Some of their helper methods are also present here.
settings = {}
#function that reads in the face graph from a file and returns it as a list of Face objects
def getInput(fileName):
faceGraph = []
inputFile = open(fileName, 'r')
row = inputFile.readline()
y = 0
while len(row) > 0:
row = row.replace('\n', '')
row = row.split(" ")
for i in range(len(row)):
x = row[i]
faceGraph.append((Face(int(x), y)))
row = inputFile.readline()
y += 1
inputFile.close()
return faceGraph
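#A sketch of the input file layout getInput expects (inferred from the parsing
#above): each line holds the x-coordinates of the faces in that row, separated
#by single spaces, e.g. a three-row graph could be stored as
#	0 1 2
#	0 1
#	1 2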
def getSettings():
fileName = "settings.txt"
inputFile = open(fileName, 'r')
lineNumber = 0
minW = 0
maxW = 0
minH = 0
maxH = 0
line = inputFile.readline()
while len(line) > 0:
line = line.replace('\n', '')
settings[lineNumber] = float(line)
line = inputFile.readline()
lineNumber += 1
inputFile.close()
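#A sketch of the settings.txt layout this loader assumes (one float per line,
#inferred from where settings[] is used below): lines 0-1 give the min/max row
#length, lines 2-3 the min/max number of rows, and line 4 the probability
#(0 to 1) that a face is skipped while a row is generated, e.g.
#	1
#	6
#	1
#	6
#	0.3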
def resetGraph(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeGraph(root,appInfo):
root.geometry("600x400")
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,choice,selection,choiceEntry,fileName = "graph.txt"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = fileName
faceGraph = getInput(fileName)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="That file does not exist, please try again.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distince Kekule structures avaiable. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayClarFries(number,structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetGraph(root,appInfo,submitGraph,graphNumberEntry,view)
#A user-entered number of graphs are generated and tested for Kekulean-ness and written to their proper text files
def randomIntoFiles():
kekuleanFile = open("Kekuleans.txt", "w")
notKekuleanFile = open("NotKekulean.txt", "w")
numK = 0
numNotK = 0
trials = int(raw_input("How many graphs would you like to create? "))
print "\n" #just to provide some visual space
t1 = time.time()
for i in range(trials):
faceGraph = createRandomConnectedGraph()
vGraph = makeVertexGraph(faceGraph)
randGraph = Graph(faceGraph, vGraph)
if isKekulean(randGraph) == True:
numK += 1
kekuleanFile.write("Graph #" + str(numK) + "\n")
kekuleanFile.write(randGraph.simpleToString() + '\n')
else:
numNotK += 1
notKekuleanFile.write("Graph #" + str(numNotK) + "\n")
notKekuleanFile.write(randGraph.simpleToString() + '\n')
#print randGraph
#print "\n"
t2 = time.time()
print "\n" + str(numK) + " Kekulean graph(s) were found.\n" + str(numNotK) + " non-Kekulean graph(s) were found."
print "Time elapsed (in seconds): " + str(t2 - t1) + "\n"
kekuleanFile.close()
notKekuleanFile.close()
#creates a random Kekulean graph, ranks its Kekule structures, and saves them to PNG files
def createRandomKekulean():
#creates a face graphs
randomFaces = createRandomGraph()
randomGraph = _createRandomKekulean()
print "There are", len(randomGraph.getVertexGraph()), "vertices"
graphs = assignMatching(randomGraph)
graphs.sort()
if len(graphs) > 0:
#save graphs as PNG file
savePNG(graphs, "graphs - Fries.png")
Graph.comparison = 'clars'
graphs.sort()
savePNG(graphs, "graphs - Clars.png")
while True:
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
while choice.lower() != 'fries' and choice.lower() != 'clars' and choice.lower() != 'quit':
choice = raw_input("Would you like to view the graphs ranked by Fries or Clars? (or quit?) ")
if choice.lower() == 'clars':
Graph.comparison = 'clars'
elif choice.lower() == 'fries':
Graph.comparison = 'fries'
else:
break
graphs.sort()
graphs.reverse()
print "There are", len(graphs), "Kekulean structures"
displayGraphs(graphs)
else:
print "error - Graph is Kekulean but has no perfect matching - see error.txt for graph"
errorFile = open("error.txt", "w")
errorFile.write(randomGraph.simpleToString() + '\n')
#Creates a random planar graph, which may not be connected
def createRandomGraph():
height = randint(settings[2], settings[3])
randGraph = []
for i in range(height):
rowLength = randint(settings[0], settings[1])
row = getRow(rowLength, i)
while len(row) == 0:
row = getRow(rowLength, i)
randGraph.extend(row)
if checkAlignment(randGraph) == False:
randGraph = createRandomGraph()
return randGraph
def checkAlignment(graph):
for face in graph:
if face.getX() == 0:
break
else:
#there is no face on the y-axis
return False
for face in graph:
if face.getY() == 0:
break
else:
#there is no face on the x-axis
return False
#there is a face on the x-axis
return True
def createRandomConnectedGraph():
g = createRandomGraph()
while isConnected(faceGraphToInts(g)) == False:
g = createRandomGraph()
return g
#generates a row for the createRandomGraph method
def getRow(rl, rowNum):
r = []
for j in range(rl):
chance = randint(0, 100)
if chance > settings[4] * 100:
r.append(Face(j, rowNum))
return r
def _createRandomKekulean():
#creates a face graphs
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
while isKekulean(randomGraph) == False:
#print "making K"
randomFaces = createRandomGraph()
while isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
if isKekulean(randomGraph):
return randomGraph
else:
return _createRandomKekulean()
def createManyKekuleans():
graphs = [] #list of kekulean graphs
graphList = [] #list of the Kekulean graphs with their matchings, and Fries/Clars Faces
trials = int(raw_input("How many graphs would you like to create? "))
pool = mp.Pool(mp.cpu_count())
results = [pool.apply_async(_createRandomKekulean) for x in range(trials)]
graphs = [r.get() for r in results]
for g in graphs:
graphList.extend(assignMatching(g))
graphList.sort()
if len(graphList) > 0:
print "There are", len(graphList), "Kekulean structures"
displayGraphs(graphList)
def testKekuleanThms():
conflictFile = open("conflict.txt", "w")
interval = float(raw_input("How many hours would you like to run the program?"))
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
counter = 0
while t2 - t1 < timeLimit:
print "graph #" + str(counter)
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
nelsonThm = isOldKekulean(randomGraph)
perfectMatchingThm = isKekulean(randomGraph)
if nelsonThm != perfectMatchingThm:
conflictFile.write("Perfect matching: " + str(perfectMatchingThm) + " Nelson Thm: " + str(nelsonThm) + "\n")
conflictFile.write(randomGraph.simpleToString())
conflictFile.write("\n")
t2 = time.time()
counter += 1
conflictFile.close()
#takes a row and returns the number of vertical edges in that row
def getRowEdgeCount(row):
edgeCount = 0
f = 0
for i in range(len(row)):
edgeCount += 1
try:
f = row[i+1]
except:
f = None
if row[i] + 1 != f or f == None:
edgeCount += 1
return edgeCount
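#Worked example: the loop counts one edge per face plus one extra edge at the
#end of each run of consecutive faces, so getRowEdgeCount([0, 1, 2]) returns 4.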
def getMinRows(g):
minRows = {}
index = 0
minEdges = sys.maxint
for r in g:
edgeCount = getRowEdgeCount(r)
if edgeCount < minEdges:
minEdges = edgeCount
minRows.clear()
minRows[index] = r
elif edgeCount == minEdges:
minRows[index] = r
index += 1
return minRows
#counts up the number of peaks above each row and stores those values in a list at indexes that correspond to the rows of the graph
def getPeaksAboveRows(g):
peaksAboveRow = [0]*(len(g))
for r in range(len(g)):
#print "r: " + str(r)
row = g[r]
if r > 0:
peaksAboveRow[r] += peaksAboveRow[r-1]
for col in range(len(row)):
face = row[col]
if searchRow(face, True, g, r) == True:
peaksAboveRow[r] += 1
#print "Peak at: " + str(r) + ", " + str(col)
if searchRow(face, False, g, r) == True and r < len(g)-1:
peaksAboveRow[r+1] -= 1
#print "Valley at: " + str(r) + ", " + str(col)
peaksAboveRow[r] = abs(peaksAboveRow[r])
return peaksAboveRow
#Theorem I devoloped
def NelsonThm(peaks, g):
kekulean = True
minRows = getMinRows(g)
for i, row in minRows.items():
if peaks[i] > getRowEdgeCount(row):
kekulean = False
break
return kekulean
#checks if a graph is Kekulean and returns a boolean
def isOldKekulean(graph):
fg = faceGraphToInts(graph.getFaceGraph())
peaksAbove = getPeaksAboveRows(fg)
#print peaksAbove
kekulean = NelsonThm(peaksAbove, fg)
return kekulean
def getUpperBounds(graph):
#faceGraph = getInput(filename)
#vertexGraph = makeVertexGraph(faceGraph)
#graph = Graph(faceGraph, vertexGraph)
kekulean = isKekulean(graph)
if kekulean == True:
rowCount = [0] * graph.getNumberOfRows()
whiteCount = [0] * graph.getNumberOfRows()
blackCount = [0] * graph.getNumberOfRows()
print "len:", len(whiteCount)
for v in graph.getVertexGraph():
#even y numbers mean the vertex is marked white on the graph
if v.getY() % 2 == 0:
index = v.getY() / 2
if index < len(whiteCount):
whiteCount[index] += 1
			#The else implies that the vertex's y is odd, and thus the vertex is marked black
else:
index = (v.getY() - 1) / 2
if index < len(blackCount):
blackCount[index] += 1
print "Upper Bonds of the graph per row:"
for index in range(len(rowCount)):
count = abs(sum(whiteCount[0:index+1]) - sum(blackCount[0:index+1]))
print count
rowCount[index] = count
totalUpperBonds = sum(rowCount)
print "Upper bond of the graph:", totalUpperBonds
else:
print "The graph is not Kekulean"
def testConjectureSameFaces(root,interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
graphList = []
graphNumber = 0
counter = 0
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#must be 'fries' or 'clars'
Graph.comparison = 'clars'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
#h.setString(structures[0].simpleToString())
#is the data right?
#print "Verts:", h.getNumVertices()
#print "Structures:", h.getNumStructures()
#print "Clar:", h.getFriesNumber()
for g in graphList:
if(h.getFaces() == g.getFaces()):
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
#first part
if h.getClarsNumber() > g.getClarsNumber():
print 'Conjecture is false:'
drawConflictsCC(g, h)
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
graphNumber += 1
text.update_idletasks()
quit.update_idletasks()
scrollbar.update_idletasks()
text.destroy()
scrollbar.destroy()
quit.destroy()
#second part
def testConjectureSameFacesKKFF(root, interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
graphList = []
graphNumber = 0
counter = 0
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#must be 'fries' or 'clars'
Graph.comparison = 'fries'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
clarNumberStructure = []
friesNumberStructure = []
for g in graphList:
if(h.getFaces() == g.getFaces()):
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
if h.getFriesNumber() > g.getFriesNumber():
drawConflictsKKFF(g, h)
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
graphNumber += 1
text.update_idletasks()
quit.update_idletasks()
scrollbar.update_idletasks()
text.destroy()
scrollbar.destroy()
quit.destroy()
def testConjectureSameFacesFFCC(root, interval):
clarNumberStructures = []
friesNumberStructures = []
graphs = []
graphList = []
temp = 0
graphNumber = 0
counter = 0
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command = text.yview)
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
if Break == True:
Break = False
quit.destroy()
break
text.insert(CURRENT, "Graph " + str(graphNumber) + "\n")
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
randomGraph.setMaxClarManual(setMaxClar(randomGraph))
randomGraph.setMaxFriesManual(setMaxFries(randomGraph))
h = structures[-1]
graphs.append(randomGraph)
h.setMaxClarManual(setMaxClar(randomGraph))
h.setMaxFriesManual(setMaxFries(randomGraph))
h.setNumStructures(len(structures))
h.setFaces(getNumFaces(faceGraphToInts(randomFaces)))
graphCount = 0
graphNumber += 1
for g in graphList:
if(g.getFaces() == h.getFaces()):
if g.getNumVertices() == h.getNumVertices():
if g.getNumStructures() < h.getNumStructures():
if g.getMaxClar() > h.getMaxClar():
if g.getMaxFries() < h.getMaxFries():
print 'Conjecture is false:\n'
saveClarFaceFFCC(graphs[graphCount],randomGraph,temp)
saveFriesFaceFFCC(graphs[graphCount],randomGraph,temp)
folderName = "FFCCConjectureConflicts"
fileName = folderName + "/" + str(randomGraph.getNumVertices()) + "_" + str(temp)+ "/info" + ".txt"
f = open(fileName,'w')
f.write("C1: " + str(g.getMaxClar()) + " C2: " + str(h.getMaxClar()) + " F1: " + str(g.getMaxFries()) + " F2: " + str(h.getMaxFries()) + "\n")
f.write(str(faceGraphToInts(g.getFaceGraph())) + "\n")
f.write(str(faceGraphToInts(h.getFaceGraph())) + "\n")
f.close()
temp += 1
graphCount += 1
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
def setMaxFries(graph):
g = graph.getFaceGraph()
v = makeVertexGraph(g)
G = Graph(g,v)
structures = assignMatching(G)
Graph.comparison = 'fries'
structures.sort()
return structures[-1].getFriesNumber()
def setMaxClar(graph):
g = graph.getFaceGraph()
v = makeVertexGraph(g)
G = Graph(g,v)
structures = assignMatching(G)
Graph.comparison = 'clars'
structures.sort()
return structures[-1].getClarsNumber()
def saveClarFaceFFCC(graph1,graph2,count):
g1 = graph1.getFaceGraph()
g2 = graph2.getFaceGraph()
v1 = makeVertexGraph(g1)
v2 = makeVertexGraph(g2)
G1 = Graph(g1,v1)
G2 = Graph(g2,v2)
structures1 = assignMatching(G1)
structures2 = assignMatching(G2)
Graph.comparison = 'clars'
structures1.sort()
structures2.sort()
h1 = structures1[-1]
h2 = structures2[-1]
if not os.path.exists("FFCCConjectureConflicts"):
os.mkdir("FFCCConjectureConflicts")
folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)
#setup folder
if not os.path.exists(folderName):
os.mkdir(folderName)
#print "adding"
fileName1 = folderName + "/clar1" + ".png"
fileName2 = folderName + "/clar2" + ".png"
#print fileName1
saveSinglePNG(h1,fileName1)
saveSinglePNG(h2,fileName2)
def saveFriesFaceFFCC(graph1,graph2,count):
g1 = graph1.getFaceGraph()
g2 = graph2.getFaceGraph()
v1 = makeVertexGraph(g1)
v2 = makeVertexGraph(g2)
G1 = Graph(g1,v1)
G2 = Graph(g2,v2)
structures1 = assignMatching(G1)
structures2 = assignMatching(G2)
Graph.comparison = 'fries'
structures1.sort()
structures2.sort()
h1 = structures1[-1]
h2 = structures2[-1]
if not os.path.exists("FFCCConjectureConflicts"):
os.mkdir("FFCCConjectureConflicts")
folderName = "FFCCConjectureConflicts/" + str(G1.getNumVertices()) + "_" + str(count)
#setup folder
if not os.path.exists(folderName):
os.mkdir(folderName)
#print "adding"
fileName1 = folderName + "/fries1" + ".png"
fileName2 = folderName + "/fries2" + ".png"
#print fileName1
saveSinglePNG(h1,fileName1)
saveSinglePNG(h2,fileName2)
def testConjectureDifferentFaces(hours=0):
graphList = []
results = open("results.txt", "w")
results.write("The program actually run!")
if hours == 0:
interval = float(raw_input("How many hours would you like to run the program? "))
else:
interval = hours
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
counter = 0
while t2 - t1 < timeLimit:
print "graph #" + str(counter)
#creates a face graphs
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
for f in randomGraph.getFaceGraph():
pairs = randomGraph.getBondedVertices(f)
print str(pairs)
#must be 'fries' or 'clars'
Graph.comparison = 'clars'
structures.sort()
h = structures[-1]
h.setNumStructures(len(structures))
#h.setString(structures[0].simpleToString())
#is the data right?
#print "Verts:", h.getNumVertices()
#print "Structures:", h.getNumStructures()
#print "Clar:", h.getFriesNumber()
for g in graphList:
if h.getNumVertices() == g.getNumVertices() :#and h.getNumVertices() <= 26:
if h.getNumStructures() < g.getNumStructures():
#first part
if h.getClarsNumber() > g.getClarsNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Clars: ' + str(h.getClarsNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Clars: ' + str(g.getClarsNumber()) + " Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsCC(g, h)
#second part
if h.getFriesNumber() > g.getFriesNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Fries: ' + str(h.getFriesNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Fries: ' + str(g.getFriesNumber()) + " Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsKKFF(g, h)
#third part
if h.getClarsNumber() > g.getClarsNumber():
if h.getFriesNumber() < g.getFriesNumber():
print 'Conjecture is false:'
results.write('\ngraph H: Clars: ' + str(h.getClarsNumber()) + "graph H: Fries: " + str(h.getFriesNumber()) + " Number of Structures: " + str(h.getNumStructures()) + " Number of vertices: " + str(h.getNumVertices()) + "\n")
results.write(str(h))
results.write('\ngraph G: Clars: ' + str(g.getClarsNumber()) + "graph G: Fries: " + str(g.getFriesNumber()) +" Number of Structures: " + str(g.getNumStructures()) + " Number of vertices: " + str(g.getNumVertices()) + "\n")
results.write(str(g))
results.write("\n\n")
drawConflictsFFCC(g, h)
#only adds graphs to list if it under some number of vertices
graphList.append(h)
t2 = time.time()
counter += 1
def findHighestClars(graphs):
clars = 0
for g in graphs:
if g.getClarsNumber() > clars:
clars = g.getClarsNumber()
return clars
def _findRequiredEdges(graphs):
masterSet = getRequiredSet(graphs)
if len(masterSet) > 0:
for edge in masterSet:
v1, v2 = edge
v1.required = True
v2.required = True
return True
else:
return False
def findRequiredEdges(hours=0):
if not os.path.exists("requiredEdges"):
os.mkdir("requiredEdges")
edgeFile = open("requiredEdges/RequiredEdges.txt", "w")
graphNumber = 0
rqNum = 0
flag = False
if hours == 0:
interval = float(raw_input("How many hours would you like to run the program? "))
else:
interval = hours
timeLimit = 3600 * interval
print "limit:", timeLimit
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
print "graph", graphNumber
flag = False
graph = _createRandomKekulean()
graphs = assignMatching(graph)
for f in graph.getFaceGraph():
pairs = graph.getBondedVertices(f)
print str(pairs)
flag = _findRequiredEdges(graphs)
if flag == True:
print "Found graph with required edges"
edgeFile.write("Graph: " + str(rqNum) + "\n")
edgeFile.write(graph.simpleToString())
edgeFile.write("\n\n")
#save PNG's
fileName = "requiredEdges/Graph" + str(rqNum) + ".png"
saveSinglePNG(graphs[0], fileName)
rqNum += 1
graphNumber += 1
t2 = time.time()
def BreakModule():
global Break
Break = True
def BreakLoop():
global BreakLoop
BreakLoop = True
def combineGraphs(root,interval):
global Break
Break = False
quit = Button(root, text ="Quit", command = BreakModule)
quit.pack(side = LEFT)
graphNumber = 0
superGraphNumber = 0
deletedCount = 0
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT,fill = Y)
text = Text(root,yscrollcommand = scrollbar.set)
text.pack()
scrollbar.config(command=text.yview)
storedGraphs = {}
timeLimit = 3600 * interval
t1 = time.time()
t2 = time.time()
while t2 - t1 < timeLimit:
text.insert(CURRENT,"graph: " + str(graphNumber) + "\n")
if Break == True:
Break = False
quit.destroy()
break
flag = False
#new stuff
randomFaces = createRandomGraph()
vertexGraph = []
#Finds connected graph
while len(vertexGraph) % 2 != 0 or len(vertexGraph) == 0 or countPeaksAndValleys(randomFaces) == False or isConnected(faceGraphToInts(randomFaces)) == False:
randomFaces = createRandomGraph()
vertexGraph = makeVertexGraph(randomFaces)
randomGraph = Graph(randomFaces, vertexGraph)
perfectMatchingThm = isKekulean(randomGraph)
if perfectMatchingThm == True:
structures = assignMatching(randomGraph)
#end new stuff
Graph.comparison = 'clars'
structures.sort()
randomGraph.maxClars = structures[-1].getClarsNumber()
req_edges = getRequiredSet(structures)
externalEdges = getExternalEdges(req_edges)
if len(externalEdges) > 0:
#add graph and edges to list
storedGraphs[randomGraph] = externalEdges
for g, edges in storedGraphs.items():
complements = getComplements(externalEdges, edges)
for edge, compEdge in complements:
faceA = (edge[0].getFaces() & edge[1].getFaces()).pop()
faceB = (compEdge[0].getFaces() & compEdge[1].getFaces()).pop()
x = faceA.getX() - faceB.getX()
y = faceA.getY() - faceB.getY()
if edge[2] == "TOP_RIGHT" and compEdge[2] == "BOTTOM_LEFT":
newGraph = offsetFaces(g, x, y + 1);
elif edge[2] == "RIGHT" and compEdge[2] == "LEFT":
newGraph = offsetFaces(g, x + 1, y);
elif edge[2] == "TOP_LEFT" and compEdge[2] == "BOTTOM_RIGHT":
newGraph = offsetFaces(g, x + 1, y + 1);
elif edge[2] == "BOTTOM_LEFT" and compEdge[2] == "TOP_RIGHT":
newGraph = offsetFaces(g, x, y - 1);
elif edge[2] == "LEFT" and compEdge[2] == "RIGHT":
newGraph = offsetFaces(g, x - 1, y);
elif edge[2] == "BOTTOM_RIGHT" and compEdge[2] == "TOP_LEFT":
newGraph = offsetFaces(g, x - 1, y - 1);
overlap = checkFaceOverlap(randomGraph, newGraph)
#print overlap
if overlap is False:
faceGraph = combineFaces(randomGraph, newGraph)
faceGraph = adjustForNegatives(faceGraph)
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
#start new stuff
if len(structures) > 0:
#setup folder
folderName = "CombinedTemps"
if not os.path.exists(folderName):
os.mkdir(folderName)
fileName = folderName + "/superGraph.txt"
f = open(folderName + "/superGraph" + str(superGraphNumber) + ".txt" ,'w')
f.write(str(superGraph) + '\n')
f.close()
Graph.comparison = 'clars'
structures.sort()
if not os.path.exists("CombinedGraphs"):
os.mkdir("CombinedGraphs")
folderNameCG = "CombinedGraphs/superGraph" + str(superGraphNumber)
#setup folder
if not os.path.exists(folderNameCG):
os.mkdir(folderNameCG)
superName = folderNameCG + "/superGraph" + str(superGraphNumber) + ".png"
saveSinglePNG(structures[0], superName)
addCombinationsPNG(randomGraph, newGraph,superGraph, superGraphNumber, deletedCount)
superGraphNumber += 1
graphNumber += 1
t2 = time.time()
quit.update_idletasks()
quit.destroy()
def resetCombinedGraphs(root,appInfo,submitGraph,graphNumberEntry,view):
submitGraph.destroy()
view.destroy()
graphNumberEntry.destroy()
def analyzeCombinedGraphsSetup(root,appInfo,path = "CombinedTemps",extension = ".txt"):
runningApps = []
root.geometry("600x400")
graphNumber = IntVar()
entry = Entry(root, textvariable = graphNumber)
entry.pack()
runningApps.append(entry)
if not os.path.exists(path):
os.mkdir(path)
num_files = len([f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))])
num_files -= 1
#for i in range(0,num_files):
#oldFilename = path + "/superGraph" + str(k+1) + extension
#os.rename(oldFilename, path + "/superGraph" + str(i) + extension)
label = Label(root, text="There are " + str(num_files) + " files in the directory. Which wuold you like to look at?")
label.pack()
runningApps.append(label)
i = 0
submit = Button(root, text ="Submit", command = lambda: checkAnalyze(root,appInfo,num_files,quit,entry,label,i,graphNumber,submit,runningApps))
submit.pack(side = BOTTOM)
while i == 0:
i = graphNumber.get()
submit.update_idletasks()
entry.update_idletasks()
label.update_idletasks()
def checkAnalyze(root,appInfo,num_files,quit,entry,label,i,graphNumber,submit,runningApps):
submit.destroy()
again = Label(root, text="That file does not exist, please try again.")
submit = Button(root, text ="Submit", command = lambda: analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry))
submit.pack(side = BOTTOM)
if i < -1 or i > num_files:
again.pack()
else:
analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry)
while (i < -1 or i > num_files):
submit.update_idletasks()
entry.update_idletasks()
label.update_idletasks()
again.update_idletasks()
i = graphNumber.get()
def analyzeCombinedGraphs(root,appInfo,i,runningApps,submit,again,label,entry):
submit.destroy()
again.destroy()
label.destroy()
entry.destroy()
selection = StringVar()
choiceEntry = Entry(root, textvariable = selection)
choice = selection.get()
def callback(root,appInfo,i,choice,selection,choiceEntry,extension = ".txt",path = "CombinedTemps"):
loading = Label(root, text="Analyzing graph data, this may take a few minutes.")
loading.pack()
fileName = "/superGraph" + str(i) + extension
faceGraph = getInput(path + "/superGraph" + str(i) + extension)
#check for connectedness
connected = isConnected(faceGraphToInts(faceGraph))
if connected == True:
vertexGraph = makeVertexGraph(faceGraph)
superGraph = Graph(faceGraph, vertexGraph)
structures = assignMatching(superGraph)
_findRequiredEdges(structures)
loading.destroy()
choiceEntry.pack()
typeSelection = Label(root, text="Would you like to view the graphs ranked by Fries or Clars?")
typeSelection.pack()
submit = Button(root, text ="Submit", command = lambda: userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry))
submit.pack(side = BOTTOM)
while True:
choice = selection.get()
flag = False
exit = False
if choice != 'fries' and choice != 'clars' and choice != "":
againSelection = Label(root, text="That is not a valid option, please enter 'fries' or 'clars'.")
againSelection.pack()
print "again"
flag = True
while choice != 'fries' and choice != 'clars':
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
againSelection.update_idletasks()
choice = selection.get()
if exit == True:
againSelection.destroy()
break
submit.update_idletasks()
choiceEntry.update_idletasks()
typeSelection.update_idletasks()
t = threading.Thread(target = lambda: callback(root,appInfo,i,choice,selection,choiceEntry))
t.setDaemon(True)
appInfo.setThreads(t)
t.start()
def userInputStructures(root,appInfo,structures,choice,submit,typeSelection,choiceEntry):
structureNumber = IntVar()
submit.destroy()
typeSelection.destroy()
choiceEntry.destroy()
def displayCombinedClarFries(structureNumber,structures,choice):
structures.sort()
if choice == 'clars':
Graph.comparison = 'clars'
elif choice == 'fries':
Graph.comparison = 'fries'
structures.reverse()
structures[structureNumber].displayGraph()
view = Label(root, text="There are " + str(len(structures)) + " distinct Kekule structures available. Which would you like to view?")
view.pack()
graphNumberEntry = Entry(root, textvariable = structureNumber)
graphNumberEntry.pack()
number = structureNumber.get()
submitGraph = Button(root, text ="Submit Structure", command = lambda: displayCombinedClarFries(number,structures,choice))
submitGraph.pack(side = BOTTOM)
def deleteB(button):
button.destroy()
reset = Button(root, text ="Quit", command = lambda: resetB(root,appInfo,submitGraph,graphNumberEntry,view))
reset.pack(side = BOTTOM)
def resetB(root,appInfo,submitGraph,graphNumberEntry,view):
deleteB(reset)
resetCombinedGraphs(root,appInfo,submitGraph,graphNumberEntry,view)
def addCombinationsPNG(graph,newGraph,superGraph,superGraphNumber,deletedCount):
new1 = graph.getFaceGraph()
new2 = newGraph.getFaceGraph()
vertexG1 = makeVertexGraph(new1)
vertexG2 = makeVertexGraph(new2)
g1 = Graph(new1,vertexG1)
g2 = Graph(new2,vertexG2)
firstStructures = assignMatching(g1)
secondStructures = assignMatching(g2)
_findRequiredEdges(firstStructures)
_findRequiredEdges(secondStructures)
Graph.comparison = 'clars'
firstStructures.sort()
secondStructures.sort()
if(isKekulean(g2) == True and isKekulean(g1) == True):
folderNameCG = "CombinedGraphs/superGraph" + str(superGraphNumber)
firstName = folderNameCG + "/Graph" + str(1) + ".png"
secondName = folderNameCG + "/Graph" + str(2) + ".png"
saveSinglePNG(firstStructures[0], firstName)
saveSinglePNG(secondStructures[0], secondName)
else:
directoryName = "CombinedDeleted"
if not os.path.exists(directoryName):
os.mkdir(directoryName)
folderName = "CombinedDeleted/superGraph" + str(superGraphNumber) + "_" + str(deletedCount)
if not os.path.exists(folderName):
os.mkdir(folderName)
f = superGraph.getFaceGraph()
v3 = makeVertexGraph(f)
g3 = Graph(f,v3)
superGraphStructure = assignMatching(g3)
fileName = folderName + "/superDeleted" + str(superGraphNumber) + ".png"
firstName = folderName + "/Graph" + str(1) + ".png"
secondName = folderName + "/Graph" + str(2) + ".png"
saveSinglePNG(superGraphStructure[0], fileName)
saveSinglePNG(firstStructures[0], firstName)
saveSinglePNG(secondStructures[0], secondName)
shutil.rmtree("CombinedGraphs/superGraph" + str(superGraphNumber))
superGraphNumber -= 1
deletedCount += 1
def removeCombinedDuplicates(path = "CombinedTemps",extension = ".txt"):
num_files = len([f for f in os.listdir(path)
if os.path.isfile(os.path.join(path, f))])
print num_files
num_files -= 7
print num_files
masterFaceGraph = []
for i in range(0,num_files):
filename = "/superGraph" + str(i) + extension
faceGraph = getInput(path + "/superGraph" + str(i) + extension)
masterFaceGraph.append(faceGraphToInts(faceGraph))
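# Compare every pair of face graphs row by row; if graph k matches graph f
# exactly, k is treated as a duplicate and its files are removed below.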
for f in range(0, len(masterFaceGraph)):
for k in range(f+1, len(masterFaceGraph)):
flag = True
for h in range(0,len(masterFaceGraph[f])):
a = masterFaceGraph[f][h]
b = masterFaceGraph[k][h]
if len(a) != len(b):
flag = False
break
for t in range(0,len(masterFaceGraph[f][h])):
c = a[t]
d = b[t]
if c != d:
flag = False
break
if flag == False:
break
if (flag == True):
masterFaceGraph.remove(masterFaceGraph[k])
shutil.rmtree("CombinedGraphs/superGraph" + str(k))
os.remove("CombinedTemps/superGraph" + str(k) + extension)
for i in range(k+1,num_files):
path1 = "CombinedGraphs"
path2 = "CombinedTemps"
oldFilename1 = path1 + "/superGraph" + str(i)
oldFilename2 = path2 + "/superGraph" + str(i) + extension
os.rename(oldFilename1 + "/superGraph" + str(i) + ".png", oldFilename1 + "/superGraph" + str(i-1) + ".png")
os.rename(oldFilename1, path1 + "/superGraph" + str(i-1))
os.rename(oldFilename2, path2 + "/superGraph" + str(i-1) + extension)
num_files -= 1 | gpl-2.0 | -2,623,389,696,733,337,600 | 25.149303 | 232 | 0.68454 | false |
metacloud/python-novaclient | novaclient/v1_1/aggregates.py | 15 | 3503 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Aggregate interface."""
from novaclient import base
class Aggregate(base.Resource):
"""An aggregates is a collection of compute hosts."""
def __repr__(self):
return "<Aggregate: %s>" % self.id
def update(self, values):
"""Update the name and/or availability zone."""
return self.manager.update(self, values)
def add_host(self, host):
return self.manager.add_host(self, host)
def remove_host(self, host):
return self.manager.remove_host(self, host)
def set_metadata(self, metadata):
return self.manager.set_metadata(self, metadata)
def delete(self):
self.manager.delete(self)
class AggregateManager(base.ManagerWithFind):
resource_class = Aggregate
def list(self):
"""Get a list of os-aggregates."""
return self._list('/os-aggregates', 'aggregates')
def create(self, name, availability_zone):
"""Create a new aggregate."""
body = {'aggregate': {'name': name,
'availability_zone': availability_zone}}
return self._create('/os-aggregates', body, 'aggregate')
def get(self, aggregate):
"""Get details of the specified aggregate."""
return self._get('/os-aggregates/%s' % (base.getid(aggregate)),
"aggregate")
# NOTE:(dtroyer): utils.find_resource() uses manager.get() but we need to
# keep the API backward compatible
def get_details(self, aggregate):
"""Get details of the specified aggregate."""
return self.get(aggregate)
def update(self, aggregate, values):
"""Update the name and/or availability zone."""
body = {'aggregate': values}
return self._update("/os-aggregates/%s" % base.getid(aggregate),
body,
"aggregate")
def add_host(self, aggregate, host):
"""Add a host into the Host Aggregate."""
body = {'add_host': {'host': host}}
return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
body, "aggregate")
def remove_host(self, aggregate, host):
"""Remove a host from the Host Aggregate."""
body = {'remove_host': {'host': host}}
return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
body, "aggregate")
def set_metadata(self, aggregate, metadata):
"""Set a aggregate metadata, replacing the existing metadata."""
body = {'set_metadata': {'metadata': metadata}}
return self._create("/os-aggregates/%s/action" % base.getid(aggregate),
body, "aggregate")
def delete(self, aggregate):
"""Delete the specified aggregates."""
self._delete('/os-aggregates/%s' % (base.getid(aggregate)))
| apache-2.0 | 785,053,637,061,543,800 | 35.873684 | 79 | 0.611761 | false |
StratusLab/client | api/code/src/main/python/stratuslab/HttpClient.py | 1 | 9858 | #
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import stat
import base64
import mimetools
import mimetypes
import httplib2
import ssl
from httplib2 import httplib
from time import gmtime, strftime
from stratuslab import Util
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.Exceptions import ServerException
from stratuslab.Exceptions import ClientException
from stratuslab.Exceptions import NetworkException
class HttpClient(object):
ENV_HTTP_PROXY = 'http_proxy'
ENV_NO_PROXY = 'no_proxy'
@staticmethod
def getHttpProxyForUrl(url):
proxy = None
url_host = Util.parseUri(url)[1]
envProxy = HttpClient._getEnvVarProxy()
if envProxy and not (url_host in HttpClient._getEnvVarNoProxy()):
proxy_server, proxy_port = Util.parseUri(envProxy)[1:3]
proxy = httplib2.ProxyInfo(3, proxy_server, int(proxy_port),
proxy_rdns=True)
return proxy
@staticmethod
def _getEnvVarProxy():
return os.environ.get(HttpClient.ENV_HTTP_PROXY)
@staticmethod
def _getEnvVarNoProxy():
return os.environ.get(HttpClient.ENV_NO_PROXY)
def __init__(self, configHolder=ConfigHolder()):
self.verboseLevel = None
self.configHolder = configHolder
self.crendentials = {}
self.certificates = {}
self.handleResponse = True
self.useHttpCache = False
configHolder.assign(self)
def get(self, url, accept='application/xml'):
return self._httpCall(url, 'GET', accept=accept)
def post(self, url, body=None, contentType='application/xml', accept='application/xml'):
return self._httpCall(url, 'POST', body, contentType, accept, retry=False)
def post_multipart(self, url, files=[], params=[], accept='application/xml'):
boundary, body = self._multipart_encode(files, params)
contentType = 'multipart/form-data; boundary=%s' % boundary
return self.post(url, body, contentType=contentType, accept=accept)
def _multipart_encode(self, files, params):
"files - list of (<attribute name>, <file descriptor>) tuples"
"params - list of (<attribute name>, <value>) tuples"
boundary = mimetools.choose_boundary()
body = ''
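# Build the multipart/form-data payload by hand: each form field and file
# is framed by the boundary and separated with CRLF pairs.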
for(key, value) in params:
body += '--%s\r\n' % boundary
body += 'Content-Disposition: form-data; name="%s"' % key
body += '\r\n\r\n' + value + '\r\n'
for(key, fh) in files:
file_size = os.fstat(fh.fileno())[stat.ST_SIZE]
filename = fh.name.split('/')[-1]
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
body += '--%s\r\n' % boundary
body += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
body += 'Content-Type: %s\r\n' % contenttype
body += 'Content-Length: %s\r\n' % file_size
fh.seek(0)
body += '\r\n' + fh.read() + '\r\n'
fh.close()
body += '--%s--\r\n\r\n' % boundary
return boundary, body
def put(self, url, body=None, contentType='application/xml', accept='application/xml'):
return self._httpCall(url, 'PUT', body, contentType, accept)
def delete(self, url, body=None, contentType='application/x-www-form-urlencoded', accept='application/xml'):
return self._httpCall(url, 'DELETE', body, contentType, accept)
def head(self, url):
return self._httpCall(url, 'HEAD')
def addCredentials(self, username, password):
self.crendentials[username] = password
def addCertificate(self, key, cert):
self.certificates[key] = cert
def setHandleResponse(self, handle):
self.handleResponse = handle
def _addCredentials(self, http):
for u, p in self.crendentials.items():
http.add_credentials(u, p)
def _addCredentialsToHeader(self, headers):
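# Attach HTTP Basic auth directly to the headers (used for POSTs below),
# presumably to avoid waiting for a 401 challenge before sending credentials.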
if self.crendentials:
u, p = self.crendentials.items()[0]
headers['authorization'] = 'Basic ' + base64.b64encode('%s:%s' % (u, p))
return headers
def _addCertificate(self, http):
for u, p in self.certificates.items():
http.add_certificate(u, p, '')
def _printDetail(self, message):
Util.printDetail(message, self.verboseLevel, Util.VERBOSE_LEVEL_DETAILED)
# This is a bandaid for problems associated with dropped SSL handshakes. The
# root cause of these problems needs to be found and fixed.
def _retryHttpRequestOnSSLError(self, httpObject, url, method, body, headers):
maxRetries = 3
retries = 0
lastException = None
while retries < maxRetries:
try:
if len(headers):
return httpObject.request(url, method, body, headers=headers)
else:
return httpObject.request(url, method, body)
except ssl.SSLError as e:
t = strftime("%Y-%m-%d %H:%M:%S", gmtime())
self._printDetail('SSL ERROR ENCOUNTERED (%s): %s' % (t, str(e)))
lastException = e
retries += 1
except httplib2.ssl_SSLError as e:
t = strftime("%Y-%m-%d %H:%M:%S", gmtime())
self._printDetail('SSL ERROR ENCOUNTERED (%s): %s' % (t, str(e)))
lastException = e
retries += 1
raise lastException
def _httpCall(self, url, method, body=None, contentType='application/xml', accept='application/xml', retry=True):
def _convertContent(content):
size = len(content)
if size > 2048:
return '<content too large; %d bytes>' % size
try:
return unicode(content, 'utf-8')
except:
return '<non-text content>'
def _getErrorMessageFromJsonContent(content):
try:
return json.loads(content)['message']
except:
return ''
def _handle3xx(resp):
if resp.status == 302:
# Redirected
resp, content = self._httpCall(resp['location'], method, body, accept)
else:
raise Exception('Should have been handled by httplib2!! ' + str(resp.status) + ": " + resp.reason)
return resp, content
def _handle4xx(resp):
error_message = _getErrorMessageFromJsonContent(content)
raise ClientException('Failed calling method %s on url %s, with reason: %s. Error: %s' %
(method, url, str(resp.status) + ": " + resp.reason, error_message),
content=content,
status=str(resp.status))
def _handle5xx(resp):
if retry:
return self._httpCall(url, method, body, contentType, accept, False)
raise ServerException('Failed calling method %s on url %s, with reason: %s' %
(method, url, str(resp.status) + ": " + resp.reason),
status=str(resp.status))
def _handleResponse(resp, content):
self._printDetail('Received response: %s' % resp + \
'\nwith content:\n %s' % \
_convertContent(content))
if str(resp.status).startswith('2'):
return resp, content
if str(resp.status).startswith('3'):
resp, content = _handle3xx(resp)
if str(resp.status).startswith('4'):
resp, content = _handle4xx(resp)
if str(resp.status).startswith('5'):
resp, content = _handle5xx(resp)
proxy = self.getHttpProxyForUrl(url)
if Util.isTrueConfVal(self.useHttpCache):
h = httplib2.Http(".cache", proxy_info=proxy)
else:
h = httplib2.Http(proxy_info=proxy)
h.force_exception_to_status_code = False
h.disable_ssl_certificate_validation=True
self._printDetail('Contacting the server with %s, at: %s' % (method, url))
headers = {}
if contentType:
headers['Content-Type'] = contentType
if accept:
headers['Accept'] = accept
# See https://github.com/StratusLab/client/issues/8
if method == 'POST':
self._addCredentialsToHeader(headers)
self._addCredentials(h)
self._addCertificate(h)
try:
resp, content = self._retryHttpRequestOnSSLError(h, url, method, body, headers)
except httplib.BadStatusLine:
raise NetworkException('BadStatusLine when contacting ' + url)
except AttributeError:
raise NetworkException('Cannot contact ' + url)
if self.handleResponse:
try:
_handleResponse(resp, content)
except ClientException, ex:
ex.mediaType = headers['Accept']
raise
return resp, content
| apache-2.0 | 6,699,662,071,350,455,000 | 37.964427 | 117 | 0.589268 | false |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/lib2to3/fixes/fix_dict.py | 24 | 3811 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
iter_exempt = fixer_util.consuming_calls | {"iter"}
class FixDict(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< head=any+
trailer< '.' method=('keys'|'items'|'values'|
'iterkeys'|'iteritems'|'itervalues'|
'viewkeys'|'viewitems'|'viewvalues') >
parens=trailer< '(' ')' >
tail=any*
>
"""
def transform(self, node, results):
head = results["head"]
method = results["method"][0] # Extract node for method name
tail = results["tail"]
syms = self.syms
method_name = method.value
isiter = method_name.startswith("iter")
isview = method_name.startswith("view")
if isiter or isview:
method_name = method_name[4:]
assert method_name in ("keys", "items", "values"), repr(method)
head = [n.clone() for n in head]
tail = [n.clone() for n in tail]
special = not tail and self.in_special_context(node, isiter)
args = head + [pytree.Node(syms.trailer,
[Dot(),
Name(method_name,
prefix=method.prefix)]),
results["parens"].clone()]
new = pytree.Node(syms.power, args)
if not (special or isview):
new.prefix = ""
new = Call(Name("iter" if isiter else "list"), [new])
if tail:
new = pytree.Node(syms.power, [new] + tail)
new.prefix = node.prefix
return new
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node, isiter):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
if isiter:
# iter(d.iterkeys()) -> iter(d.keys()), etc.
return results["func"].value in iter_exempt
else:
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in fixer_util.consuming_calls
if not isiter:
return False
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
| gpl-2.0 | 1,508,504,386,872,051,500 | 34.616822 | 78 | 0.562582 | false |
frinksdev/mini4chan | konnichiwa/settings.py | 1 | 2675 | """
Django settings for konnichiwa project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tmm&bc#^@y6c^^_s&v+v7*(n^7)h8qr+(^19f#ntjx$q$mp#y_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'konnichiwa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'konnichiwa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'klezmer'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-mx'
TIME_ZONE = 'America/Mexico_City'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit | -7,391,342,945,598,178,000 | 24.970874 | 71 | 0.689346 | false |
dianshen/python_day | day7/a/getfile.py | 1 | 1625 | #!/usr/bin/env python3
__author__ = 'DSOWASP'
import socket
import os
import hashlib
ip_port = ('127.0.0.1',9999)
sk = socket.socket()
sk.connect(ip_port)
while True:
cmd_input = input("cmd> ")
# print("发送:%s"%cmd)
cmd,fname = cmd_input.split()
if os.path.exists(fname):
ftell = os.path.getsize(fname)
else:
ftell = 0
if cmd == "q":
break
# elif
try:
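# Ask the server for the file, telling it how many bytes we already have
# so the transfer can resume from that offset.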
sk.send(bytes("get %s %s"%(fname,ftell),"utf-8"))
r_data = sk.recv(1024).decode()
# print(r_data)
status,fsize = r_data.split()
fsize = int(fsize)
print(r_data)
if status == "210":
if ftell == 0:
f = open(fname,"wb")
content = sk.recv(1024)
f.write(content)
else:
recv_size = ftell
f = open(fname,'rb')
fmd5 = hashlib.md5()
text = f.read()
fmd5.update(text)
f.close()
f = open(fname,"ab")
while recv_size < fsize:
content = sk.recv(100)
f.write(content)
recv_size += len(content)
fmd5.update(content)
r_fmd5 = sk.recv(1024).decode()
print(r_fmd5)
f_md5 = r_fmd5.split()[2]
if f_md5 == fmd5.hexdigest():
print("文件传输正确!")
f.close()
except ConnectionResetError:
print("server break the connect")
break
# print("recv:%s"%raw_data.decode()) | apache-2.0 | -9,071,279,650,714,440,000 | 24.52381 | 57 | 0.453018 | false |
Insoleet/aiohttp | aiohttp/web_exceptions.py | 12 | 6892 | from .web_reqrep import Response
__all__ = (
'HTTPException',
'HTTPError',
'HTTPRedirection',
'HTTPSuccessful',
'HTTPOk',
'HTTPCreated',
'HTTPAccepted',
'HTTPNonAuthoritativeInformation',
'HTTPNoContent',
'HTTPResetContent',
'HTTPPartialContent',
'HTTPMultipleChoices',
'HTTPMovedPermanently',
'HTTPFound',
'HTTPSeeOther',
'HTTPNotModified',
'HTTPUseProxy',
'HTTPTemporaryRedirect',
'HTTPClientError',
'HTTPBadRequest',
'HTTPUnauthorized',
'HTTPPaymentRequired',
'HTTPForbidden',
'HTTPNotFound',
'HTTPMethodNotAllowed',
'HTTPNotAcceptable',
'HTTPProxyAuthenticationRequired',
'HTTPRequestTimeout',
'HTTPConflict',
'HTTPGone',
'HTTPLengthRequired',
'HTTPPreconditionFailed',
'HTTPRequestEntityTooLarge',
'HTTPRequestURITooLong',
'HTTPUnsupportedMediaType',
'HTTPRequestRangeNotSatisfiable',
'HTTPExpectationFailed',
'HTTPServerError',
'HTTPInternalServerError',
'HTTPNotImplemented',
'HTTPBadGateway',
'HTTPServiceUnavailable',
'HTTPGatewayTimeout',
'HTTPVersionNotSupported',
)
############################################################
# HTTP Exceptions
############################################################
class HTTPException(Response, Exception):
# You should set in subclasses:
# status = 200
status_code = None
empty_body = False
def __init__(self, *, headers=None, reason=None,
body=None, text=None, content_type=None):
Response.__init__(self, status=self.status_code,
headers=headers, reason=reason,
body=body, text=text, content_type=content_type)
Exception.__init__(self, self.reason)
if self.body is None and not self.empty_body:
self.text = "{}: {}".format(self.status, self.reason)
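# Example (sketch, not from the original module): a request handler may
# abort by raising one of these exceptions, e.g.
#     raise HTTPNotFound(text="no such resource")
# and the raised object itself serves as the HTTP response.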
class HTTPError(HTTPException):
"""Base class for exceptions with status codes in the 400s and 500s."""
class HTTPRedirection(HTTPException):
"""Base class for exceptions with status codes in the 300s."""
class HTTPSuccessful(HTTPException):
"""Base class for exceptions with status codes in the 200s."""
class HTTPOk(HTTPSuccessful):
status_code = 200
class HTTPCreated(HTTPSuccessful):
status_code = 201
class HTTPAccepted(HTTPSuccessful):
status_code = 202
class HTTPNonAuthoritativeInformation(HTTPSuccessful):
status_code = 203
class HTTPNoContent(HTTPSuccessful):
status_code = 204
empty_body = True
class HTTPResetContent(HTTPSuccessful):
status_code = 205
empty_body = True
class HTTPPartialContent(HTTPSuccessful):
status_code = 206
############################################################
# 3xx redirection
############################################################
class _HTTPMove(HTTPRedirection):
def __init__(self, location, *, headers=None, reason=None,
body=None, text=None, content_type=None):
if not location:
raise ValueError("HTTP redirects need a location to redirect to.")
super().__init__(headers=headers, reason=reason,
body=body, text=text, content_type=content_type)
self.headers['Location'] = location
self.location = location
class HTTPMultipleChoices(_HTTPMove):
status_code = 300
class HTTPMovedPermanently(_HTTPMove):
status_code = 301
class HTTPFound(_HTTPMove):
status_code = 302
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(_HTTPMove):
status_code = 303
class HTTPNotModified(HTTPRedirection):
# FIXME: this should include a date or etag header
status_code = 304
empty_body = True
class HTTPUseProxy(_HTTPMove):
# Not a move, but looks a little like one
status_code = 305
class HTTPTemporaryRedirect(_HTTPMove):
status_code = 307
############################################################
# 4xx client error
############################################################
class HTTPClientError(HTTPError):
pass
class HTTPBadRequest(HTTPClientError):
status_code = 400
class HTTPUnauthorized(HTTPClientError):
status_code = 401
class HTTPPaymentRequired(HTTPClientError):
status_code = 402
class HTTPForbidden(HTTPClientError):
status_code = 403
class HTTPNotFound(HTTPClientError):
status_code = 404
class HTTPMethodNotAllowed(HTTPClientError):
status_code = 405
def __init__(self, method, allowed_methods, *, headers=None, reason=None,
body=None, text=None, content_type=None):
allow = ','.join(sorted(allowed_methods))
super().__init__(headers=headers, reason=reason,
body=body, text=text, content_type=content_type)
self.headers['Allow'] = allow
self.allowed_methods = allowed_methods
self.method = method.upper()
class HTTPNotAcceptable(HTTPClientError):
status_code = 406
class HTTPProxyAuthenticationRequired(HTTPClientError):
status_code = 407
class HTTPRequestTimeout(HTTPClientError):
status_code = 408
class HTTPConflict(HTTPClientError):
status_code = 409
class HTTPGone(HTTPClientError):
status_code = 410
class HTTPLengthRequired(HTTPClientError):
status_code = 411
class HTTPPreconditionFailed(HTTPClientError):
status_code = 412
class HTTPRequestEntityTooLarge(HTTPClientError):
status_code = 413
class HTTPRequestURITooLong(HTTPClientError):
status_code = 414
class HTTPUnsupportedMediaType(HTTPClientError):
status_code = 415
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
status_code = 416
class HTTPExpectationFailed(HTTPClientError):
status_code = 417
############################################################
# 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
class HTTPServerError(HTTPError):
pass
class HTTPInternalServerError(HTTPServerError):
status_code = 500
class HTTPNotImplemented(HTTPServerError):
status_code = 501
class HTTPBadGateway(HTTPServerError):
status_code = 502
class HTTPServiceUnavailable(HTTPServerError):
status_code = 503
class HTTPGatewayTimeout(HTTPServerError):
status_code = 504
class HTTPVersionNotSupported(HTTPServerError):
status_code = 505
| apache-2.0 | 6,278,832,781,174,916,000 | 22.522184 | 78 | 0.648578 | false |
jurajmajor/ltl3tela | Experiments/ltlcross_runner.py | 1 | 23078 | # -*- coding: utf-8 -*-
import subprocess
import sys
import os.path
import re
import math
import spot
from IPython.display import SVG
from datetime import datetime
import pandas as pd
from experiments_lib import hoa_to_spot, dot_to_svg, pretty_print
def bogus_to_lcr(form):
"""Converts a formula as it is printed in ``_bogus.ltl`` file
(uses ``--relabel=abc``) to use ``pnn`` AP names.
"""
args = ['-r0','--relabel=pnn','-f',form]
return subprocess.check_output(["ltlfilt"] + args, universal_newlines=True).strip()
def parse_check_log(log_f):
"""Parses a given log file and locates cases where
sanity checks found some error.
Returns:
bugs: a dict: ``form_id``->``list of error lines``
bogus_forms: a dict: ``form_id``->``form``
tools: a dict: ``tool_id``->``command``
"""
log = open(log_f,'r')
bugs = {}
bogus_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
problem = re.compile('error: .* nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = []
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
bugs[form_id] = f_bugs
bogus_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
f_bugs.append(m_prob.group(0))
log.close()
tools = parse_log_tools(log_f)
return bugs, bogus_forms, tools
def find_log_for(tool_code, form_id, log_f):
"""Returns an array of lines from log for
given tool code (P1,N3,...) and form_id. The
form_id is taken from runner - thus we search for
formula number ``form_id+1``
"""
log = open(log_f,'r')
current_f = -1
formula = re.compile('.*ltl:(\d+): (.*)$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
gather = re.compile('Performing sanity checks and gathering statistics')
output = []
for line in log:
m_form = formula.match(line)
if m_form:
current_f = int(m_form.group(1))
curr_tool = ''
if current_f < form_id+1:
continue
if current_f > form_id+1:
break
m_tool = tool.match(line)
if m_tool:
curr_tool = m_tool.group(1)
if gather.match(line):
curr_tool = 'end'
if curr_tool == tool_code:
output.append(line.strip())
log.close()
return output
def hunt_error_types(log_f):
log = open(log_f,'r')
errors = {}
err_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
problem = re.compile('error: .*')
nonempty = re.compile('error: (.*) is nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = {}
m_tool = tool.match(line)
if m_tool:
tid = m_tool.group(1)
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
errors[form_id] = f_bugs
err_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
prob = m_prob.group(0)
m_bug = nonempty.match(line)
if m_bug:
prob = 'nonempty'
tid = m_bug.group(1)
if prob not in f_bugs:
f_bugs[prob] = []
f_bugs[prob].append(tid)
log.close()
tools = parse_log_tools(log_f)
return errors, err_forms, tools
def parse_log_tools(log_f):
log = open(log_f,'r')
tools = {}
tool = re.compile('.*\[(P\d+)\]: (.*)$')
empty_line = re.compile('^\s$')
for line in log:
m_tool = tool.match(line)
m_empty = empty_line.match(line)
if m_empty:
break
if m_tool:
tid = m_tool.group(1)
tcmd = m_tool.group(2)
tools[tid] = tcmd
log.close()
return tools
class LtlcrossRunner(object):
"""A class for running Spot's `ltlcross` and storing and manipulating
its results. For LTL3HOA it can also draw very weak alternating automata
(VWAA).
Parameters
----------
tools : a dict (String -> String)
The records in the dict are of the form ``name : ltlcross_cmd``
>>> tools = {"LTL3HOA" : "ltl3hoa -d -x -i -p 2 -f %f > %O",
>>> "SPOT": : "ltl2tgba"
>>> }
formula_files : a list of strings
paths to files with formulas to be fed to `ltlcross`
res_filename : String
filename to store the ltlcross`s results
cols : list of Strings, default ``['states','edges','transitions']``
names of ltlcross's statistics columns to be recorded
"""
def __init__(self, tools,
formula_files=['formulae/classic.ltl'],
res_filename='na_comp.csv',
cols=['states', 'edges', 'transitions'],
log_file=None,
):
self.tools = tools
self.mins = []
self.f_files = formula_files
self.cols = cols.copy()
self.automata = None
self.values = None
self.form = None
if res_filename == '' or res_filename is None:
self.res_file = '_'.join(tools.keys()) + '.csv'
else:
self.res_file = res_filename
if log_file is None:
self.log_file = self.res_file[:-3] + 'log'
else:
self.log_file = log_file
def create_args(self, automata=True, check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms = True, escape_tools=False):
"""Creates args that are passed to run_ltlcross
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
### Prepare ltlcross command ###
tools_strs = ["{"+name+"}" + cmd for (name, cmd) in self.tools.items() if name in tool_subset]
if escape_tools:
tools_strs = ["'{}'".format(t_str) for t_str in tools_strs]
args = tools_strs
if forms:
args += ' '.join(['-F '+F for F in self.f_files]).split()
if timeout:
args.append('--timeout='+timeout)
if automata:
args.append('--automata')
if save_bogus:
args.append('--save-bogus={}_bogus.ltl'.format(res_file[:-4]))
if not check:
args.append('--no-checks')
#else:
# args.append('--reference={ref_Spot}ltl2tgba -H %f')
args.append('--products=0')
args.append('--csv='+res_file)
return args
def ltlcross_cmd(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms=True, lcr='ltlcross'):
"""Returns ltlcross command for the parameters.
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset, forms,
escape_tools=True)
return ' '.join([lcr] + args)
def run_ltlcross(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
lcr='ltlcross'):
"""Removes any older version of ``self.res_file`` and runs `ltlcross`
on all tools.
Parameters
----------
args : a list of ltlcross arguments that can be used for subprocess
tool_subset : a list of names from self.tools
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset)
# Delete ltlcross result and lof files
subprocess.call(["rm", "-f", res_file, log_file])
## Run ltlcross ##
log = open(log_file,'w')
cmd = self.ltlcross_cmd(args,lcr=lcr)
print(cmd, file=log)
print(datetime.now().strftime('[%d.%m.%Y %T]'), file=log)
print('=====================', file=log,flush=True)
self.returncode = subprocess.call([lcr] + args, stderr=subprocess.STDOUT, stdout=log)
log.writelines([str(self.returncode)+'\n'])
log.close()
def parse_results(self, res_file=None):
"""Parses the ``self.res_file`` and sets the values, automata, and
form. If there are no results yet, it runs ltlcross before.
"""
if res_file is None:
res_file = self.res_file
if not os.path.isfile(res_file):
raise FileNotFoundError(res_file)
res = pd.read_csv(res_file)
# Add incorrect columns to track flawed automata
if not 'incorrect' in res.columns:
res['incorrect'] = False
# Removes unnecessary parenthesis from formulas
res.formula = res['formula'].map(pretty_print)
form = pd.DataFrame(res.formula.drop_duplicates())
form['form_id'] = range(len(form))
form.index = form.form_id
res = form.merge(res)
# Shape the table
table = res.set_index(['form_id', 'formula', 'tool'])
table = table.unstack(2)
table.axes[1].set_names(['column','tool'],inplace=True)
# Create separate tables for automata
automata = None
if 'automaton' in table.columns.levels[0]:
automata = table[['automaton']]
# Removes formula column from the index
automata.index = automata.index.levels[0]
# Removes `automata` from column names -- flatten the index
automata.columns = automata.columns.levels[1]
form = form.set_index(['form_id', 'formula'])
# Store incorrect and exit_status information separately
self.incorrect = table[['incorrect']]
self.incorrect.columns = self.incorrect.columns.droplevel()
self.exit_status = table[['exit_status']]
self.exit_status.columns = self.exit_status.columns.droplevel()
# store only the tracked statistics columns
values = table[self.cols]
self.form = form
self.values = values.sort_index(axis=1,level=['column','tool'])
# self.compute_best("Minimum")
if automata is not None:
self.automata = automata
def compute_sbacc(self,col='states'):
def get_sbacc(aut):
if isinstance(aut, float) and math.isnan(aut):
return None
a = spot.automata(aut+'\n')
aut = next(a)
aut = spot.sbacc(aut)
if col == 'states':
return aut.num_states()
if col == 'acc':
return aut.num_sets()
df = self.automata.copy()
# Recreate the same index as for other cols
n_i = [(l, self.form_of_id(l,False)) for l in df.index]
df.index = pd.MultiIndex.from_tuples(n_i)
df.index.names=['form_id','formula']
# Recreate the same columns hierarchy
df = df.T
df['column'] = 'sb_{}'.format(col)
self.cols.append('sb_{}'.format(col))
df = df.set_index(['column'],append=True)
df = df.T.swaplevel(axis=1)
# Compute the requested values and add them to others
df = df.applymap(get_sbacc)
self.values = self.values.join(df)
def compute_best(self, tools=None, colname="Minimum"):
"""Computes minimum values over tools in ``tools`` for all
formulas and stores them in column ``colname``.
Parameters
----------
tools : list of Strings
column names that are used to compute the min over
colname : String
name of column used to store the computed values
"""
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if t in self.tools.keys()
or t in self.mins]
self.mins.append(colname)
for col in self.cols:
self.values[col, colname] = self.values[col][tools].min(axis=1)
self.values.sort_index(axis=1, level=0, inplace=True)
def aut_for_id(self, form_id, tool):
"""For given formula id and tool it returns the corresponding
non-deterministic automaton as a Spot's object.
Parameters
----------
form_id : int
id of formula to use
tool : String
name of the tool to use to produce the automaton
"""
if self.automata is None:
raise AssertionError("No results parsed yet")
if tool not in self.tools.keys():
raise ValueError(tool)
return hoa_to_spot(self.automata.loc[form_id, tool])
def cummulative(self, col="states"):
"""Returns table with cummulative numbers of given ``col``.
Parameters
---------
col : String
One of the followed columns (``states`` default)
"""
return self.values[col].dropna().sum()
def smaller_than(self, t1, t2, reverse=False,
restrict=True,
col='states', restrict_cols=True):
"""Returns a dataframe with results where ``col`` for ``tool1``
has strictly smaller value than ``col`` for ``tool2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
reverse : Boolean, default ``False``
if ``True``, it switches ``tool1`` and ``tool2``
restrict : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
col : String, default ``'states'``
name of column use for comparison.
restrict_cols : Boolean, default ``True``
if ``True``, show only the compared column
"""
return self.better_than(t1,t2,reverse=reverse,
props=[col],include_fails=False,
restrict_cols=restrict_cols,
restrict_tools=restrict)
def better_than(self, t1, t2, props=['states','acc'],
reverse=False, include_fails=True,
restrict_cols=True,restrict_tools=True
):
"""Compares ``t1`` against ``t2`` lexicographicaly
on cols from ``props`` and returns DataFrame with
results where ``t1`` is better than ``t2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
props : list of Strings, default (['states','acc'])
list of columns on which we want the comparison (in order)
reverse : Boolean, default ``False``
if ``True``, it switches ``t1`` and ``t2``
include_fails : Boolean, default ``True``
if ``True``, include formulae where t2 fails and t1 does not
fail
restrict_cols : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
property columns
restrict_tools : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
"""
if t1 not in list(self.tools.keys())+self.mins:
raise ValueError(t1)
if t2 not in list(self.tools.keys())+self.mins:
raise ValueError(t2)
if reverse:
t1, t2 = t2, t1
v = self.values
t1_ok = self.exit_status[t1] == 'ok'
if include_fails:
t2_ok = self.exit_status[t2] == 'ok'
# non-fail beats fail
c = v[t1_ok & ~t2_ok]
# We work on non-failures only from now on
eq = t1_ok & t2_ok
else:
c = pd.DataFrame()
eq = t1_ok
for prop in props:
# For each prop we add t1 < t2
better = v[prop][t1] < v[prop][t2]
# but only from those which were equivalent so far
equiv_and_better = v.loc[better & eq]
c = c.append(equiv_and_better)
# And now choose those equivalent also on prop to eq
eq = eq & (v[prop][t1] == v[prop][t2])
# format the output
idx = pd.IndexSlice
tools = [t1,t2] if restrict_tools else slice(None)
props = props if restrict_cols else slice(None)
return c.loc[:,idx[props,tools]]
def form_of_id(self, form_id, spot_obj=True):
"""For given form_id returns the formula
Parameters
----------
form_id : int
id of formula to return
spot_obj : Bool
If ``True``, returns Spot formula object (uses Latex to
print the formula in Jupyter notebooks)
"""
f = self.values.index[form_id][1]
if spot_obj:
return spot.formula(f)
return f
def id_of_form(self, f, convert=False):
"""Returns id of a given formula. If ``convert`` is ``True``
it also calls ``bogus_to_lcr`` first.
"""
if convert:
f = bogus_to_lcr(f)
ni = self.values.index.droplevel(0)
return ni.get_loc(f)
def mark_incorrect(self, form_id, tool,output_file=None,input_file=None):
"""Marks automaton given by the formula id and tool as flawed
and writes it into the .csv file
"""
if tool not in self.tools.keys():
raise ValueError(tool)
# Put changes into the .csv file
if output_file is None:
output_file = self.res_file
if input_file is None:
input_file = self.res_file
csv = pd.read_csv(input_file)
if not 'incorrect' in csv.columns:
csv['incorrect'] = False
cond = (csv['formula'].map(pretty_print) ==
pretty_print(self.form_of_id(form_id,False))) &\
(csv.tool == tool)
csv.loc[cond,'incorrect'] = True
csv.to_csv(output_file,index=False)
# Mark the information into self.incorrect
self.incorrect.loc[self.index_for(form_id)][tool] = True
def na_incorrect(self):
"""Marks values for flawed automata as N/A. This causes
that the touched formulae will be removed from cummulative
etc. if computed again. To reverse this information you
have to parse the results again.
It also sets ``exit_status`` to ``incorrect``
"""
self.values = self.values[~self.incorrect]
self.exit_status[self.incorrect] = 'incorrect'
def index_for(self, form_id):
return (form_id,self.form_of_id(form_id,False))
def get_error_count(self,err_type='timeout',drop_zeros=True):
"""Returns a Series with total number of er_type errors for
each tool.
Parameters
----------
err_type : String
One of `timeout`, `parse error`, `incorrect`, `crash`, or `no output`
Type of error we seek
drop_zeros: Boolean (default True)
If true, rows with zeros are removed
"""
if err_type not in ['timeout', 'parse error',
'incorrect', 'crash',
'no output']:
raise ValueError(err_type)
if err_type == 'crash':
c1 = self.exit_status == 'exit code'
c2 = self.exit_status == 'signal'
res = (c1 | c2).sum()
else:
res = (self.exit_status == err_type).sum()
if drop_zeros:
return res.iloc[res.to_numpy().nonzero()]
return res
def cross_compare(self,tools=None,props=['states','acc'],
include_fails=True, total=True,
include_other=True):
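# Builds a matrix where entry [t1, t2] counts the formulae on which t1 is
# lexicographically better than t2 on `props`; column 'V' sums each row.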
def count_better(tool1,tool2):
if tool1 == tool2:
return float('nan')
try:
return len(self.better_than(tool1,tool2,props,
include_fails=include_fails))
except ValueError as e:
if include_other:
return float('nan')
else:
raise e
if tools is None:
tools = self.tools.keys()
c = pd.DataFrame(index=tools, columns=tools).fillna(0)
for tool in tools:
c[tool] = pd.DataFrame(c[tool]).apply(lambda x:
count_better(x.name,tool), 1)
if total:
c['V'] = c.sum(axis=1)
return c
def min_counts(self, tools=None, restrict_tools=False, unique_only=False, col='states',min_name='min(count)'):
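# For each tool, count the formulae where it attains the minimum of `col`;
# with unique_only=True only formulae where exactly one tool reaches the
# minimum are counted.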
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if
t in self.tools.keys() or
t in self.mins]
min_tools = tools if restrict_tools else list(self.tools.keys())
self.compute_best(tools=min_tools, colname=min_name)
s = self.values.loc(axis=1)[col]
df = s.loc(axis=1)[tools+[min_name]]
is_min = lambda x: x[x == x[min_name]]
best_t_count = df.apply(is_min, axis=1).count(axis=1)
choose = (df[best_t_count == 2]) if unique_only else df
choose = choose.index
min_counts = df.loc[choose].apply(is_min,axis=1).count()
return pd.DataFrame(min_counts[min_counts.index != min_name])
def param_runner(name, tools, data_dir='data_param'):
cols=["states","transitions","acc","time","nondet_states"]
r = LtlcrossRunner(tools,\
res_filename='{}/{}.csv'.format(data_dir,name),\
formula_files=['formulae/{}.ltl'.format(name)],\
cols=cols)
return r
| gpl-3.0 | 497,926,085,071,383,360 | 35.515823 | 114 | 0.533928 | false |
gbowerman/azurerm | azurerm/resourcegroups.py | 1 | 4056 | '''resourcegroups.py - azurerm functions for Resource Groups.'''
import json
from .restfns import do_delete, do_get, do_post, do_put
from .settings import get_rm_endpoint, RESOURCE_API
def create_resource_group(access_token, subscription_id, rgname, location):
'''Create a resource group in the specified location.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
rg_body = {'location': location}
body = json.dumps(rg_body)
return do_put(endpoint, body, access_token)
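# Example (sketch): with an `access_token` and `subscription_id` obtained
# elsewhere, create_resource_group(access_token, subscription_id, 'myrg',
# 'eastus') returns the HTTP response from the Resource Manager endpoint.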
def delete_resource_group(access_token, subscription_id, rgname):
'''Delete the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'?api-version=', RESOURCE_API])
return do_delete(endpoint, access_token)
def export_template(access_token, subscription_id, rgname):
'''Capture the specified resource group as a template
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/exportTemplate',
'?api-version=', RESOURCE_API])
rg_body = {'options':'IncludeParameterDefaultValue', 'resources':['*']}
body = json.dumps(rg_body)
return do_post(endpoint, body, access_token)
def get_resource_group(access_token, subscription_id, rgname):
'''Get details about the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
def get_resource_group_resources(access_token, subscription_id, rgname):
'''Get the resources in the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'/resources?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
def list_resource_groups(access_token, subscription_id):
'''List the resource groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/',
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
| mit | 1,837,027,315,200,992,000 | 33.666667 | 75 | 0.592949 | false |
samesense/tools | py/uniprotTxt.py | 1 | 5230 | """Getting uniprot txt files for proteins
parsing them
loading parsed data."""
import os, time, sys
import files
from collections import defaultdict
def parseProteinName(protein):
with open(mkTxtFile(protein)) as f:
for line in f:
# DE RecName: Full=Fatty acid synthase;
if line[0:2] == 'DE' and 'RecName' in line:
name = line.split(':')[1].split(';')[0].split('=')[1]
return name
elif line[0:2] == 'DE' and 'SubName' in line:
name = line.split(':')[1].split(';')[0].split('=')[1]
return name
elif line[0:2] == 'GE' and 'Name' in line:
name = line.split('Name=')[1].split(';')[0]
return name
def dumpProteinNames(proteins, nameFile):
""" '../working/neal/proteins.names' """
with open(nameFile, 'w') as fout:
for protein in proteins:
print >> fout, '\t'.join((protein, parseProteinName(protein)))
def loadProteinNames(nameFile):
protein2name = {}
with open(nameFile) as f:
for line in f:
protein, name = line.strip('\n').split('\t')
protein2name[protein] = name
return protein2name
def mkTxtFile(proteinID):
txtFile = files.dataDir + '/uniprot/txt/' + proteinID + '.txt'
return txtFile
def mkProteinToProsite(proteins):
protein2prosite = defaultdict(dict)
for protein in proteins:
prosites = parseProsite(protein)
for prosite in prosites:
protein2prosite[protein][prosite] = prosites[prosite]
return protein2prosite
def parseProsite(protein):
prosites = {}
with open(mkTxtFile(protein)) as f:
for line in f:
# DR PROSITE; PS51257; PROKAR_LIPOPROTEIN; 1.
# DR GO; GO:0046872; F:metal ion binding; IEA:UniProtKB-KW.
if ('GO;' in line or 'PROSITE' in line or 'Pfam' in line or 'SMART' in line) and line[0:2] == 'DR':
sp = [x.strip() for x in line.strip('\n').split(';')]
prositeID, prositeName = sp[1:3]
prosites[prositeID] = prositeName
elif line[0:2] == 'KW':
# KW NAD; NADP; Oxidoreductase; Phosphopantetheine; Phosphoprotein;
sp = [x.strip().strip('.') for x in line.strip('\n').split(';')]
for prosite in sp[1:]:
if prosite:
prosites['KW:'+prosite] = 'KW:' + prosite
return prosites
def download(proteinID):
txtFile = mkTxtFile(proteinID)
if not os.path.exists(txtFile):
os.system('wget "http://www.uniprot.org/uniprot/%s.txt" -O %s' % (proteinID, txtFile))
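# Pause between downloads to avoid hammering the UniProt server.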
time.sleep(2)
def getProteins():
proteins = {}
with open('../working/neal/proteins.xls') as f:
for line in f:
mod, proteinLs = line.strip('\n').split('\t')
for p in proteinLs.split(';'):
proteins[p] = True
return proteins
def updateTxtFiles():
proteins = getProteins()
for p in proteins:
download(p)
def dumpProsite(proteins, prositeFile):
""" '../working/neal/proteins.prosite' """
protein2prosite = mkProteinToProsite(proteins)
with open(prositeFile, 'w') as fout:
for protein in protein2prosite:
for prosite in protein2prosite[protein]:
print >> fout, '\t'.join( (protein, prosite, protein2prosite[protein][prosite]) )
def loadProteinToProsite(prositeFile):
""" '../working/neal/proteins.prosite' """
protein2prosite = defaultdict(dict)
with open(prositeFile) as f:
for line in f:
protein, prositeID, prositeName = line.strip('\n').split('\t')
protein2prosite[protein][prositeID] = prositeName
return protein2prosite
def loadProteinToGOCC(prositeFile):
""" '../working/neal/proteins.prosite' """
protein2prosite = defaultdict(dict)
with open(prositeFile) as f:
for line in f:
protein, prositeID, prositeName = line.strip('\n').split('\t')
if 'C:' in prositeName:
protein2prosite[protein][prositeID] = prositeName
return protein2prosite
def loadGoCounts(proteinFile, goFile):
"""C,P,F counts for experimental proteins per mod
'../working/neal/proteins.xls'
'../working/neal/proteins.prosite'
"""
proteins = {'SNO':defaultdict(dict),
'RSG':defaultdict(dict),
'SPAL':defaultdict(dict),
'SOH':defaultdict(dict)}
mod2proteins = defaultdict(dict)
with open(proteinFile) as f:
for line in f:
mod, proteinLs = line.strip('\n').split('\t')
for p in proteinLs.split(';'):
mod2proteins[mod][p] = proteinLs
with open(goFile) as f:
for line in f:
protein, prositeID, prositeName = line.strip('\n').split('\t')
goType = prositeName.split(':')[0]
if goType in ('C', 'F', 'P'):
for mod in mod2proteins:
if protein in mod2proteins[mod]:
proteins[mod][goType][mod2proteins[mod][protein]] = True
return proteins
if __name__ == "__main__":
dumpProteinNames()
# dumpProsite()
| mit | -5,001,790,490,913,616,000 | 36.092199 | 111 | 0.57782 | false |
dragosbdi/p2pool | SOAPpy/SOAPBuilder.py | 289 | 22852 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py 1498 2010-03-12 02:13:19Z pooryorick $'
from version import __version__
import cgi
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
BooleanType
pythonHasBooleanType = 1
except NameError:
pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = ( '%(ENV_T)s:Envelope\n' + \
' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
def __init__(self, args = (), kw = {}, method = None, namespace = None,
header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
use_refs = 0, config = Config, noroot = 0):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
self.args = args
self.kw = kw
self.envelope = envelope
self.encoding = encoding
self.method = method
self.namespace = namespace
self.header = header
self.methodattrs= methodattrs
self.use_refs = use_refs
self.config = config
self.out = []
self.tcounter = 0
self.ncounter = 1
self.icounter = 1
self.envns = {}
self.ids = {}
self.depth = 0
self.multirefs = []
self.multis = 0
self.body = not isinstance(args, bodyType)
self.noroot = noroot
def build(self):
if Config.debug: print "In build."
ns_map = {}
# Cache whether typing is on or not
typed = self.config.typed
if self.header:
# Create a header.
self.dump(self.header, "Header", typed = typed)
#self.header = None # Wipe it out so no one is using it.
if self.body:
# Call genns to record that we've used SOAP-ENV.
self.depth += 1
body_ns = self.genns(ns_map, NS.ENV)[0]
self.out.append("<%sBody>\n" % body_ns)
if self.method:
# Save the NS map so that it can be restored when we
# fall out of the scope of the method definition
save_ns_map = ns_map.copy()
self.depth += 1
a = ''
if self.methodattrs:
for (k, v) in self.methodattrs.items():
a += ' %s="%s"' % (k, v)
if self.namespace: # Use the namespace info handed to us
methodns, n = self.genns(ns_map, self.namespace)
else:
methodns, n = '', ''
self.out.append('<%s%s%s%s%s>\n' % (
methodns, self.method, n, a, self.genroot(ns_map)))
try:
if type(self.args) != TupleType:
args = (self.args,)
else:
args = self.args
for i in args:
self.dump(i, typed = typed, ns_map = ns_map)
if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
for k in self.config.argsOrdering.get(self.method):
self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
else:
for (k, v) in self.kw.items():
self.dump(v, k, typed = typed, ns_map = ns_map)
except RecursionError:
if self.use_refs == 0:
# restart
b = SOAPBuilder(args = self.args, kw = self.kw,
method = self.method, namespace = self.namespace,
header = self.header, methodattrs = self.methodattrs,
envelope = self.envelope, encoding = self.encoding,
use_refs = 1, config = self.config)
return b.build()
raise
if self.method:
self.out.append("</%s%s>\n" % (methodns, self.method))
# End of the method definition; drop any local namespaces
ns_map = save_ns_map
self.depth -= 1
if self.body:
# dump may add to self.multirefs, but the for loop will keep
# going until it has used all of self.multirefs, even those
# entries added while in the loop.
self.multis = 1
for obj, tag in self.multirefs:
self.dump(obj, tag, typed = typed, ns_map = ns_map)
self.out.append("</%sBody>\n" % body_ns)
self.depth -= 1
if self.envelope:
e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
self.envns.items())
self.out = ['<', self._env_top] + e + ['>\n'] + \
self.out + \
[self._env_bot]
if self.encoding != None:
self.out.insert(0, self._xml_enc_top % self.encoding)
return ''.join(self.out).encode(self.encoding)
self.out.insert(0, self._xml_top)
return ''.join(self.out)
def gentag(self):
if Config.debug: print "In gentag."
self.tcounter += 1
return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
if nsURI == None:
return ('', '')
if type(nsURI) == TupleType: # already a tuple
if len(nsURI) == 2:
ns, nsURI = nsURI
else:
ns, nsURI = None, nsURI[0]
else:
ns = None
if ns_map.has_key(nsURI):
return (ns_map[nsURI] + ':', '')
if self._env_ns.has_key(nsURI):
ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
return (ns + ':', '')
if not ns:
ns = "ns%d" % self.ncounter
self.ncounter += 1
ns_map[nsURI] = ns
if self.config.buildWithNamespacePrefix:
return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
else:
return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
if self.noroot:
return ''
if self.depth != 2:
return ''
ns, n = self.genns(ns_map, NS.ENC)
return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
def checkref(self, obj, tag, ns_map):
if self.depth < 2:
return ''
if not self.ids.has_key(id(obj)):
n = self.ids[id(obj)] = self.icounter
self.icounter = n + 1
if self.use_refs == 0:
return ''
if self.depth == 2:
return ' id="i%d"' % n
self.multirefs.append((obj, tag))
else:
if self.use_refs == 0:
raise RecursionError, "Cannot serialize recursive object"
n = self.ids[id(obj)]
if self.multis and self.depth == 2:
return ' id="i%d"' % n
self.out.append('<%s href="#i%d"%s/>\n' %
(tag, n, self.genroot(ns_map)))
return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if Config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
self.dump_dispatch(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
rootattr = '', id = '',
xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
if Config.debug: print "In dumper."
if nsURI == None:
nsURI = self.config.typesNamespaceURI
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
a = n = t = ''
if typed and obj_type:
ns, n = self.genns(ns_map, nsURI)
ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
try: a = obj._marshalAttrs(ns_map, self)
except: pass
try: data = obj._marshalData()
except:
if (obj_type != "string"): # strings are already encoded
data = cgi.escape(str(obj))
else:
data = obj
return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
"id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_float."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if Config.strict_range:
doubleType(obj)
if fpconst.isPosInf(obj):
obj = "INF"
elif fpconst.isNegInf(obj):
obj = "-INF"
elif fpconst.isNaN(obj):
obj = "NaN"
else:
obj = repr(obj)
# Note: python 'float' is actually a SOAP 'double'.
self.out.append(self.dumper(
None, "double", obj, tag, typed, ns_map, self.genroot(ns_map)))
def dump_int(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_int."
self.out.append(self.dumper(None, 'integer', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_bool(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_bool."
self.out.append(self.dumper(None, 'boolean', obj, tag, typed,
ns_map, self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_string."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: data = obj._marshalData()
except: data = obj
self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_list.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try:
sample = data[0]
empty = 0
except:
# preserve type if present
if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
if getattr(obj, "_complexType", None):
sample = typedArrayType(typed=obj._type,
complexType = obj._complexType)
sample._typename = obj._type
if not getattr(obj,"_ns",None): obj._ns = NS.URN
else:
sample = typedArrayType(typed=obj._type)
else:
sample = structType()
empty = 1
# First scan list to see if all are the same type
same_type = 1
if not empty:
for i in data[1:]:
if type(sample) != type(i) or \
(type(sample) == InstanceType and \
sample.__class__ != i.__class__):
same_type = 0
break
ndecl = ''
if same_type:
if (isinstance(sample, structType)) or \
type(sample) == DictType or \
(isinstance(sample, anyType) and \
(getattr(sample, "_complexType", None) and \
sample._complexType)): # force to urn struct
try:
tns = obj._ns or NS.URN
except:
tns = NS.URN
ns, ndecl = self.genns(ns_map, tns)
try:
typename = sample._typename
except:
typename = "SOAPStruct"
t = ns + typename
elif isinstance(sample, anyType):
ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
t = ns + str(sample._type)
else:
t = 'ur-type'
else:
typename = type(sample).__name__
# For Python 2.2+
if type(sample) == StringType: typename = 'string'
# HACK: unicode is a SOAP string
if type(sample) == UnicodeType: typename = 'string'
# HACK: python 'float' is actually a SOAP 'double'.
if typename=="float": typename="double"
t = self.genns(
ns_map, self.config.typesNamespaceURI)[0] + typename
else:
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
"ur-type"
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ens, edecl = self.genns(ns_map, NS.ENC)
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
(tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
if typed:
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
if isinstance(data, (list, tuple, arrayType)):
should_drill = True
else:
should_drill = not same_type
for i in data:
self.dump(i, elemsname, should_drill, ns_map)
if typed: self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_exception(self, obj, tag, typed = 0, ns_map = {}):
if isinstance(obj, faultType): # Fault
cns, cdecl = self.genns(ns_map, NS.ENC)
vns, vdecl = self.genns(ns_map, NS.ENV)
self.out.append('<%sFault %sroot="1"%s%s>' % (vns, cns, vdecl, cdecl))
self.dump(obj.faultcode, "faultcode", typed, ns_map)
self.dump(obj.faultstring, "faultstring", typed, ns_map)
if hasattr(obj, "detail"):
self.dump(obj.detail, "detail", typed, ns_map)
self.out.append("</%sFault>\n" % vns)
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_dictionary."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
self.out.append('<%s%s%s%s>\n' %
(tag, id, a, self.genroot(ns_map)))
for (k, v) in obj.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
def dump_dispatch(self, obj, tag, typed = 1, ns_map = {}):
if not tag:
# If it has a name use it.
if isinstance(obj, anyType) and obj._name:
tag = obj._name
else:
tag = self.gentag()
# watch out for order!
dumpmap = (
(Exception, self.dump_exception),
(arrayType, self.dump_list),
(basestring, self.dump_string),
(NoneType, self.dump_None),
(bool, self.dump_bool),
(int, self.dump_int),
(long, self.dump_int),
(list, self.dump_list),
(tuple, self.dump_list),
(dict, self.dump_dictionary),
(float, self.dump_float),
)
for dtype, func in dumpmap:
if isinstance(obj, dtype):
func(obj, tag, typed, ns_map)
return
r = self.genroot(ns_map)
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
if isinstance(obj, voidType): # void
self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
else:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
if isinstance(obj, structType):
# Check for namespace
ndecl = ''
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
tag = ns + tag
self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
keylist = obj.__dict__.keys()
# first write out items with order information
if hasattr(obj, '_keyord'):
for i in range(len(obj._keyord)):
self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
keylist.remove(obj._keyord[i])
# now write out the rest
for k in keylist:
if (k[0] != "_"):
self.dump(getattr(obj,k), k, 1, ns_map)
if isinstance(obj, bodyType):
self.multis = 1
for v, k in self.multirefs:
self.dump(v, k, typed = typed, ns_map = ns_map)
self.out.append('</%s>\n' % tag)
elif isinstance(obj, anyType):
t = ''
if typed:
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ons, ondecl = self.genns(ns_map, ns)
ins, indecl = self.genns(ns_map,
self.config.schemaNamespaceURI)
t = ' %stype="%s%s"%s%s' % \
(ins, ons, obj._type, ondecl, indecl)
self.out.append('<%s%s%s%s%s>%s</%s>\n' %
(tag, t, id, a, r, obj._marshalData(), tag))
else: # Some Class
self.out.append('<%s%s%s>\n' % (tag, id, r))
d1 = getattr(obj, '__dict__', None)
if d1 is not None:
                    for (k, v) in d1.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
header=None, methodattrs=None, envelope=1, encoding='UTF-8',
config=Config, noroot = 0):
t = SOAPBuilder(args=args, kw=kw, method=method, namespace=namespace,
header=header, methodattrs=methodattrs,envelope=envelope,
encoding=encoding, config=config,noroot=noroot)
return t.build()
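# Minimal usage sketch (illustrative only, not part of the original module).
# The method name, namespace and argument are assumptions:
#
#   xml = buildSOAP(kw={'msg': 'hello'}, method='echo',
#                   namespace='urn:example', encoding='UTF-8')
#   # 'xml' now holds the serialized SOAP envelope, ready to be POSTed to a
#   # SOAP endpoint (see SOAPProxy elsewhere in SOAPpy for the client side).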
| gpl-3.0 | 4,260,291,081,987,510,300 | 34.265432 | 102 | 0.498687 | false |
JonnyWong16/plexpy | lib/maxminddb/file.py | 3 | 1988 | """For internal use only. It provides a slice-like file reader."""
import os
try:
# pylint: disable=no-name-in-module
from multiprocessing import Lock
except ImportError:
from threading import Lock
class FileBuffer(object):
"""A slice-able file reader"""
def __init__(self, database):
self._handle = open(database, 'rb')
self._size = os.fstat(self._handle.fileno()).st_size
if not hasattr(os, 'pread'):
self._lock = Lock()
def __getitem__(self, key):
if isinstance(key, slice):
return self._read(key.stop - key.start, key.start)
elif isinstance(key, int):
return self._read(1, key)
else:
raise TypeError("Invalid argument type.")
def rfind(self, needle, start):
"""Reverse find needle from start"""
pos = self._read(self._size - start - 1, start).rfind(needle)
if pos == -1:
return pos
return start + pos
def size(self):
"""Size of file"""
return self._size
def close(self):
"""Close file"""
self._handle.close()
if hasattr(os, 'pread'):
def _read(self, buffersize, offset):
"""read that uses pread"""
# pylint: disable=no-member
return os.pread(self._handle.fileno(), buffersize, offset)
else:
def _read(self, buffersize, offset):
"""read with a lock
This lock is necessary as after a fork, the different processes
will share the same file table entry, even if we dup the fd, and
as such the same offsets. There does not appear to be a way to
duplicate the file table entry and we cannot re-open based on the
original path as that file may have replaced with another or
unlinked.
"""
with self._lock:
self._handle.seek(offset)
return self._handle.read(buffersize)
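# Illustrative usage (not part of the upstream module); the database path is
# an assumption:
#
#   buf = FileBuffer('/path/to/GeoLite2-City.mmdb')
#   first_bytes = buf[0:4]   # slice access -> 4 bytes read at offset 0
#   total = buf.size()
#   buf.close()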
| gpl-3.0 | 6,269,692,394,049,382,000 | 29.121212 | 77 | 0.572435 | false |
dagwieers/ansible | lib/ansible/modules/packaging/os/svr4pkg.py | 95 | 7684 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: "Boyd Adamson (@brontitall)"
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
type: bool
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg:
name: CSWcommon
src: /tmp/cswpkgs.pkg
state: present
# Install a package directly from an http site
- svr4pkg:
name: CSWpkgutil
src: 'http://get.opencsw.org/now'
state: present
zone: current
# Install a package with a response file
- svr4pkg:
name: CSWggrep
src: /tmp/third-party.pkg
response_file: /tmp/ggrep.response
state: present
# Ensure that a package is not installed.
- svr4pkg:
name: SUNWgnome-sound-recorder
state: absent
# Ensure that a category is not installed.
- svr4pkg:
name: FIREFOX
state: absent
category: true
'''
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
if category:
cmd.append('-c')
cmd.append(name)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create_admin_file():
(desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
fullauto = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
    os.write(desc, to_bytes(fullauto))  # bytes required for Python 3 compatibility
os.close(desc)
return filename
def run_command(module, cmd):
progname = cmd[0]
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
adminfile = create_admin_file()
cmd = ['pkgadd', '-n']
if zone == 'current':
cmd += ['-G']
cmd += ['-a', adminfile, '-d', src]
if proxy is not None:
cmd += ['-x', proxy]
if response_file is not None:
cmd += ['-r', response_file]
if category:
cmd += ['-Y']
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def package_uninstall(module, name, src, category):
adminfile = create_admin_file()
if category:
cmd = ['pkgrm', '-na', adminfile, '-Y', name]
else:
cmd = ['pkgrm', '-na', adminfile, name]
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=True, choices=['present', 'absent']),
src=dict(default=None),
proxy=dict(default=None),
response_file=dict(default=None),
zone=dict(required=False, default='all', choices=['current', 'all']),
category=dict(default=False, type='bool')
),
supports_check_mode=True
)
state = module.params['state']
name = module.params['name']
src = module.params['src']
proxy = module.params['proxy']
response_file = module.params['response_file']
zone = module.params['zone']
category = module.params['category']
rc = None
out = ''
err = ''
result = {}
result['name'] = name
result['state'] = state
if state == 'present':
if src is None:
module.fail_json(name=name,
msg="src is required when state=present")
if not package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
elif state == 'absent':
if package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = package_uninstall(module, name, src, category)
out = out[:75]
# Returncodes as per pkgadd(1m)
# 0 Successful completion
# 1 Fatal error.
# 2 Warning.
# 3 Interruption.
# 4 Administration.
# 5 Administration. Interaction is required. Do not use pkgadd -n.
# 10 Reboot after installation of all packages.
# 20 Reboot after installation of this package.
# 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
if rc in (0, 2, 3, 10, 20):
result['changed'] = True
# no install nor uninstall, or failed
else:
result['changed'] = False
# rc will be none when the package already was installed and no action took place
# Only return failed=False when the returncode is known to be good as there may be more
# undocumented failure return codes
if rc not in (None, 0, 2, 10, 20):
result['failed'] = True
else:
result['failed'] = False
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,475,664,016,510,982,700 | 28.106061 | 147 | 0.624414 | false |
alexandrandronescu/autorobot | curs.py | 1 | 2038 | import sys
import datetime
import requests
def check_arguments(argv):
date1 = date2 = None
today = datetime.datetime.now().date()
if len(argv)==1:
date1 = datetime.date(year=datetime.datetime.now().date().year, month=datetime.datetime.now().date().month, day=1)
date2 = today
try:
if len(argv)>1:
if argv[1]=="today":
print "Today's currency: %s" % get_currency(today)
				return None, None
date1 = datetime.datetime.strptime(argv[1], "%Y-%m-%d").date()
if len(argv)>2:
date2 = datetime.datetime.strptime(argv[2], "%Y-%m-%d").date()
else:
date2 = today
if not date1 or not date2:
print "Incorrect dates!"
print "Usage: python curs.py 2013-12-01 2013-12-20"
return None, None
except Exception:
print "Exception while processing the parameters %s" % argv
return None, None
return date1, date2
def get_currency(date):
return float(requests.get('http://www.infovalutar.ro/bnr/%d/%d/%d/EUR' % (date.year, date.month, date.day)).text)
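# Example only (hypothetical date): get_currency(datetime.date(2013, 12, 2))
# returns the EUR exchange rate for that day as a float, fetched from
# infovalutar.ro.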
def compute_currency():
	# compute the average value of each day's currency between the specified dates
date1, date2 = check_arguments(sys.argv)
if not date1 or not date2:
return
# order the dates
	difference = date2-date1
	if difference.days<0:
		date1, date2 = date2, date1
		difference = date2-date1
#print "Computing the currency between %s and %s (%s days)" % (date1, date2, difference.days)
currency = []
for day in range(difference.days+1):
# www.infovalutar.ro/bnr/2013/11/27/EUR
# print 'www.infovalutar.ro/bnr/%d/%d/%d/EUR' % (date.year, date.month, date.day)
date = date1+datetime.timedelta(days=day)
# add only weekdays
if date.isoweekday() in range(1, 6):
currency.append(get_currency(date))
print "Computing the currency between %s and %s (%s working days/%s total days)" % (date1, date2, len(currency), difference.days)
	average_currency = sum(currency)/len(currency)
	if len(currency) < 50:
		print currency
	print "Average currency: %s" % average_currency
if __name__ == "__main__":
compute_currency()
| gpl-2.0 | -6,973,143,538,955,363,000 | 26.917808 | 130 | 0.685476 | false |
gaddman/ansible | lib/ansible/module_utils/vmware.py | 3 | 42109 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import atexit
import os
import ssl
import time
from random import randint
try:
# requests is required for exception handling of the ConnectionError
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils._text import to_text
from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
from ansible.module_utils.basic import env_fallback
class TaskError(Exception):
def __init__(self, *args, **kwargs):
super(TaskError, self).__init__(*args, **kwargs)
def wait_for_task(task, max_backoff=64, timeout=3600):
"""Wait for given task using exponential back-off algorithm.
Args:
task: VMware task object
max_backoff: Maximum amount of sleep time in seconds
timeout: Timeout for the given task in seconds
Returns: Tuple with True and result for successful task
Raises: TaskError on failure
"""
failure_counter = 0
start_time = time.time()
while True:
if time.time() - start_time >= timeout:
raise TaskError("Timeout")
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
error_msg = task.info.error
host_thumbprint = None
try:
error_msg = error_msg.msg
if hasattr(task.info.error, 'thumbprint'):
host_thumbprint = task.info.error.thumbprint
except AttributeError:
pass
finally:
raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
time.sleep(sleep_time)
failure_counter += 1
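# Illustrative call pattern (the task object and module are hypothetical):
#
#   task = vm.PowerOff()
#   try:
#       changed, result = wait_for_task(task, timeout=600)
#   except TaskError as task_err:
#       module.fail_json(msg=to_text(task_err))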
def wait_for_vm_ip(content, vm, timeout=300):
facts = dict()
interval = 15
while timeout > 0:
_facts = gather_vm_facts(content, vm)
if _facts['ipv4'] or _facts['ipv6']:
facts = _facts
break
time.sleep(interval)
timeout -= interval
return facts
def find_obj(content, vimtype, name, first=True, folder=None):
container = content.viewManager.CreateContainerView(folder or content.rootFolder, recursive=True, type=vimtype)
# Get all objects matching type (and name if given)
obj_list = [obj for obj in container.view if not name or to_text(obj.name) == to_text(name)]
container.Destroy()
# Return first match or None
if first:
if obj_list:
return obj_list[0]
return None
# Return all matching objects or empty list
return obj_list
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
if not isinstance(obj_type, list):
obj_type = [obj_type]
objects = get_all_objs(content, obj_type, folder=folder, recurse=recurse)
for obj in objects:
if obj.name == name:
return obj
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder)
def find_datacenter_by_name(content, datacenter_name):
return find_object_by_name(content, datacenter_name, [vim.Datacenter])
def get_parent_datacenter(obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
datacenter = None
while True:
if not hasattr(obj, 'parent'):
break
obj = obj.parent
if isinstance(obj, vim.Datacenter):
datacenter = obj
break
return datacenter
def find_datastore_by_name(content, datastore_name):
return find_object_by_name(content, datastore_name, [vim.Datastore])
def find_dvs_by_name(content, switch_name):
return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch])
def find_hostsystem_by_name(content, hostname):
return find_object_by_name(content, hostname, [vim.HostSystem])
def find_resource_pool_by_name(content, resource_pool_name):
return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
def find_network_by_name(content, network_name):
return find_object_by_name(content, network_name, [vim.Network])
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None, folder=None, match_first=False):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'uuid':
# Search By BIOS UUID rather than instance UUID
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
elif vm_id_type == 'inventory_path':
searchpath = folder
# get all objects for this path
f_obj = si.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == vm_id:
vm = c_obj
if match_first:
break
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def compile_folder_path_for_object(vobj):
""" make a /vm/foo/bar/baz like folder path for an object """
paths = []
if isinstance(vobj, vim.Folder):
paths.append(vobj.name)
thisobj = vobj
while hasattr(thisobj, 'parent'):
thisobj = thisobj.parent
try:
moid = thisobj._moId
except AttributeError:
moid = None
if moid in ['group-d1', 'ha-folder-root']:
break
if isinstance(thisobj, vim.Folder):
paths.append(thisobj.name)
paths.reverse()
return '/' + '/'.join(paths)
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'hw_datastores': [],
'hw_files': [],
'hw_esxi_host': None,
'hw_guest_ha_state': None,
'hw_is_template': vm.config.template,
'hw_folder': None,
'hw_version': vm.config.version,
'instance_uuid': vm.config.instanceUuid,
'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
'guest_question': vm.summary.runtime.question,
'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
'vnc': {},
}
# facts that may or may not exist
if vm.summary.runtime.host:
try:
host = vm.summary.runtime.host
facts['hw_esxi_host'] = host.summary.config.name
except vim.fault.NoPermission:
# User does not have read permission for the host system,
# proceed without this value. This value does not contribute or hamper
# provisioning or power management operations.
pass
if vm.summary.runtime.dasVmProtection:
facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
datastores = vm.datastore
for ds in datastores:
facts['hw_datastores'].append(ds.info.name)
try:
files = vm.config.files
layout = vm.layout
if files:
facts['hw_files'] = [files.vmPathName]
for item in layout.snapshot:
for snap in item.snapshotFile:
facts['hw_files'].append(files.snapshotDirectory + snap)
for item in layout.configFile:
facts['hw_files'].append(os.path.dirname(files.vmPathName) + '/' + item)
for item in vm.layout.logFile:
facts['hw_files'].append(files.logDirectory + item)
for item in vm.layout.disk:
for disk in item.diskFile:
facts['hw_files'].append(disk)
except BaseException:
pass
facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = list(device.ipAddress)
if vm.guest.ipAddress:
if ':' in vm.guest.ipAddress:
facts['ipv6'] = vm.guest.ipAddress
else:
facts['ipv4'] = vm.guest.ipAddress
ethernet_idx = 0
for entry in vm.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
if entry.macAddress:
mac_addr = entry.macAddress
mac_addr_dash = mac_addr.replace(':', '-')
else:
mac_addr = mac_addr_dash = None
if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and
hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):
port_group_key = entry.backing.port.portgroupKey
port_key = entry.backing.port.portKey
else:
port_group_key = None
port_key = None
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': mac_addr,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': mac_addr_dash,
'summary': entry.deviceInfo.summary,
'portgroup_portkey': port_key,
'portgroup_key': port_group_key,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
facts['vnc'] = get_vnc_extraconfig(vm)
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
snapshot = _get_vm_prop(vm, ('snapshot',))
if not snapshot:
return result
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
if current_snap_obj:
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
else:
result['current_snapshot'] = dict()
return result
def get_vnc_extraconfig(vm):
result = {}
for opts in vm.config.extraConfig:
for optkeyname in ['enabled', 'ip', 'port', 'password']:
if opts.key.lower() == "remotedisplay.vnc." + optkeyname:
result[optkeyname] = opts.value
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str',
required=False,
fallback=(env_fallback, ['VMWARE_HOST']),
),
username=dict(type='str',
aliases=['user', 'admin'],
required=False,
fallback=(env_fallback, ['VMWARE_USER'])),
password=dict(type='str',
aliases=['pass', 'pwd'],
required=False,
no_log=True,
fallback=(env_fallback, ['VMWARE_PASSWORD'])),
port=dict(type='int',
default=443,
fallback=(env_fallback, ['VMWARE_PORT'])),
validate_certs=dict(type='bool',
required=False,
default=True,
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS'])),
)
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
port = module.params.get('port', 443)
validate_certs = module.params['validate_certs']
if not hostname:
module.fail_json(msg="Hostname parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")
if not username:
module.fail_json(msg="Username parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_USER=ESXI_USERNAME'")
if not password:
module.fail_json(msg="Password parameter is missing."
" Please specify this parameter in task or"
" export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
'python or use validate_certs=false.')
ssl_context = None
if not validate_certs and hasattr(ssl, 'SSLContext'):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
service_instance = None
try:
connect_args = dict(
host=hostname,
user=username,
pwd=password,
port=port,
)
if ssl_context:
connect_args.update(sslContext=ssl_context)
service_instance = connect.SmartConnect(**connect_args)
except vim.fault.InvalidLogin as invalid_login:
module.fail_json(msg="Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (hostname, port, username, invalid_login.msg))
except vim.fault.NoPermission as no_permission:
module.fail_json(msg="User %s does not have required permission"
" to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
except vmodl.fault.InvalidRequest as invalid_request:
# Request is malformed
module.fail_json(msg="Failed to get a response from server %s:%s as "
"request is malformed: %s" % (hostname, port, invalid_request.msg))
except Exception as generic_exc:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (hostname, port, generic_exc))
if service_instance is None:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port))
# Disabling atexit should be used in special cases only.
# Such as IP change of the ESXi host which removes the connection anyway.
# Also removal significantly speeds up the return of the module
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
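# Sketch of the boilerplate a module built on these helpers typically uses.
# The extra 'name' parameter and the lookup are illustrative assumptions:
#
#   argument_spec = vmware_argument_spec()
#   argument_spec.update(name=dict(type='str', required=True))
#   module = AnsibleModule(argument_spec=argument_spec)
#   content = connect_to_api(module)
#   vm = find_vm_by_name(content, module.params['name'])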
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
def serialize_spec(clonespec):
"""Serialize a clonespec or a relocation spec"""
data = {}
attrs = dir(clonespec)
attrs = [x for x in attrs if not x.startswith('_')]
for x in attrs:
xo = getattr(clonespec, x)
if callable(xo):
continue
xt = type(xo)
if xo is None:
data[x] = None
elif isinstance(xo, vim.vm.ConfigSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.RelocateSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDisk):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
data[x] = to_text(xo)
elif isinstance(xo, vim.Description):
data[x] = {
'dynamicProperty': serialize_spec(xo.dynamicProperty),
'dynamicType': serialize_spec(xo.dynamicType),
'label': serialize_spec(xo.label),
'summary': serialize_spec(xo.summary),
}
elif hasattr(xo, 'name'):
data[x] = to_text(xo) + ':' + to_text(xo.name)
elif isinstance(xo, vim.vm.ProfileSpec):
pass
elif issubclass(xt, list):
data[x] = []
for xe in xo:
data[x].append(serialize_spec(xe))
elif issubclass(xt, string_types + integer_types + (float, bool)):
if issubclass(xt, integer_types):
data[x] = int(xo)
else:
data[x] = to_text(xo)
elif issubclass(xt, bool):
data[x] = xo
elif issubclass(xt, dict):
data[to_text(x)] = {}
for k, v in xo.items():
k = to_text(k)
data[x][k] = serialize_spec(v)
else:
data[x] = str(xt)
return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
dc = find_datacenter_by_name(content, datacenter_name)
if dc is None:
module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
if cluster is None:
module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
for host in cluster.host:
if host.name == host_name:
return host, cluster
return None, cluster
def set_vm_power_state(content, vm, state, force, timeout=0):
"""
Set the power status for a VM determined by the current and
    requested states. When force is set, the change is attempted even if the
    VM is in a transitional power state.
"""
facts = gather_vm_facts(content, vm)
expected_state = state.replace('_', '').replace('-', '').lower()
current_state = facts['hw_power_status'].lower()
result = dict(
changed=False,
failed=False,
)
# Need Force
if not force and current_state not in ['poweredon', 'poweredoff']:
result['failed'] = True
result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
return result
# State is not already true
if current_state != expected_state:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
elif expected_state == 'poweredon':
task = vm.PowerOn()
elif expected_state == 'restarted':
if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
task = vm.Reset()
else:
result['failed'] = True
result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
elif expected_state == 'suspended':
if current_state in ('poweredon', 'poweringon'):
task = vm.Suspend()
else:
result['failed'] = True
result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
elif expected_state in ['shutdownguest', 'rebootguest']:
if current_state == 'poweredon':
if vm.guest.toolsRunningStatus == 'guestToolsRunning':
if expected_state == 'shutdownguest':
task = vm.ShutdownGuest()
if timeout > 0:
result.update(wait_for_poweroff(vm, timeout))
else:
task = vm.RebootGuest()
# Set result['changed'] immediately because
# shutdown and reboot return None.
result['changed'] = True
else:
result['failed'] = True
result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
else:
result['failed'] = True
result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
else:
result['failed'] = True
result['msg'] = "Unsupported expected state provided: %s" % expected_state
except Exception as e:
result['failed'] = True
result['msg'] = to_text(e)
if task:
wait_for_task(task)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = task.info.error.msg
else:
result['changed'] = True
# need to get new metadata if changed
result['instance'] = gather_vm_facts(content, vm)
return result
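# Example (hypothetical): shut the guest OS down and wait up to five minutes
# for the power-off to complete:
#
#   result = set_vm_power_state(content, vm, 'shutdown-guest', force=False,
#                               timeout=300)
#   if result.get('failed'):
#       module.fail_json(**result)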
def wait_for_poweroff(vm, timeout=300):
result = dict()
interval = 15
while timeout > 0:
if vm.runtime.powerState.lower() == 'poweredoff':
break
time.sleep(interval)
timeout -= interval
else:
result['failed'] = True
result['msg'] = 'Timeout while waiting for VM power off.'
return result
class PyVmomi(object):
def __init__(self, module):
"""
Constructor
"""
if not HAS_REQUESTS:
module.fail_json(msg="Unable to find 'requests' Python library which is required."
" Please install using 'pip install requests'")
if not HAS_PYVMOMI:
module.fail_json(msg='PyVmomi Python module required. Install using "pip install PyVmomi"')
self.module = module
self.params = module.params
self.si = None
self.current_vm_obj = None
self.content = connect_to_api(self.module)
def is_vcenter(self):
"""
Check if given hostname is vCenter or ESXi host
Returns: True if given connection is with vCenter server
False if given connection is with ESXi server
"""
api_type = None
try:
api_type = self.content.about.apiType
except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)
if api_type == 'VirtualCenter':
return True
elif api_type == 'HostAgent':
return False
def get_managed_objects_properties(self, vim_type, properties=None):
"""
Function to look up a Managed Object Reference in vCenter / ESXi Environment
:param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
:param properties: List of properties related to vim object e.g. Name
:return: local content object
"""
# Get Root Folder
root_folder = self.content.rootFolder
if properties is None:
properties = ['name']
# Create Container View with default root folder
mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
# Create Traversal spec
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
name="traversal_spec",
path='view',
skip=False,
type=vim.view.ContainerView
)
# Create Property Spec
property_spec = vmodl.query.PropertyCollector.PropertySpec(
type=vim_type, # Type of object to retrieved
all=False,
pathSet=properties
)
# Create Object Spec
object_spec = vmodl.query.PropertyCollector.ObjectSpec(
obj=mor,
skip=True,
selectSet=[traversal_spec]
)
# Create Filter Spec
filter_spec = vmodl.query.PropertyCollector.FilterSpec(
objectSet=[object_spec],
propSet=[property_spec],
reportMissingObjectsInResults=False
)
return self.content.propertyCollector.RetrieveContents([filter_spec])
# Virtual Machine related functions
def get_vm(self):
"""
Function to find unique virtual machine either by UUID or Name.
Returns: virtual machine object if found, else None.
"""
vm_obj = None
user_desired_path = None
if self.params['uuid']:
vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
elif self.params['name']:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
vms = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == self.params['name']:
vms.append(temp_vm_object.obj)
break
# get_managed_objects_properties may return multiple virtual machine,
# following code tries to find user desired one depending upon the folder specified.
if len(vms) > 1:
# We have found multiple virtual machines, decide depending upon folder value
if self.params['folder'] is None:
self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
"Folder value is a required parameter to find uniqueness "
"of the virtual machine" % self.params['name'],
details="Please see documentation of the vmware_guest module "
"for folder parameter.")
# Get folder path where virtual machine is located
# User provided folder where user thinks virtual machine is present
user_folder = self.params['folder']
# User defined datacenter
user_defined_dc = self.params['datacenter']
# User defined datacenter's object
datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
# Get Path for Datacenter
dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
# Nested folder does not return trailing /
if not dcpath.endswith('/'):
dcpath += '/'
if user_folder in [None, '', '/']:
# User provided blank value or
# User provided only root value, we fail
self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
"name [%s], please specify folder path other than blank "
"or '/'" % self.params['name'])
elif user_folder.startswith('/vm/'):
# User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
else:
# User defined datacenter is not nested i.e. dcpath = '/' , or
# User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
# User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
# User defined folder starts with datacenter without '/' i.e.
# folder = DC0/vm/india/finance or
# folder = DC0/vm
user_desired_path = user_folder
for vm in vms:
# Check if user has provided same path as virtual machine
actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
continue
if user_desired_path in actual_vm_folder_path:
vm_obj = vm
break
elif vms:
# Unique virtual machine found.
vm_obj = vms[0]
if vm_obj:
self.current_vm_obj = vm_obj
return vm_obj
def gather_facts(self, vm):
"""
Function to gather facts of virtual machine.
Args:
vm: Name of virtual machine.
Returns: Facts dictionary of the given virtual machine.
"""
return gather_vm_facts(self.content, vm)
@staticmethod
def get_vm_path(content, vm_name):
"""
Function to find the path of virtual machine.
Args:
content: VMware content object
vm_name: virtual machine managed object
Returns: Folder of virtual machine if exists, else None
"""
folder_name = None
folder = vm_name.parent
if folder:
folder_name = folder.name
fp = folder.parent
# climb back up the tree to find our path, stop before the root folder
while fp is not None and fp.name is not None and fp != content.rootFolder:
folder_name = fp.name + '/' + folder_name
try:
fp = fp.parent
except BaseException:
break
folder_name = '/' + folder_name
return folder_name
def get_vm_or_template(self, template_name=None):
"""
Find the virtual machine or virtual machine template using name
used for cloning purpose.
Args:
template_name: Name of virtual machine or virtual machine template
Returns: virtual machine or virtual machine template object
"""
template_obj = None
if not template_name:
return template_obj
if "/" in template_name:
vm_obj_path = os.path.dirname(template_name)
vm_obj_name = os.path.basename(template_name)
template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
if template_obj:
return template_obj
else:
template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
if template_obj:
return template_obj
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
templates = []
for temp_vm_object in objects:
if len(temp_vm_object.propSet) != 1:
continue
for temp_vm_object_property in temp_vm_object.propSet:
if temp_vm_object_property.val == template_name:
templates.append(temp_vm_object.obj)
break
if len(templates) > 1:
# We have found multiple virtual machine templates
self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
elif templates:
template_obj = templates[0]
return template_obj
# Cluster related functions
def find_cluster_by_name(self, cluster_name, datacenter_name=None):
"""
Find Cluster by name in given datacenter
Args:
            cluster_name: Name of cluster to find
            datacenter_name: (optional) Name of datacenter
        Returns: Cluster managed object if found, else None
"""
return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
def get_all_hosts_by_cluster(self, cluster_name):
"""
Get all hosts from cluster by cluster name
Args:
cluster_name: Name of cluster
Returns: List of hosts
"""
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
return [host for host in cluster_obj.host]
else:
return []
# Hosts related functions
def find_hostsystem_by_name(self, host_name):
"""
Find Host by name
Args:
host_name: Name of ESXi host
        Returns: Host system managed object if found, else None
"""
return find_hostsystem_by_name(self.content, hostname=host_name)
def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
"""
Function to get all host system managed object
Args:
cluster_name: Name of Cluster
esxi_host_name: Name of ESXi server
Returns: A list of all host system managed objects, else empty list
"""
host_obj_list = []
if not self.is_vcenter():
hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
if hosts:
host_obj_list.append(list(hosts)[0])
else:
if cluster_name:
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
if cluster_obj:
host_obj_list = [host for host in cluster_obj.host]
else:
self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
elif esxi_host_name:
if isinstance(esxi_host_name, str):
esxi_host_name = [esxi_host_name]
for host in esxi_host_name:
esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
if esxi_host_obj:
host_obj_list = [esxi_host_obj]
else:
self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
return host_obj_list
# Network related functions
@staticmethod
def find_host_portgroup_by_name(host, portgroup_name):
"""
Find Portgroup on given host
Args:
host: Host config object
portgroup_name: Name of portgroup
        Returns: Portgroup object if found, else False
"""
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return False
def get_all_port_groups_by_host(self, host_system):
"""
        Function to get all Port Groups by host
Args:
host_system: Name of Host System
Returns: List of Port Group Spec
"""
pgs_list = []
for pg in host_system.config.network.portgroup:
pgs_list.append(pg)
return pgs_list
# Datacenter
def find_datacenter_by_name(self, datacenter_name):
"""
Function to get datacenter managed object by name
Args:
datacenter_name: Name of datacenter
Returns: datacenter managed object if found else None
"""
return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
def find_datastore_by_name(self, datastore_name):
"""
Function to get datastore managed object by name
Args:
datastore_name: Name of datastore
Returns: datastore managed object if found else None
"""
return find_datastore_by_name(self.content, datastore_name=datastore_name)
# Datastore cluster
def find_datastore_cluster_by_name(self, datastore_cluster_name):
"""
Function to get datastore cluster managed object by name
Args:
datastore_cluster_name: Name of datastore cluster
Returns: Datastore cluster managed object if found else None
"""
data_store_clusters = get_all_objs(self.content, [vim.StoragePod])
for dsc in data_store_clusters:
if dsc.name == datastore_cluster_name:
return dsc
return None
| gpl-3.0 | 6,227,202,465,816,956,000 | 35.176117 | 138 | 0.581325 | false |
heke123/chromium-crosswalk | third_party/WebKit/Source/build/scripts/make_cssom_types.py | 6 | 1588 | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import css_properties
import in_generator
from name_utilities import enum_for_css_keyword
import template_expander
class CSSOMTypesWriter(css_properties.CSSProperties):
def __init__(self, in_file_path):
super(CSSOMTypesWriter, self).__init__(in_file_path)
for property in self._properties.values():
types = []
# Expand types
for singleType in property['typedom_types']:
if singleType == 'Length':
types.append('SimpleLength')
types.append('CalcLength')
else:
types.append(singleType)
property['typedom_types'] = types
# Generate Keyword ID values from keywords.
property['keywordIDs'] = map(
enum_for_css_keyword, property['keywords'])
self._outputs = {
'CSSOMTypes.cpp': self.generate_types,
'CSSOMKeywords.cpp': self.generate_keywords,
}
@template_expander.use_jinja('CSSOMTypes.cpp.tmpl')
def generate_types(self):
return {
'properties': self._properties,
}
@template_expander.use_jinja('CSSOMKeywords.cpp.tmpl')
def generate_keywords(self):
return {
'properties': self._properties,
}
if __name__ == '__main__':
in_generator.Maker(CSSOMTypesWriter).main(sys.argv)
| bsd-3-clause | -2,059,179,192,311,746,600 | 30.137255 | 72 | 0.602645 | false |
srsman/odoo | openerp/osv/fields.py | 45 | 75006 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Fields:
- simple
- relations (one2many, many2one, many2many)
- function
Fields Attributes:
    * _classic_read: is a classic sql field
* _type : field type
* _auto_join: for one2many and many2one fields, tells whether select
queries will join the relational table instead of replacing the
field condition by an equivalent-one based on a search.
* readonly
* required
* size
"""
import base64
import datetime as DT
import functools
import logging
import pytz
import re
import xmlrpclib
from operator import itemgetter
from contextlib import contextmanager
from psycopg2 import Binary
import openerp
import openerp.tools as tools
from openerp.tools.translate import _
from openerp.tools import float_repr, float_round, frozendict, html_sanitize
import simplejson
from openerp import SUPERUSER_ID, registry
@contextmanager
def _get_cursor():
# yield a valid cursor from any environment or create a new one if none found
from openerp.api import Environment
from openerp.http import request
try:
request.env # force request's env to be computed
except RuntimeError:
pass # ignore if not in a request
for env in Environment.envs:
if not env.cr.closed:
yield env.cr
break
else:
with registry().cursor() as cr:
yield cr
EMPTY_DICT = frozendict()
_logger = logging.getLogger(__name__)
def _symbol_set(symb):
if symb is None or symb == False:
return None
elif isinstance(symb, unicode):
return symb.encode('utf-8')
return str(symb)
class _column(object):
""" Base of all fields, a database column
An instance of this object is a *description* of a database column. It will
not hold any data, but only provide the methods to manipulate data of an
ORM record or even prepare/update the database to hold such a field of data.
"""
_classic_read = True
_classic_write = True
_auto_join = False
_properties = False
_type = 'unknown'
_obj = None
_multi = False
_symbol_c = '%s'
_symbol_f = _symbol_set
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
_deprecated = False
__slots__ = [
'copy', # whether value is copied by BaseModel.copy()
'string',
'help',
'required',
'readonly',
'_domain',
'_context',
'states',
'priority',
'change_default',
'size',
'ondelete',
'translate',
'select',
'manual',
'write',
'read',
'selectable',
'group_operator',
'groups', # CSV list of ext IDs of groups
'deprecated', # Optional deprecation warning
'_args',
'_prefetch',
]
def __init__(self, string='unknown', required=False, readonly=False, domain=[], context={}, states=None, priority=0, change_default=False, size=None, ondelete=None, translate=False, select=False, manual=False, **args):
"""
The 'manual' keyword argument specifies if the field is a custom one.
It corresponds to the 'state' column in ir_model_fields.
"""
# add parameters and default values
args['copy'] = args.get('copy', True)
args['string'] = string
args['help'] = args.get('help', '')
args['required'] = required
args['readonly'] = readonly
args['_domain'] = domain
args['_context'] = context
args['states'] = states
args['priority'] = priority
args['change_default'] = change_default
args['size'] = size
args['ondelete'] = ondelete.lower() if ondelete else None
args['translate'] = translate
args['select'] = select
args['manual'] = manual
args['write'] = args.get('write', False)
args['read'] = args.get('read', False)
args['selectable'] = args.get('selectable', True)
args['group_operator'] = args.get('group_operator', None)
args['groups'] = args.get('groups', None)
args['deprecated'] = args.get('deprecated', None)
args['_prefetch'] = args.get('_prefetch', True)
self._args = EMPTY_DICT
for key, val in args.iteritems():
setattr(self, key, val)
# prefetch only if _classic_write, not deprecated and not manual
if not self._classic_write or self.deprecated or self.manual:
self._prefetch = False
def __getattr__(self, name):
""" Access a non-slot attribute. """
if name == '_args':
raise AttributeError(name)
try:
return self._args[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
""" Set a slot or non-slot attribute. """
try:
object.__setattr__(self, name, value)
except AttributeError:
if self._args:
self._args[name] = value
else:
self._args = {name: value} # replace EMPTY_DICT
def __delattr__(self, name):
""" Remove a non-slot attribute. """
try:
del self._args[name]
except KeyError:
raise AttributeError(name)
def new(self, _computed_field=False, **args):
""" Return a column like `self` with the given parameters; the parameter
`_computed_field` tells whether the corresponding field is computed.
"""
# memory optimization: reuse self whenever possible; you can reduce the
# average memory usage per registry by 10 megabytes!
column = type(self)(**args)
return self if self.to_field_args() == column.to_field_args() else column
def to_field(self):
""" convert column `self` to a new-style field """
from openerp.fields import Field
return Field.by_type[self._type](column=self, **self.to_field_args())
def to_field_args(self):
""" return a dictionary with all the arguments to pass to the field """
base_items = [
('copy', self.copy),
('index', self.select),
('manual', self.manual),
('string', self.string),
('help', self.help),
('readonly', self.readonly),
('required', self.required),
('states', self.states),
('groups', self.groups),
('change_default', self.change_default),
('deprecated', self.deprecated),
]
truthy_items = filter(itemgetter(1), [
('group_operator', self.group_operator),
('size', self.size),
('ondelete', self.ondelete),
('translate', self.translate),
('domain', self._domain),
('context', self._context),
])
return dict(base_items + truthy_items + self._args.items())
def restart(self):
pass
def set(self, cr, obj, id, name, value, user=None, context=None):
cr.execute('update '+obj._table+' set '+name+'='+self._symbol_set[0]+' where id=%s', (self._symbol_set[1](value), id))
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
raise Exception(_('undefined get method !'))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
ids = obj.search(cr, uid, args+self._domain+[(name, 'ilike', value)], offset, limit, context=context)
res = obj.read(cr, uid, ids, [name], context=context)
return [x[name] for x in res]
def as_display_name(self, cr, uid, obj, value, context=None):
"""Converts a field value to a suitable string representation for a record,
e.g. when this field is used as ``rec_name``.
:param obj: the ``BaseModel`` instance this column belongs to
:param value: a proper value as returned by :py:meth:`~openerp.orm.osv.BaseModel.read`
for this column
"""
# delegated to class method, so a column type A can delegate
# to a column type B.
return self._as_display_name(self, cr, uid, obj, value, context=None)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
        # This needs to be a class method, in case a column type A has to delegate
# to a column type B.
return tools.ustr(value)
# ---------------------------------------------------------
# Simple fields
# ---------------------------------------------------------
class boolean(_column):
_type = 'boolean'
_symbol_c = '%s'
_symbol_f = bool
_symbol_set = (_symbol_c, _symbol_f)
__slots__ = []
def __init__(self, string='unknown', required=False, **args):
super(boolean, self).__init__(string=string, required=required, **args)
if required:
_logger.debug(
"required=True is deprecated: making a boolean field"
" `required` has no effect, as NULL values are "
"automatically turned into False. args: %r",args)
class integer(_column):
_type = 'integer'
_symbol_c = '%s'
_symbol_f = lambda x: int(x or 0)
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self,x: x or 0
__slots__ = []
def __init__(self, string='unknown', required=False, **args):
super(integer, self).__init__(string=string, required=required, **args)
class reference(_column):
_type = 'reference'
_classic_read = False # post-process to handle missing target
__slots__ = ['selection']
def __init__(self, string, selection, size=None, **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, size=size, selection=selection, **args)
def to_field_args(self):
args = super(reference, self).to_field_args()
args['selection'] = self.selection
return args
def get(self, cr, obj, ids, name, uid=None, context=None, values=None):
result = {}
# copy initial values fetched previously.
for value in values:
result[value['id']] = value[name]
if value[name]:
model, res_id = value[name].split(',')
if not obj.pool[model].exists(cr, uid, [int(res_id)], context=context):
result[value['id']] = False
return result
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
if value:
# reference fields have a 'model,id'-like value, that we need to convert
# to a real name
model_name, res_id = value.split(',')
if model_name in obj.pool and res_id:
model = obj.pool[model_name]
names = model.name_get(cr, uid, [int(res_id)], context=context)
return names[0][1] if names else False
return tools.ustr(value)
# takes a string (encoded in utf8) and returns a string (encoded in utf8)
def _symbol_set_char(self, symb):
#TODO:
# * we need to remove the "symb==False" from the next line BUT
# for now too many things rely on this broken behavior
# * the symb==None test should be common to all data types
if symb is None or symb == False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
u_symb = tools.ustr(symb)
return u_symb[:self.size].encode('utf8')
class char(_column):
_type = 'char'
__slots__ = ['_symbol_f', '_symbol_set', '_symbol_set_char']
def __init__(self, string="unknown", size=None, **args):
_column.__init__(self, string=string, size=size or None, **args)
# self._symbol_set_char defined to keep the backward compatibility
self._symbol_f = self._symbol_set_char = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
class text(_column):
_type = 'text'
__slots__ = []
class html(text):
_type = 'html'
_symbol_c = '%s'
__slots__ = ['_sanitize', '_strip_style', '_symbol_f', '_symbol_set']
def _symbol_set_html(self, value):
if value is None or value is False:
return None
if not self._sanitize:
return value
return html_sanitize(value, strip_style=self._strip_style)
def __init__(self, string='unknown', sanitize=True, strip_style=False, **args):
super(html, self).__init__(string=string, **args)
self._sanitize = sanitize
self._strip_style = strip_style
# symbol_set redefinition because of sanitize specific behavior
self._symbol_f = self._symbol_set_html
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(html, self).to_field_args()
args['sanitize'] = self._sanitize
return args
import __builtin__
def _symbol_set_float(self, x):
result = __builtin__.float(x or 0.0)
digits = self.digits
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
class float(_column):
_type = 'float'
_symbol_c = '%s'
_symbol_get = lambda self,x: x or 0.0
__slots__ = ['_digits', '_digits_compute', '_symbol_f', '_symbol_set']
@property
def digits(self):
if self._digits_compute:
with _get_cursor() as cr:
return self._digits_compute(cr)
else:
return self._digits
def __init__(self, string='unknown', digits=None, digits_compute=None, required=False, **args):
_column.__init__(self, string=string, required=required, **args)
# synopsis: digits_compute(cr) -> (precision, scale)
self._digits = digits
self._digits_compute = digits_compute
self._symbol_f = lambda x: _symbol_set_float(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
def to_field_args(self):
args = super(float, self).to_field_args()
args['digits'] = self._digits_compute or self._digits
return args
def digits_change(self, cr):
pass
class date(_column):
_type = 'date'
__slots__ = []
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def today(*args):
""" Returns the current date in a format fit for being a
default value to a ``date`` field.
This method should be provided as is to the _defaults dict, it
should not be called.
"""
return DT.date.today().strftime(
tools.DEFAULT_SERVER_DATE_FORMAT)
@staticmethod
def context_today(model, cr, uid, context=None, timestamp=None):
"""Returns the current date as seen in the client's timezone
in a format fit for date fields.
This method may be passed as value to initialize _defaults.
:param Model model: model (osv) for which the date value is being
computed - automatically passed when used in
_defaults.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a
datetime, regular dates can't be converted
between timezones.)
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: str
"""
today = timestamp or DT.datetime.now()
context_today = None
if context and context.get('tz'):
tz_name = context['tz']
else:
user = model.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
try:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
utc_today = utc.localize(today, is_dst=False) # UTC = no DST
context_today = utc_today.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific today date, "
"using the UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
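    # Usage sketch (illustrative only; the field name is hypothetical): since
    # the signature matches what the ORM passes to _defaults callables, the
    # method can be used directly as a default value in a model definition:
    #   _defaults = {
    #       'date_invoice': fields.date.context_today,
    #   }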
@staticmethod
def date_to_datetime(model, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
server-side UTC timestamp, assuming a default arbitrary
                time of 12:00 PM (noon) - because a time is needed.
        :param str userdate: date string in user time zone
:return: UTC datetime string for server-side use
"""
user_date = DT.datetime.strptime(userdate, tools.DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = model.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + DT.timedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
class datetime(_column):
_type = 'datetime'
__slots__ = []
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
@staticmethod
def now(*args):
""" Returns the current datetime in a format fit for being a
default value to a ``datetime`` field.
This method should be provided as is to the _defaults dict, it
should not be called.
"""
return DT.datetime.now().strftime(
tools.DEFAULT_SERVER_DATETIME_FORMAT)
@staticmethod
def context_timestamp(cr, uid, timestamp, context=None):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
        display on client side. For _defaults, :meth:`fields.datetime.now`
should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:param dict context: the 'tz' key in the context should give the
name of the User/Client timezone (otherwise
UTC is used)
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, DT.datetime), 'Datetime instance expected'
if context and context.get('tz'):
tz_name = context['tz']
else:
registry = openerp.modules.registry.RegistryManager.get(cr.dbname)
user = registry['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
value = datetime.context_timestamp(cr, uid, DT.datetime.strptime(value, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
return tools.ustr(value.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))
class binary(_column):
_type = 'binary'
_classic_read = False
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast in symbol_f.
# This str coercion will only work for pure ASCII unicode strings,
# on purpose - non base64 data must be passed as a 8bit byte strings.
_symbol_c = '%s'
_symbol_f = lambda symb: symb and Binary(str(symb)) or None
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = lambda self, x: x and str(x)
__slots__ = ['filters']
def __init__(self, string='unknown', filters=None, **args):
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, filters=filters, **args)
def get(self, cr, obj, ids, name, user=None, context=None, values=None):
if not context:
context = {}
if not values:
values = []
res = {}
for i in ids:
val = None
for v in values:
if v['id'] == i:
val = v[name]
break
# If client is requesting only the size of the field, we return it instead
# of the content. Presumably a separate request will be done to read the actual
# content if it's needed at some point.
# TODO: after 6.0 we should consider returning a dict with size and content instead of
# having an implicit convention for the value
if val and context.get('bin_size_%s' % name, context.get('bin_size')):
res[i] = tools.human_size(long(val))
else:
res[i] = val
return res
class selection(_column):
_type = 'selection'
__slots__ = ['selection']
def __init__(self, selection, string='unknown', **args):
if callable(selection):
from openerp import api
selection = api.expected(api.cr_uid_context, selection)
_column.__init__(self, string=string, selection=selection, **args)
def to_field_args(self):
args = super(selection, self).to_field_args()
args['selection'] = self.selection
return args
@classmethod
def reify(cls, cr, uid, model, field, context=None):
""" Munges the field's ``selection`` attribute as necessary to get
        something usable out of it: calls it if it's a function, applies
translations to labels if it's not.
A callable ``selection`` is considered translated on its own.
:param orm.Model model:
:param _column field:
"""
if callable(field.selection):
return field.selection(model, cr, uid, context)
if not (context and 'lang' in context):
return field.selection
# field_to_dict isn't given a field name, only a field object, we
# need to get the name back in order to perform the translation lookup
field_name = next(
name for name, column in model._columns.iteritems()
if column == field)
translation_filter = "%s,%s" % (model._name, field_name)
translate = functools.partial(
model.pool['ir.translation']._get_source,
cr, uid, translation_filter, 'selection', context['lang'])
return [
(value, translate(label))
for value, label in field.selection
]
# ---------------------------------------------------------
# Relationals fields
# ---------------------------------------------------------
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update
# (2, ID) remove (delete)
# (3, ID) unlink one (target id or target of relation)
# (4, ID) link
# (5) unlink all (only valid for one2many)
#
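# Illustrative sketch of how these command tuples are typically passed to
# write() on a relational field (model, field and record ids below are
# hypothetical):
#   model.write(cr, uid, [record_id], {
#       'line_ids': [
#           (0, 0, {'name': 'new line'}),  # create a new record and link it
#           (1, 42, {'name': 'renamed'}),  # update the linked record 42
#           (3, 43),                       # drop the link to record 43
#       ],
#   }, context=context)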
class many2one(_column):
_classic_read = False
_classic_write = True
_type = 'many2one'
_symbol_c = '%s'
_symbol_f = lambda x: x or None
_symbol_set = (_symbol_c, _symbol_f)
__slots__ = ['_obj', '_auto_join']
def __init__(self, obj, string='unknown', auto_join=False, **args):
args['ondelete'] = args.get('ondelete', 'set null')
_column.__init__(self, string=string, **args)
self._obj = obj
self._auto_join = auto_join
def to_field_args(self):
args = super(many2one, self).to_field_args()
args['comodel_name'] = self._obj
args['auto_join'] = self._auto_join
return args
def set(self, cr, obj_src, id, field, values, user=None, context=None):
if not context:
context = {}
obj = obj_src.pool[self._obj]
self._table = obj._table
if type(values) == type([]):
for act in values:
if act[0] == 0:
id_new = obj.create(cr, act[2])
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (id_new, id))
elif act[0] == 1:
obj.write(cr, [act[1]], act[2], context=context)
elif act[0] == 2:
cr.execute('delete from '+self._table+' where id=%s', (act[1],))
elif act[0] == 3 or act[0] == 5:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
elif act[0] == 4:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (act[1], id))
else:
if values:
cr.execute('update '+obj_src._table+' set '+field+'=%s where id=%s', (values, id))
else:
cr.execute('update '+obj_src._table+' set '+field+'=null where id=%s', (id,))
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', 'like', value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
return value[1] if isinstance(value, tuple) else tools.ustr(value)
class one2many(_column):
_classic_read = False
_classic_write = False
_type = 'one2many'
__slots__ = ['_obj', '_fields_id', '_limit', '_auto_join']
def __init__(self, obj, fields_id, string='unknown', limit=None, auto_join=False, **args):
# one2many columns are not copied by default
args['copy'] = args.get('copy', False)
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, **args)
self._obj = obj
self._fields_id = fields_id
self._limit = limit
self._auto_join = auto_join
        # one2many can't be used as condition for defaults
assert(self.change_default != True)
def to_field_args(self):
args = super(one2many, self).to_field_args()
args['comodel_name'] = self._obj
args['inverse_name'] = self._fields_id
args['auto_join'] = self._auto_join
args['limit'] = self._limit
return args
def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
if self._context:
context = dict(context or {})
context.update(self._context)
# retrieve the records in the comodel
comodel = obj.pool[self._obj].browse(cr, user, [], context)
inverse = self._fields_id
domain = self._domain(obj) if callable(self._domain) else self._domain
domain = domain + [(inverse, 'in', ids)]
records = comodel.search(domain, limit=self._limit)
result = {id: [] for id in ids}
# read the inverse of records without prefetching other fields on them
for record in records.with_context(prefetch_fields=False):
# record[inverse] may be a record or an integer
result[int(record[inverse])].append(record.id)
return result
def set(self, cr, obj, id, field, values, user=None, context=None):
result = []
context = dict(context or {})
context.update(self._context)
if not values:
return
obj = obj.pool[self._obj]
rec = obj.browse(cr, user, [], context=context)
with rec.env.norecompute():
_table = obj._table
for act in values:
if act[0] == 0:
act[2][self._fields_id] = id
id_new = obj.create(cr, user, act[2], context=context)
result += obj._store_get_values(cr, user, [id_new], act[2].keys(), context)
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
inverse_field = obj._fields.get(self._fields_id)
assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the model has on delete cascade, just delete the row
if inverse_field.ondelete == "cascade":
obj.unlink(cr, user, [act[1]], context=context)
else:
cr.execute('update '+_table+' set '+self._fields_id+'=null where id=%s', (act[1],))
elif act[0] == 4:
# table of the field (parent_model in case of inherit)
field = obj.pool[self._obj]._fields[self._fields_id]
field_model = field.base_field.model_name
field_table = obj.pool[field_model]._table
cr.execute("select 1 from {0} where id=%s and {1}=%s".format(field_table, self._fields_id), (act[1], id))
if not cr.fetchone():
# Must use write() to recompute parent_store structure if needed and check access rules
obj.write(cr, user, [act[1]], {self._fields_id:id}, context=context or {})
elif act[0] == 5:
inverse_field = obj._fields.get(self._fields_id)
assert inverse_field, 'Trying to unlink the content of a o2m but the pointed model does not have a m2o'
# if the o2m has a static domain we must respect it when unlinking
domain = self._domain(obj) if callable(self._domain) else self._domain
extra_domain = domain or []
ids_to_unlink = obj.search(cr, user, [(self._fields_id,'=',id)] + extra_domain, context=context)
# If the model has cascade deletion, we delete the rows because it is the intended behavior,
# otherwise we only nullify the reverse foreign key column.
if inverse_field.ondelete == "cascade":
obj.unlink(cr, user, ids_to_unlink, context=context)
else:
obj.write(cr, user, ids_to_unlink, {self._fields_id: False}, context=context)
elif act[0] == 6:
# Must use write() to recompute parent_store structure if needed
obj.write(cr, user, act[2], {self._fields_id:id}, context=context or {})
ids2 = act[2] or [0]
cr.execute('select id from '+_table+' where '+self._fields_id+'=%s and id <> ALL (%s)', (id,ids2))
ids3 = map(lambda x:x[0], cr.fetchall())
obj.write(cr, user, ids3, {self._fields_id:False}, context=context or {})
return result
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
domain = self._domain(obj) if callable(self._domain) else self._domain
return obj.pool[self._obj].name_search(cr, uid, value, domain, operator, context=context,limit=limit)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('One2Many columns should not be used as record name (_rec_name)')
#
# Values: (0, 0, { fields }) create
# (1, ID, { fields }) update (write fields to ID)
# (2, ID) remove (calls unlink on ID, that will also delete the relationship because of the ondelete)
# (3, ID) unlink (delete the relationship between the two objects but does not delete ID)
# (4, ID) link (add a relationship)
# (5, ID) unlink all
# (6, ?, ids) set a list of links
#
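# Illustrative sketch (field name and ids are hypothetical): replacing the
# whole set of links on a many2many column usually goes through (6, 0, ids):
#   model.write(cr, uid, [record_id],
#               {'tag_ids': [(6, 0, [8, 5, 6, 4])]}, context=context)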
class many2many(_column):
"""Encapsulates the logic of a many-to-many bidirectional relationship, handling the
low-level details of the intermediary relationship table transparently.
A many-to-many relationship is always symmetrical, and can be declared and accessed
from either endpoint model.
If ``rel`` (relationship table name), ``id1`` (source foreign key column name)
or id2 (destination foreign key column name) are not specified, the system will
provide default values. This will by default only allow one single symmetrical
many-to-many relationship between the source and destination model.
For multiple many-to-many relationship between the same models and for
relationships where source and destination models are the same, ``rel``, ``id1``
and ``id2`` should be specified explicitly.
:param str obj: destination model
:param str rel: optional name of the intermediary relationship table. If not specified,
a canonical name will be derived based on the alphabetically-ordered
model names of the source and destination (in the form: ``amodel_bmodel_rel``).
Automatic naming is not possible when the source and destination are
the same, for obvious ambiguity reasons.
:param str id1: optional name for the column holding the foreign key to the current
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `src_model_id`).
:param str id2: optional name for the column holding the foreign key to the destination
model in the relationship table. If not specified, a canonical name
will be derived based on the model name (in the form: `dest_model_id`)
:param str string: field label
"""
_classic_read = False
_classic_write = False
_type = 'many2many'
__slots__ = ['_obj', '_rel', '_id1', '_id2', '_limit', '_auto_join']
def __init__(self, obj, rel=None, id1=None, id2=None, string='unknown', limit=None, **args):
"""
"""
args['_prefetch'] = args.get('_prefetch', False)
_column.__init__(self, string=string, **args)
self._obj = obj
if rel and '.' in rel:
            raise Exception(_('The second argument of the many2many field %s must be a SQL table !'\
                ' You used %s, which is not a valid SQL table name.')% (string,rel))
self._rel = rel
self._id1 = id1
self._id2 = id2
self._limit = limit
self._auto_join = False
def to_field_args(self):
args = super(many2many, self).to_field_args()
args['comodel_name'] = self._obj
args['relation'] = self._rel
args['column1'] = self._id1
args['column2'] = self._id2
args['limit'] = self._limit
return args
def _sql_names(self, source_model):
"""Return the SQL names defining the structure of the m2m relationship table
:return: (m2m_table, local_col, dest_col) where m2m_table is the table name,
local_col is the name of the column holding the current model's FK, and
                 dest_col is the name of the column holding the destination model's FK.
"""
tbl, col1, col2 = self._rel, self._id1, self._id2
if not all((tbl, col1, col2)):
# the default table name is based on the stable alphabetical order of tables
dest_model = source_model.pool[self._obj]
tables = tuple(sorted([source_model._table, dest_model._table]))
if not tbl:
assert tables[0] != tables[1], 'Implicit/Canonical naming of m2m relationship table '\
'is not possible when source and destination models are '\
'the same'
tbl = '%s_%s_rel' % tables
if not col1:
col1 = '%s_id' % source_model._table
if not col2:
col2 = '%s_id' % dest_model._table
return tbl, col1, col2
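    # For instance (hypothetical models, with 'res_partner' as the source
    # model's table and 'res_partner_category' as the destination's, and no
    # explicit rel/id1/id2), this returns:
    #   ('res_partner_res_partner_category_rel',
    #    'res_partner_id', 'res_partner_category_id')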
def _get_query_and_where_params(self, cr, model, ids, values, where_params):
""" Extracted from ``get`` to facilitate fine-tuning of the generated
query. """
query = 'SELECT %(rel)s.%(id2)s, %(rel)s.%(id1)s \
FROM %(rel)s, %(from_c)s \
WHERE %(rel)s.%(id1)s IN %%s \
AND %(rel)s.%(id2)s = %(tbl)s.id \
%(where_c)s \
%(order_by)s \
%(limit)s \
OFFSET %(offset)d' \
% values
return query, where_params
def get(self, cr, model, ids, name, user=None, offset=0, context=None, values=None):
if not context:
context = {}
if not values:
values = {}
res = {}
if not ids:
return res
for id in ids:
res[id] = []
if offset:
_logger.warning(
"Specifying offset at a many2many.get() is deprecated and may"
" produce unpredictable results.")
obj = model.pool[self._obj]
rel, id1, id2 = self._sql_names(model)
# static domains are lists, and are evaluated both here and on client-side, while string
        # domains are supposed to be dynamic and are evaluated on client-side only (thus ignored here)
# FIXME: make this distinction explicit in API!
domain = isinstance(self._domain, list) and self._domain or []
wquery = obj._where_calc(cr, user, domain, context=context)
obj._apply_ir_rules(cr, user, wquery, 'read', context=context)
order_by = obj._generate_order_by(None, wquery)
from_c, where_c, where_params = wquery.get_sql()
if where_c:
where_c = ' AND ' + where_c
limit_str = ''
if self._limit is not None:
limit_str = ' LIMIT %d' % self._limit
query, where_params = self._get_query_and_where_params(cr, model, ids, {'rel': rel,
'from_c': from_c,
'tbl': obj._table,
'id1': id1,
'id2': id2,
'where_c': where_c,
'limit': limit_str,
'order_by': order_by,
'offset': offset,
}, where_params)
cr.execute(query, [tuple(ids),] + where_params)
for r in cr.fetchall():
res[r[1]].append(r[0])
return res
def set(self, cr, model, id, name, values, user=None, context=None):
if not context:
context = {}
if not values:
return
rel, id1, id2 = self._sql_names(model)
obj = model.pool[self._obj]
for act in values:
if not (isinstance(act, list) or isinstance(act, tuple)) or not act:
continue
if act[0] == 0:
idnew = obj.create(cr, user, act[2], context=context)
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, idnew))
elif act[0] == 1:
obj.write(cr, user, [act[1]], act[2], context=context)
elif act[0] == 2:
obj.unlink(cr, user, [act[1]], context=context)
elif act[0] == 3:
cr.execute('delete from '+rel+' where ' + id1 + '=%s and '+ id2 + '=%s', (id, act[1]))
elif act[0] == 4:
# following queries are in the same transaction - so should be relatively safe
cr.execute('SELECT 1 FROM '+rel+' WHERE '+id1+' = %s and '+id2+' = %s', (id, act[1]))
if not cr.fetchone():
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s,%s)', (id, act[1]))
elif act[0] == 5:
cr.execute('delete from '+rel+' where ' + id1 + ' = %s', (id,))
elif act[0] == 6:
d1, d2,tables = obj.pool.get('ir.rule').domain_get(cr, user, obj._name, context=context)
if d1:
d1 = ' and ' + ' and '.join(d1)
else:
d1 = ''
cr.execute('delete from '+rel+' where '+id1+'=%s AND '+id2+' IN (SELECT '+rel+'.'+id2+' FROM '+rel+', '+','.join(tables)+' WHERE '+rel+'.'+id1+'=%s AND '+rel+'.'+id2+' = '+obj._table+'.id '+ d1 +')', [id, id]+d2)
for act_nbr in act[2]:
cr.execute('insert into '+rel+' ('+id1+','+id2+') values (%s, %s)', (id, act_nbr))
#
# TODO: use a name_search
#
def search(self, cr, obj, args, name, value, offset=0, limit=None, uid=None, operator='like', context=None):
return obj.pool[self._obj].search(cr, uid, args+self._domain+[('name', operator, value)], offset, limit, context=context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
raise NotImplementedError('Many2Many columns should not be used as record name (_rec_name)')
def get_nice_size(value):
size = 0
if isinstance(value, (int,long)):
size = value
elif value: # this is supposed to be a string
size = len(value)
    if size < 12: # assume the value is already a human-readable size string
return value
return tools.human_size(size)
# See http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# and http://bugs.python.org/issue10066
invalid_xml_low_bytes = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]')
def sanitize_binary_value(value):
# binary fields should be 7-bit ASCII base64-encoded data,
# but we do additional sanity checks to make sure the values
# are not something else that won't pass via XML-RPC
if isinstance(value, (xmlrpclib.Binary, tuple, list, dict)):
# these builtin types are meant to pass untouched
return value
# Handle invalid bytes values that will cause problems
# for XML-RPC. See for more info:
# - http://bugs.python.org/issue10066
# - http://www.w3.org/TR/2000/REC-xml-20001006#NT-Char
# Coercing to unicode would normally allow it to properly pass via
# XML-RPC, transparently encoded as UTF-8 by xmlrpclib.
# (this works for _any_ byte values, thanks to the fallback
# to latin-1 passthrough encoding when decoding to unicode)
value = tools.ustr(value)
# Due to Python bug #10066 this could still yield invalid XML
# bytes, specifically in the low byte range, that will crash
# the decoding side: [\x00-\x08\x0b-\x0c\x0e-\x1f]
# So check for low bytes values, and if any, perform
# base64 encoding - not very smart or useful, but this is
# our last resort to avoid crashing the request.
if invalid_xml_low_bytes.search(value):
# b64-encode after restoring the pure bytes with latin-1
# passthrough encoding
value = base64.b64encode(value.encode('latin-1'))
return value
# ---------------------------------------------------------
# Function fields
# ---------------------------------------------------------
class function(_column):
"""
A field whose value is computed by a function (rather
than being read from the database).
:param fnct: the callable that will compute the field value.
:param arg: arbitrary value to be passed to ``fnct`` when computing the value.
:param fnct_inv: the callable that will allow writing values in that field
(if not provided, the field is read-only).
:param fnct_inv_arg: arbitrary value to be passed to ``fnct_inv`` when
writing a value.
:param str type: type of the field simulated by the function field
:param fnct_search: the callable that allows searching on the field
(if not provided, search will not return any result).
:param store: store computed value in database
(see :ref:`The *store* parameter <field-function-store>`).
:type store: True or dict specifying triggers for field computation
:param multi: name of batch for batch computation of function fields.
All fields with the same batch name will be computed by
a single function call. This changes the signature of the
``fnct`` callable.
.. _field-function-fnct: The ``fnct`` parameter
.. rubric:: The ``fnct`` parameter
The callable implementing the function field must have the following signature:
.. function:: fnct(model, cr, uid, ids, field_name(s), arg, context)
Implements the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param field_name(s): name of the field to compute, or if ``multi`` is provided,
list of field names to compute.
:type field_name(s): str | [str]
:param arg: arbitrary value passed when declaring the function field
:rtype: dict
:return: mapping of ``ids`` to computed values, or if multi is provided,
to a map of field_names to computed values
The values in the returned dictionary must be of the type specified by the type
argument in the field declaration.
Here is an example with a simple function ``char`` function field::
# declarations
def compute(self, cr, uid, ids, field_name, arg, context):
result = {}
# ...
return result
_columns['my_char'] = fields.function(compute, type='char', size=50)
# when called with ``ids=[1,2,3]``, ``compute`` could return:
{
1: 'foo',
2: 'bar',
3: False # null values should be returned explicitly too
}
If ``multi`` is set, then ``field_name`` is replaced by ``field_names``: a list
of the field names that should be computed. Each value in the returned
dictionary must then be a dictionary mapping field names to values.
Here is an example where two function fields (``name`` and ``age``)
are both computed by a single function field::
# declarations
            def compute_person_data(self, cr, uid, ids, field_names, arg, context):
result = {}
# ...
return result
_columns['name'] = fields.function(compute_person_data, type='char',\
size=50, multi='person_data')
            _columns['age'] = fields.function(compute_person_data, type='integer',\
multi='person_data')
# when called with ``ids=[1,2,3]``, ``compute_person_data`` could return:
{
1: {'name': 'Bob', 'age': 23},
2: {'name': 'Sally', 'age': 19},
3: {'name': 'unknown', 'age': False}
}
.. _field-function-fnct-inv:
.. rubric:: The ``fnct_inv`` parameter
This callable implements the write operation for the function field
and must have the following signature:
.. function:: fnct_inv(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context)
Callable that implements the ``write`` operation for the function field.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param int id: the identifier of the object to write on
:param str field_name: name of the field to set
:param fnct_inv_arg: arbitrary value passed when declaring the function field
:return: True
When writing values for a function field, the ``multi`` parameter is ignored.
.. _field-function-fnct-search:
.. rubric:: The ``fnct_search`` parameter
This callable implements the search operation for the function field
and must have the following signature:
.. function:: fnct_search(model, cr, uid, model_again, field_name, criterion, context)
Callable that implements the ``search`` operation for the function field by expanding
a search criterion based on the function field into a new domain based only on
columns that are stored in the database.
:param orm model: model to which the field belongs (should be ``self`` for
a model method)
:param orm model_again: same value as ``model`` (seriously! this is for backwards
compatibility)
:param str field_name: name of the field to search on
:param list criterion: domain component specifying the search criterion on the field.
:rtype: list
:return: domain to use instead of ``criterion`` when performing the search.
This new domain must be based only on columns stored in the database, as it
will be used directly without any translation.
The returned value must be a domain, that is, a list of the form [(field_name, operator, operand)].
The most generic way to implement ``fnct_search`` is to directly search for the records that
match the given ``criterion``, and return their ``ids`` wrapped in a domain, such as
``[('id','in',[1,3,5])]``.
.. _field-function-store:
.. rubric:: The ``store`` parameter
The ``store`` parameter allows caching the result of the field computation in the
database, and defining the triggers that will invalidate that cache and force a
recomputation of the function field.
When not provided, the field is computed every time its value is read.
The value of ``store`` may be either ``True`` (to recompute the field value whenever
any field in the same record is modified), or a dictionary specifying a more
flexible set of recomputation triggers.
A trigger specification is a dictionary that maps the names of the models that
will trigger the computation, to a tuple describing the trigger rule, in the
following form::
store = {
'trigger_model': (mapping_function,
['trigger_field1', 'trigger_field2'],
priority),
}
A trigger rule is defined by a 3-item tuple where:
* The ``mapping_function`` is defined as follows:
.. function:: mapping_function(trigger_model, cr, uid, trigger_ids, context)
Callable that maps record ids of a trigger model to ids of the
corresponding records in the source model (whose field values
need to be recomputed).
:param orm model: trigger_model
:param list trigger_ids: ids of the records of trigger_model that were
modified
:rtype: list
:return: list of ids of the source model whose function field values
need to be recomputed
        * The second item is a list of the fields that should act as triggers for
the computation. If an empty list is given, all fields will act as triggers.
* The last item is the priority, used to order the triggers when processing them
after any write operation on a model that has function field triggers. The
default priority is 10.
In fact, setting store = True is the same as using the following trigger dict::
store = {
'model_itself': (lambda self, cr, uid, ids, context: ids,
[],
10)
}
"""
_properties = True
__slots__ = [
'_type',
'_classic_read',
'_classic_write',
'_symbol_c',
'_symbol_f',
'_symbol_set',
'_symbol_get',
'_fnct',
'_arg',
'_fnct_inv',
'_fnct_inv_arg',
'_fnct_search',
'_multi',
'store',
'_digits',
'_digits_compute',
'selection',
'_obj',
]
@property
def digits(self):
if self._digits_compute:
with _get_cursor() as cr:
return self._digits_compute(cr)
else:
return self._digits
#
# multi: compute several fields in one call
#
def __init__(self, fnct, arg=None, fnct_inv=None, fnct_inv_arg=None, type='float', fnct_search=None, obj=None, store=False, multi=False, **args):
self._classic_read = False
self._classic_write = False
self._prefetch = False
self._symbol_c = '%s'
self._symbol_f = _symbol_set
self._symbol_set = (self._symbol_c, self._symbol_f)
self._symbol_get = None
# pop attributes that should not be assigned to self
self._digits = args.pop('digits', (16,2))
self._digits_compute = args.pop('digits_compute', None)
self._obj = args.pop('relation', obj)
# function fields are not copied by default
args['copy'] = args.get('copy', False)
_column.__init__(self, **args)
self._type = type
self._fnct = fnct
self._arg = arg
self._fnct_inv = fnct_inv
self._fnct_inv_arg = fnct_inv_arg
self._fnct_search = fnct_search
self.store = store
self._multi = multi
if not fnct_inv:
self.readonly = 1
if not fnct_search and not store:
self.selectable = False
if callable(args.get('selection')):
from openerp import api
self.selection = api.expected(api.cr_uid_context, args['selection'])
if store:
if self._type != 'many2one':
# m2o fields need to return tuples with name_get, not just foreign keys
self._classic_read = True
self._classic_write = True
if type=='binary':
self._symbol_get=lambda x:x and str(x)
else:
self._prefetch = True
if type == 'char':
self._symbol_c = char._symbol_c
self._symbol_f = lambda x: _symbol_set_char(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
elif type == 'float':
self._symbol_c = float._symbol_c
self._symbol_f = lambda x: _symbol_set_float(self, x)
self._symbol_set = (self._symbol_c, self._symbol_f)
else:
type_class = globals().get(type)
if type_class is not None:
self._symbol_c = type_class._symbol_c
self._symbol_f = type_class._symbol_f
self._symbol_set = type_class._symbol_set
def new(self, _computed_field=False, **args):
if _computed_field:
# field is computed, we need an instance of a non-function column
type_class = globals()[self._type]
return type_class(**args)
else:
# HACK: function fields are tricky to recreate, simply return a copy
import copy
return copy.copy(self)
def to_field_args(self):
args = super(function, self).to_field_args()
args['store'] = bool(self.store)
if self._type in ('float',):
args['digits'] = self._digits_compute or self._digits
elif self._type in ('selection', 'reference'):
args['selection'] = self.selection
elif self._type in ('many2one', 'one2many', 'many2many'):
args['comodel_name'] = self._obj
return args
def digits_change(self, cr):
pass
def search(self, cr, uid, obj, name, args, context=None):
if not self._fnct_search:
#CHECKME: should raise an exception
return []
return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
def postprocess(self, cr, uid, obj, field, value=None, context=None):
return self._postprocess_batch(cr, uid, obj, field, {0: value}, context=context)[0]
def _postprocess_batch(self, cr, uid, obj, field, values, context=None):
if not values:
return values
if context is None:
context = {}
field_type = obj._columns[field]._type
new_values = dict(values)
if field_type == 'binary':
if context.get('bin_size'):
# client requests only the size of binary fields
for rid, value in values.iteritems():
if value:
new_values[rid] = get_nice_size(value)
elif not context.get('bin_raw'):
for rid, value in values.iteritems():
if value:
new_values[rid] = sanitize_binary_value(value)
return new_values
def get(self, cr, obj, ids, name, uid=False, context=None, values=None):
multi = self._multi
# if we already have a value, don't recompute it.
        # This happens in case of stored many2one fields
if values and not multi and name in values[0]:
result = dict((v['id'], v[name]) for v in values)
elif values and multi and all(n in values[0] for n in name):
result = dict((v['id'], dict((n, v[n]) for n in name)) for v in values)
else:
result = self._fnct(obj, cr, uid, ids, name, self._arg, context)
if multi:
swap = {}
for rid, values in result.iteritems():
for f, v in values.iteritems():
if f not in name:
continue
swap.setdefault(f, {})[rid] = v
for field, values in swap.iteritems():
new_values = self._postprocess_batch(cr, uid, obj, field, values, context)
for rid, value in new_values.iteritems():
result[rid][field] = value
else:
result = self._postprocess_batch(cr, uid, obj, name, result, context)
return result
def set(self, cr, obj, id, name, value, user=None, context=None):
if not context:
context = {}
if self._fnct_inv:
self._fnct_inv(obj, cr, user, id, name, value, self._fnct_inv_arg, context)
@classmethod
def _as_display_name(cls, field, cr, uid, obj, value, context=None):
# Function fields are supposed to emulate a basic field type,
# so they can delegate to the basic type for record name rendering
return globals()[field._type]._as_display_name(field, cr, uid, obj, value, context=context)
# ---------------------------------------------------------
# Related fields
# ---------------------------------------------------------
class related(function):
"""Field that points to some data inside another field of the current record.
Example::
_columns = {
'foo_id': fields.many2one('my.foo', 'Foo'),
'bar': fields.related('foo_id', 'frol', type='char', string='Frol of Foo'),
}
"""
__slots__ = ['arg', '_relations']
def _related_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
# assume self._arg = ('foo', 'bar', 'baz')
# domain = [(name, op, val)] => search [('foo.bar.baz', op, val)]
field = '.'.join(self._arg)
return map(lambda x: (field, x[1], x[2]), domain)
def _related_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for instance in obj.browse(cr, uid, ids, context=context):
# traverse all fields except the last one
for field in self.arg[:-1]:
instance = instance[field][:1]
if instance:
# write on the last field of the target record
instance.write({self.arg[-1]: values})
def _related_read(self, obj, cr, uid, ids, field_name, args, context=None):
res = {}
for record in obj.browse(cr, SUPERUSER_ID, ids, context=context):
value = record
# traverse all fields except the last one
for field in self.arg[:-1]:
value = value[field][:1]
# read the last field on the target record
res[record.id] = value[self.arg[-1]]
if self._type == 'many2one':
# res[id] is a recordset; convert it to (id, name) or False.
# Perform name_get as root, as seeing the name of a related object depends on
            # access rights of the source document, not the target, so the user may not have access.
value_ids = list(set(value.id for value in res.itervalues() if value))
value_name = dict(obj.pool[self._obj].name_get(cr, SUPERUSER_ID, value_ids, context=context))
res = dict((id, bool(value) and (value.id, value_name[value.id])) for id, value in res.iteritems())
elif self._type in ('one2many', 'many2many'):
# res[id] is a recordset; convert it to a list of ids
res = dict((id, value.ids) for id, value in res.iteritems())
return res
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(related, self).__init__(self._related_read, arg, self._related_write, fnct_inv_arg=arg, fnct_search=self._related_search, **args)
if self.store is True:
# TODO: improve here to change self.store = {...} according to related objects
pass
class sparse(function):
__slots__ = ['serialization_field']
def convert_value(self, obj, cr, uid, record, value, read_value, context=None):
"""
+ For a many2many field, a list of tuples is expected.
              Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
            + For a one2many field, a list of tuples is expected.
              Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
"""
if self._type == 'many2many':
if not value:
return []
assert value[0][0] == 6, 'Unsupported m2m value for sparse field: %s' % value
return value[0][2]
elif self._type == 'one2many':
if not read_value:
read_value = []
relation_obj = obj.pool[self.relation]
for vals in value:
assert vals[0] in (0,1,2), 'Unsupported o2m value for sparse field: %s' % vals
if vals[0] == 0:
read_value.append(relation_obj.create(cr, uid, vals[2], context=context))
elif vals[0] == 1:
relation_obj.write(cr, uid, vals[1], vals[2], context=context)
elif vals[0] == 2:
relation_obj.unlink(cr, uid, vals[1], context=context)
read_value.remove(vals[1])
return read_value
return value
def _sparse_write(self,obj,cr, uid, ids, field_name, value, args, context=None):
if not type(ids) == list:
ids = [ids]
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
if value is None:
# simply delete the key to unset it.
serialized.pop(field_name, None)
else:
serialized[field_name] = self.convert_value(obj, cr, uid, record, value, serialized.get(field_name), context=context)
obj.write(cr, uid, ids, {self.serialization_field: serialized}, context=context)
return True
def _sparse_read(self, obj, cr, uid, ids, field_names, args, context=None):
results = {}
records = obj.browse(cr, uid, ids, context=context)
for record in records:
# grab serialized value as object - already deserialized
serialized = getattr(record, self.serialization_field)
results[record.id] = {}
for field_name in field_names:
field_type = obj._columns[field_name]._type
value = serialized.get(field_name, False)
if field_type in ('one2many','many2many'):
value = value or []
if value:
# filter out deleted records as superuser
relation_obj = obj.pool[obj._columns[field_name].relation]
value = relation_obj.exists(cr, openerp.SUPERUSER_ID, value)
if type(value) in (int,long) and field_type == 'many2one':
relation_obj = obj.pool[obj._columns[field_name].relation]
# check for deleted record as superuser
if not relation_obj.exists(cr, openerp.SUPERUSER_ID, [value]):
value = False
results[record.id][field_name] = value
return results
def __init__(self, serialization_field, **kwargs):
self.serialization_field = serialization_field
super(sparse, self).__init__(self._sparse_read, fnct_inv=self._sparse_write, multi='__sparse_multi', **kwargs)
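# Illustrative declaration (field and column names are hypothetical): the value is
# kept inside the serialized container column 'extra_data' instead of a dedicated
# database column.
#   'x_priority': fields.sparse(type='integer', string='Priority',
#                               serialization_field='extra_data'),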
# ---------------------------------------------------------
# Dummy fields
# ---------------------------------------------------------
class dummy(function):
__slots__ = ['arg', '_relations']
def _dummy_search(self, tobj, cr, uid, obj=None, name=None, domain=None, context=None):
return []
def _dummy_write(self, obj, cr, uid, ids, field_name, values, args, context=None):
return False
def _dummy_read(self, obj, cr, uid, ids, field_name, args, context=None):
return {}
def __init__(self, *arg, **args):
self.arg = arg
self._relations = []
super(dummy, self).__init__(self._dummy_read, arg, self._dummy_write, fnct_inv_arg=arg, fnct_search=self._dummy_search, **args)
# ---------------------------------------------------------
# Serialized fields
# ---------------------------------------------------------
class serialized(_column):
""" A field able to store an arbitrary python data structure.
Note: only plain components allowed.
"""
_type = 'serialized'
__slots__ = []
def _symbol_set_struct(val):
return simplejson.dumps(val)
def _symbol_get_struct(self, val):
return simplejson.loads(val or '{}')
_symbol_c = '%s'
_symbol_f = _symbol_set_struct
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = _symbol_get_struct
def __init__(self, *args, **kwargs):
kwargs['_prefetch'] = kwargs.get('_prefetch', False)
super(serialized, self).__init__(*args, **kwargs)
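# Illustrative declaration (hypothetical): the column stores any JSON-serializable
# python structure, e.g. {'tags': ['a', 'b'], 'count': 2}.
#   'extra_data': fields.serialized('Extra data'),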
# TODO: review this class completely for speed improvement
class property(function):
__slots__ = []
def to_field_args(self):
args = super(property, self).to_field_args()
args['company_dependent'] = True
return args
def _property_search(self, tobj, cr, uid, obj, name, domain, context=None):
ir_property = obj.pool['ir.property']
result = []
for field, operator, value in domain:
result += ir_property.search_multi(cr, uid, name, tobj._name, operator, value, context=context)
return result
def _property_write(self, obj, cr, uid, id, prop_name, value, obj_dest, context=None):
ir_property = obj.pool['ir.property']
ir_property.set_multi(cr, uid, prop_name, obj._name, {id: value}, context=context)
return True
def _property_read(self, obj, cr, uid, ids, prop_names, obj_dest, context=None):
ir_property = obj.pool['ir.property']
res = {id: {} for id in ids}
for prop_name in prop_names:
field = obj._fields[prop_name]
values = ir_property.get_multi(cr, uid, prop_name, obj._name, ids, context=context)
if field.type == 'many2one':
# name_get the non-null values as SUPERUSER_ID
vals = sum(set(filter(None, values.itervalues())),
obj.pool[field.comodel_name].browse(cr, uid, [], context=context))
vals_name = dict(vals.sudo().name_get()) if vals else {}
for id, value in values.iteritems():
ng = False
if value and value.id in vals_name:
ng = value.id, vals_name[value.id]
res[id][prop_name] = ng
else:
for id, value in values.iteritems():
res[id][prop_name] = value
return res
def __init__(self, **args):
if 'view_load' in args:
_logger.warning("view_load attribute is deprecated on ir.fields. Args: %r", args)
args = dict(args)
args['obj'] = args.pop('relation', '') or args.get('obj', '')
super(property, self).__init__(
fnct=self._property_read,
fnct_inv=self._property_write,
fnct_search=self._property_search,
multi='properties',
**args
)
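# Illustrative declaration (model names are hypothetical): a company-dependent
# default account, stored in ir.property rather than on the record itself.
#   'property_account_receivable': fields.property(type='many2one',
#       relation='account.account', string="Account Receivable"),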
class column_info(object):
""" Struct containing details about an osv column, either one local to
its model, or one inherited via _inherits.
.. attribute:: name
name of the column
.. attribute:: column
column instance, subclass of :class:`_column`
.. attribute:: parent_model
if the column is inherited, name of the model that contains it,
``None`` for local columns.
.. attribute:: parent_column
the name of the column containing the m2o relationship to the
parent model that contains this column, ``None`` for local columns.
.. attribute:: original_parent
if the column is inherited, name of the original parent model that
contains it i.e in case of multilevel inheritance, ``None`` for
local columns.
"""
__slots__ = ['name', 'column', 'parent_model', 'parent_column', 'original_parent']
def __init__(self, name, column, parent_model=None, parent_column=None, original_parent=None):
self.name = name
self.column = column
self.parent_model = parent_model
self.parent_column = parent_column
self.original_parent = original_parent
def __str__(self):
return '%s(%s, %s, %s, %s, %s)' % (
self.__class__.__name__, self.name, self.column,
self.parent_model, self.parent_column, self.original_parent)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 296,074,843,118,288,640 | 40.462687 | 228 | 0.562422 | false |
a-parhom/edx-platform | common/lib/capa/capa/tests/test_answer_pool.py | 37 | 27075 | """
Tests the logic of the "answer-pool" attribute, e.g.
<choicegroup answer-pool="4">
"""
import unittest
import textwrap
from capa.tests.helpers import test_capa_system, new_loncapa_problem
from capa.responsetypes import LoncapaProblemError
class CapaAnswerPoolTest(unittest.TestCase):
"""Capa Answer Pool Test"""
def setUp(self):
super(CapaAnswerPoolTest, self).setUp()
self.system = test_capa_system()
# XML problem setup used by a few tests.
common_question_xml = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
def test_answer_pool_4_choices_1_multiplechoiceresponse_seed1(self):
problem = new_loncapa_problem(self.common_question_xml, seed=723)
the_html = problem.get_html()
# [('choice_3', u'wrong-3'), ('choice_5', u'correct-2'), ('choice_1', u'wrong-2'), ('choice_4', u'wrong-4')]
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(response.has_answerpool())
self.assertEqual(response.unmask_order(), ['choice_3', 'choice_5', 'choice_1', 'choice_4'])
def test_answer_pool_4_choices_1_multiplechoiceresponse_seed2(self):
problem = new_loncapa_problem(self.common_question_xml, seed=9)
the_html = problem.get_html()
# [('choice_0', u'wrong-1'), ('choice_4', u'wrong-4'), ('choice_3', u'wrong-3'), ('choice_2', u'correct-1')]
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertTrue(hasattr(response, 'has_answerpool'))
self.assertEqual(response.unmask_order(), ['choice_0', 'choice_4', 'choice_3', 'choice_2'])
def test_no_answer_pool_4_choices_1_multiplechoiceresponse(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
# Check about masking
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_0_answer_pool_4_choices_1_multiplechoiceresponse(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="0">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_invalid_answer_pool_value(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2.3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "answer-pool"):
new_loncapa_problem(xml_str)
def test_invalid_answer_pool_none_correct(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="false">wrong!!</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
new_loncapa_problem(xml_str)
def test_invalid_answer_pool_all_correct(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="true">!wrong-1</choice>
<choice correct="true">!wrong-2</choice>
<choice correct="true">!wrong-3</choice>
<choice correct="true">!wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
new_loncapa_problem(xml_str)
def test_answer_pool_5_choices_1_multiplechoiceresponse_seed1(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="5">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'correct-2'.*'wrong-1'.*'wrong-2'.*.*'wrong-3'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertEqual(response.unmask_order(), ['choice_5', 'choice_0', 'choice_1', 'choice_3', 'choice_4'])
def test_answer_pool_2_multiplechoiceresponses_seed1(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-2'.*'wrong-1'.*'correct-2'.*\].*</div>" # rng shared
# str2 = r"<div>.*\[.*'correct-2'.*'wrong-2'.*'wrong-3'.*\].*</div>" # rng independent
str3 = r"<div>\{.*'1_solution_2'.*\}</div>"
str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_2_multiplechoiceresponses_seed2(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=9)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-2'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>"
str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_random_consistent(self):
"""
The point of this test is to make sure that the exact randomization
per seed does not change.
"""
xml_str = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="2">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="3">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
<choice correct="true">correct-3</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str)
the_html = problem.get_html()
str1 = (r"<div>.*\[.*'correct-2'.*'wrong-2'.*\].*</div>.*" +
r"<div>.*\[.*'wrong-1'.*'correct-2'.*'wrong-4'.*\].*</div>.*" +
r"<div>.*\[.*'correct-1'.*'wrong-4'.*\].*</div>.*" +
r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*\].*</div>")
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1)
def test_no_answer_pool(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
self.assertRegexpMatches(the_html, str1)
# attributes *not* present
response = problem.responders.values()[0]
self.assertFalse(response.has_mask())
self.assertFalse(response.has_answerpool())
def test_answer_pool_and_no_answer_pool(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true" explanation-id="solution1">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true" explanation-id="solution2">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solutionset>
<solution explanation-id="solution1">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 1st solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
<solution explanation-id="solution2">
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the 2nd solution</p>
</div>
</solution>
</solutionset>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
str2 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
str4 = r"<div>\{.*'1_solution_3'.*\}</div>"
self.assertRegexpMatches(the_html, str1)
self.assertRegexpMatches(the_html, str2)
self.assertRegexpMatches(the_html, str3)
self.assertRegexpMatches(the_html, str4)
without_new_lines = the_html.replace("\n", "")
self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
def test_answer_pool_without_solutionset(self):
xml_str = textwrap.dedent("""
<problem>
<p>What is the correct answer?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">wrong-1</choice>
<choice correct="false">wrong-2</choice>
<choice correct="true">correct-1</choice>
<choice correct="false">wrong-3</choice>
<choice correct="false">wrong-4</choice>
<choice correct="true">correct-2</choice>
</choicegroup>
</multiplechoiceresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>This is the solution</p>
<p>Not much to explain here, sorry!</p>
</div>
</solution>
</problem>
""")
problem = new_loncapa_problem(xml_str, seed=723)
the_html = problem.get_html()
self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
| agpl-3.0 | 5,500,496,429,524,710,000 | 40.589862 | 138 | 0.527424 | false |
ahu-odoo/odoo | addons/account_voucher/report/__init__.py | 378 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_voucher_sales_receipt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,969,069,067,367,116,000 | 44.125 | 78 | 0.619575 | false |
leppa/home-assistant | homeassistant/components/auth/__init__.py | 3 | 17184 | """Component to allow users to login and get tokens.
# POST /auth/token
This is an OAuth2 endpoint for granting tokens. We currently support the grant
types "authorization_code" and "refresh_token". Because we follow the OAuth2
spec, data should be send in formatted as x-www-form-urlencoded. Examples will
be in JSON as it's more readable.
## Grant type authorization_code
Exchange the authorization code retrieved from the login flow for tokens.
{
"client_id": "https://hassbian.local:8123/",
"grant_type": "authorization_code",
"code": "411ee2f916e648d691e937ae9344681e"
}
Return value will be the access and refresh tokens. The access token will have
a limited expiration. New access tokens can be requested using the refresh
token.
{
"access_token": "ABCDEFGH",
"expires_in": 1800,
"refresh_token": "IJKLMNOPQRST",
"token_type": "Bearer"
}
## Grant type refresh_token
Request a new access token using a refresh token.
{
"client_id": "https://hassbian.local:8123/",
"grant_type": "refresh_token",
"refresh_token": "IJKLMNOPQRST"
}
Return value will be a new access token. The access token will have
a limited expiration.
{
"access_token": "ABCDEFGH",
"expires_in": 1800,
"token_type": "Bearer"
}
## Revoking a refresh token
It is also possible to revoke a refresh token and all access tokens that have
ever been granted by that refresh token. Response code will ALWAYS be 200.
{
"token": "IJKLMNOPQRST",
"action": "revoke"
}
# Websocket API
## Get current user
Sending the websocket command `auth/current_user` will return the current user
of the active websocket connection.
{
"id": 10,
"type": "auth/current_user",
}
The result payload looks like
{
"id": 10,
"type": "result",
"success": true,
"result": {
"id": "USER_ID",
"name": "John Doe",
"is_owner": true,
"credentials": [{
"auth_provider_type": "homeassistant",
"auth_provider_id": null
}],
"mfa_modules": [{
"id": "totp",
"name": "TOTP",
"enabled": true
}]
}
}
## Create a long-lived access token
Sending the websocket command `auth/long_lived_access_token` will create
a long-lived access token for the current user. The access token will not be
saved in Home Assistant, so the user needs to record it in a secure place.
{
"id": 11,
"type": "auth/long_lived_access_token",
"client_name": "GPS Logger",
"lifespan": 365
}
Result will be a long-lived access token:
{
"id": 11,
"type": "result",
"success": true,
"result": "ABCDEFGH"
}
"""
from datetime import timedelta
import logging
import uuid
from aiohttp import web
import voluptuous as vol
from homeassistant.auth.models import (
TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
Credentials,
User,
)
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_REAL_IP
from homeassistant.components.http.auth import async_sign_path
from homeassistant.components.http.ban import log_invalid_auth
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from . import indieauth, login_flow, mfa_setup_flow
DOMAIN = "auth"
WS_TYPE_CURRENT_USER = "auth/current_user"
SCHEMA_WS_CURRENT_USER = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_CURRENT_USER}
)
WS_TYPE_LONG_LIVED_ACCESS_TOKEN = "auth/long_lived_access_token"
SCHEMA_WS_LONG_LIVED_ACCESS_TOKEN = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
vol.Required("lifespan"): int, # days
vol.Required("client_name"): str,
vol.Optional("client_icon"): str,
}
)
WS_TYPE_REFRESH_TOKENS = "auth/refresh_tokens"
SCHEMA_WS_REFRESH_TOKENS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_REFRESH_TOKENS}
)
WS_TYPE_DELETE_REFRESH_TOKEN = "auth/delete_refresh_token"
SCHEMA_WS_DELETE_REFRESH_TOKEN = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_DELETE_REFRESH_TOKEN,
vol.Required("refresh_token_id"): str,
}
)
WS_TYPE_SIGN_PATH = "auth/sign_path"
SCHEMA_WS_SIGN_PATH = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_SIGN_PATH,
vol.Required("path"): str,
vol.Optional("expires", default=30): int,
}
)
RESULT_TYPE_CREDENTIALS = "credentials"
RESULT_TYPE_USER = "user"
_LOGGER = logging.getLogger(__name__)
@bind_hass
def create_auth_code(hass, client_id: str, user: User) -> str:
"""Create an authorization code to fetch tokens."""
return hass.data[DOMAIN](client_id, user)
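# Illustrative flow (the client_id and user values are hypothetical): a login flow
# calls create_auth_code() and hands the returned code to the client, which later
# exchanges it at POST /auth/token with grant_type=authorization_code.
#   code = create_auth_code(hass, "https://example.com/", user)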
async def async_setup(hass, config):
"""Component to allow users to login."""
store_result, retrieve_result = _create_auth_code_store()
hass.data[DOMAIN] = store_result
hass.http.register_view(TokenView(retrieve_result))
hass.http.register_view(LinkUserView(retrieve_result))
hass.components.websocket_api.async_register_command(
WS_TYPE_CURRENT_USER, websocket_current_user, SCHEMA_WS_CURRENT_USER
)
hass.components.websocket_api.async_register_command(
WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
websocket_create_long_lived_access_token,
SCHEMA_WS_LONG_LIVED_ACCESS_TOKEN,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_REFRESH_TOKENS, websocket_refresh_tokens, SCHEMA_WS_REFRESH_TOKENS
)
hass.components.websocket_api.async_register_command(
WS_TYPE_DELETE_REFRESH_TOKEN,
websocket_delete_refresh_token,
SCHEMA_WS_DELETE_REFRESH_TOKEN,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SIGN_PATH, websocket_sign_path, SCHEMA_WS_SIGN_PATH
)
await login_flow.async_setup(hass, store_result)
await mfa_setup_flow.async_setup(hass)
return True
class TokenView(HomeAssistantView):
"""View to issue or revoke tokens."""
url = "/auth/token"
name = "api:auth:token"
requires_auth = False
cors_allowed = True
def __init__(self, retrieve_user):
"""Initialize the token view."""
self._retrieve_user = retrieve_user
@log_invalid_auth
async def post(self, request):
"""Grant a token."""
hass = request.app["hass"]
data = await request.post()
grant_type = data.get("grant_type")
# IndieAuth 6.3.5
# The revocation endpoint is the same as the token endpoint.
# The revocation request includes an additional parameter,
# action=revoke.
if data.get("action") == "revoke":
return await self._async_handle_revoke_token(hass, data)
if grant_type == "authorization_code":
return await self._async_handle_auth_code(
hass, data, str(request[KEY_REAL_IP])
)
if grant_type == "refresh_token":
return await self._async_handle_refresh_token(
hass, data, str(request[KEY_REAL_IP])
)
return self.json({"error": "unsupported_grant_type"}, status_code=400)
async def _async_handle_revoke_token(self, hass, data):
"""Handle revoke token request."""
# OAuth 2.0 Token Revocation [RFC7009]
# 2.2 The authorization server responds with HTTP status code 200
# if the token has been revoked successfully or if the client
# submitted an invalid token.
token = data.get("token")
if token is None:
return web.Response(status=200)
refresh_token = await hass.auth.async_get_refresh_token_by_token(token)
if refresh_token is None:
return web.Response(status=200)
await hass.auth.async_remove_refresh_token(refresh_token)
return web.Response(status=200)
async def _async_handle_auth_code(self, hass, data, remote_addr):
"""Handle authorization code request."""
client_id = data.get("client_id")
if client_id is None or not indieauth.verify_client_id(client_id):
return self.json(
{"error": "invalid_request", "error_description": "Invalid client id"},
status_code=400,
)
code = data.get("code")
if code is None:
return self.json(
{"error": "invalid_request", "error_description": "Invalid code"},
status_code=400,
)
user = self._retrieve_user(client_id, RESULT_TYPE_USER, code)
if user is None or not isinstance(user, User):
return self.json(
{"error": "invalid_request", "error_description": "Invalid code"},
status_code=400,
)
# refresh user
user = await hass.auth.async_get_user(user.id)
if not user.is_active:
return self.json(
{"error": "access_denied", "error_description": "User is not active"},
status_code=403,
)
refresh_token = await hass.auth.async_create_refresh_token(user, client_id)
access_token = hass.auth.async_create_access_token(refresh_token, remote_addr)
return self.json(
{
"access_token": access_token,
"token_type": "Bearer",
"refresh_token": refresh_token.token,
"expires_in": int(
refresh_token.access_token_expiration.total_seconds()
),
}
)
async def _async_handle_refresh_token(self, hass, data, remote_addr):
"""Handle authorization code request."""
client_id = data.get("client_id")
if client_id is not None and not indieauth.verify_client_id(client_id):
return self.json(
{"error": "invalid_request", "error_description": "Invalid client id"},
status_code=400,
)
token = data.get("refresh_token")
if token is None:
return self.json({"error": "invalid_request"}, status_code=400)
refresh_token = await hass.auth.async_get_refresh_token_by_token(token)
if refresh_token is None:
return self.json({"error": "invalid_grant"}, status_code=400)
if refresh_token.client_id != client_id:
return self.json({"error": "invalid_request"}, status_code=400)
access_token = hass.auth.async_create_access_token(refresh_token, remote_addr)
return self.json(
{
"access_token": access_token,
"token_type": "Bearer",
"expires_in": int(
refresh_token.access_token_expiration.total_seconds()
),
}
)
class LinkUserView(HomeAssistantView):
"""View to link existing users to new credentials."""
url = "/auth/link_user"
name = "api:auth:link_user"
def __init__(self, retrieve_credentials):
"""Initialize the link user view."""
self._retrieve_credentials = retrieve_credentials
@RequestDataValidator(vol.Schema({"code": str, "client_id": str}))
async def post(self, request, data):
"""Link a user."""
hass = request.app["hass"]
user = request["hass_user"]
credentials = self._retrieve_credentials(
data["client_id"], RESULT_TYPE_CREDENTIALS, data["code"]
)
if credentials is None:
return self.json_message("Invalid code", status_code=400)
await hass.auth.async_link_user(user, credentials)
return self.json_message("User linked")
@callback
def _create_auth_code_store():
"""Create an in memory store."""
temp_results = {}
@callback
def store_result(client_id, result):
"""Store flow result and return a code to retrieve it."""
if isinstance(result, User):
result_type = RESULT_TYPE_USER
elif isinstance(result, Credentials):
result_type = RESULT_TYPE_CREDENTIALS
else:
raise ValueError("result has to be either User or Credentials")
code = uuid.uuid4().hex
temp_results[(client_id, result_type, code)] = (
dt_util.utcnow(),
result_type,
result,
)
return code
@callback
def retrieve_result(client_id, result_type, code):
"""Retrieve flow result."""
key = (client_id, result_type, code)
if key not in temp_results:
return None
created, _, result = temp_results.pop(key)
# OAuth 4.2.1
# The authorization code MUST expire shortly after it is issued to
# mitigate the risk of leaks. A maximum authorization code lifetime of
# 10 minutes is RECOMMENDED.
if dt_util.utcnow() - created < timedelta(minutes=10):
return result
return None
return store_result, retrieve_result
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_current_user(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return the current user."""
user = connection.user
enabled_modules = await hass.auth.async_get_enabled_mfa(user)
connection.send_message(
websocket_api.result_message(
msg["id"],
{
"id": user.id,
"name": user.name,
"is_owner": user.is_owner,
"is_admin": user.is_admin,
"credentials": [
{
"auth_provider_type": c.auth_provider_type,
"auth_provider_id": c.auth_provider_id,
}
for c in user.credentials
],
"mfa_modules": [
{
"id": module.id,
"name": module.name,
"enabled": module.id in enabled_modules,
}
for module in hass.auth.auth_mfa_modules
],
},
)
)
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_create_long_lived_access_token(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Create or a long-lived access token."""
refresh_token = await hass.auth.async_create_refresh_token(
connection.user,
client_name=msg["client_name"],
client_icon=msg.get("client_icon"),
token_type=TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=msg["lifespan"]),
)
access_token = hass.auth.async_create_access_token(refresh_token)
connection.send_message(websocket_api.result_message(msg["id"], access_token))
@websocket_api.ws_require_user()
@callback
def websocket_refresh_tokens(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return metadata of users refresh tokens."""
current_id = connection.refresh_token_id
connection.send_message(
websocket_api.result_message(
msg["id"],
[
{
"id": refresh.id,
"client_id": refresh.client_id,
"client_name": refresh.client_name,
"client_icon": refresh.client_icon,
"type": refresh.token_type,
"created_at": refresh.created_at,
"is_current": refresh.id == current_id,
"last_used_at": refresh.last_used_at,
"last_used_ip": refresh.last_used_ip,
}
for refresh in connection.user.refresh_tokens.values()
],
)
)
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_delete_refresh_token(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Handle a delete refresh token request."""
refresh_token = connection.user.refresh_tokens.get(msg["refresh_token_id"])
if refresh_token is None:
return websocket_api.error_message(
msg["id"], "invalid_token_id", "Received invalid token"
)
await hass.auth.async_remove_refresh_token(refresh_token)
connection.send_message(websocket_api.result_message(msg["id"], {}))
@websocket_api.ws_require_user()
@callback
def websocket_sign_path(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Handle a sign path request."""
connection.send_message(
websocket_api.result_message(
msg["id"],
{
"path": async_sign_path(
hass,
connection.refresh_token_id,
msg["path"],
timedelta(seconds=msg["expires"]),
)
},
)
)
| apache-2.0 | -4,940,339,518,614,416,000 | 29.740608 | 87 | 0.611383 | false |
flask-admin/flask-admin | flask_admin/model/form.py | 17 | 7221 | import inspect
import warnings
from flask_admin.form import BaseForm, rules
from flask_admin._compat import iteritems
from wtforms.fields import HiddenField
from wtforms.fields.core import UnboundField
from wtforms.validators import InputRequired
from .widgets import XEditableWidget
def converts(*args):
def _inner(func):
func._converter_for = frozenset(args)
return func
return _inner
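# Illustrative use inside a ModelConverterBase subclass (the column type names
# below are assumptions -- they depend on the backend being converted):
#   @converts('String', 'Unicode')
#   def conv_String(self, field_args, **extra):
#       return fields.StringField(**field_args)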
def create_editable_list_form(form_base_class, form_class, widget=None):
"""
Create a form class with all the fields wrapped in a FieldList.
Wrapping each field in FieldList allows submitting POST requests
in this format: ('<field_name>-<primary_key>', '<value>')
Used in the editable list view.
:param form_base_class:
WTForms form class, by default `form_base_class` from base.
:param form_class:
WTForms form class generated by `form.get_form`.
:param widget:
WTForms widget class. Defaults to `XEditableWidget`.
"""
if widget is None:
widget = XEditableWidget()
class ListForm(form_base_class):
list_form_pk = HiddenField(validators=[InputRequired()])
# iterate FormMeta to get unbound fields, replace widget, copy to ListForm
for name, obj in iteritems(form_class.__dict__):
if isinstance(obj, UnboundField):
obj.kwargs['widget'] = widget
setattr(ListForm, name, obj)
if name == "list_form_pk":
raise Exception('Form already has a list_form_pk column.')
return ListForm
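# Illustrative sketch (UserForm is a hypothetical form produced by form.get_form):
#   ListForm = create_editable_list_form(BaseForm, UserForm)
#   # each field of ListForm then posts as ('<field_name>-<primary_key>', '<value>')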
class InlineBaseFormAdmin(object):
"""
Settings for inline form administration.
You can use this class to customize displayed form.
For example::
class MyUserInfoForm(InlineBaseFormAdmin):
form_columns = ('name', 'email')
"""
_defaults = ['form_base_class', 'form_columns', 'form_excluded_columns', 'form_args', 'form_extra_fields']
def __init__(self, **kwargs):
"""
Constructor
:param kwargs:
Additional options
"""
for k in self._defaults:
if not hasattr(self, k):
setattr(self, k, None)
for k, v in iteritems(kwargs):
setattr(self, k, v)
# Convert form rules
form_rules = getattr(self, 'form_rules', None)
if form_rules:
self._form_rules = rules.RuleSet(self, form_rules)
else:
self._form_rules = None
def get_form(self):
"""
        If you want to use a completely custom form for an inline field, you can override
        Flask-Admin's form generation logic by overriding this method and returning your form.
"""
return None
def postprocess_form(self, form_class):
"""
Post process form. Use this to contribute fields.
For example::
class MyInlineForm(InlineFormAdmin):
def postprocess_form(self, form):
form.value = StringField('value')
return form
class MyAdmin(ModelView):
inline_models = (MyInlineForm(ValueModel),)
"""
return form_class
def on_model_change(self, form, model, is_created):
"""
Called when inline model is about to be saved.
:param form:
Inline form
:param model:
Model
:param is_created:
Will be set to True if the model is being created, False if edited
"""
pass
def _on_model_change(self, form, model, is_created):
"""
Compatibility helper.
"""
try:
self.on_model_change(form, model, is_created)
except TypeError:
msg = ('%s.on_model_change() now accepts third ' +
'parameter is_created. Please update your code') % self.model
warnings.warn(msg)
self.on_model_change(form, model)
class InlineFormAdmin(InlineBaseFormAdmin):
"""
Settings for inline form administration. Used by relational backends (SQLAlchemy, Peewee), where model
class can not be inherited from the parent model definition.
"""
def __init__(self, model, **kwargs):
"""
Constructor
:param model:
Model class
"""
self.model = model
super(InlineFormAdmin, self).__init__(**kwargs)
class ModelConverterBase(object):
def __init__(self, converters=None, use_mro=True):
self.use_mro = use_mro
if not converters:
converters = {}
for name in dir(self):
obj = getattr(self, name)
if hasattr(obj, '_converter_for'):
for classname in obj._converter_for:
converters[classname] = obj
self.converters = converters
def get_converter(self, column):
if self.use_mro:
types = inspect.getmro(type(column.type))
else:
types = [type(column.type)]
# Search by module + name
for col_type in types:
type_string = '%s.%s' % (col_type.__module__, col_type.__name__)
if type_string in self.converters:
return self.converters[type_string]
# Search by name
for col_type in types:
if col_type.__name__ in self.converters:
return self.converters[col_type.__name__]
return None
def get_form(self, model, base_class=BaseForm,
only=None, exclude=None,
field_args=None):
raise NotImplementedError()
class InlineModelConverterBase(object):
form_admin_class = InlineFormAdmin
def __init__(self, view):
"""
Base constructor
:param view:
View class
"""
self.view = view
def get_label(self, info, name):
"""
Get inline model field label
:param info:
Inline model info
:param name:
Field name
"""
form_name = getattr(info, 'form_label', None)
if form_name:
return form_name
column_labels = getattr(self.view, 'column_labels', None)
if column_labels and name in column_labels:
return column_labels[name]
return None
def get_info(self, p):
"""
Figure out InlineFormAdmin information.
:param p:
Inline model. Can be one of:
- ``tuple``, first value is related model instance,
second is dictionary with options
- ``InlineFormAdmin`` instance
- Model class
"""
if isinstance(p, tuple):
return self.form_admin_class(p[0], **p[1])
elif isinstance(p, self.form_admin_class):
return p
return None
class FieldPlaceholder(object):
"""
Field placeholder for model convertors.
"""
def __init__(self, field):
self.field = field
| bsd-3-clause | 5,434,799,259,979,292,000 | 27.541502 | 110 | 0.560449 | false |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/words/protocols/jabber/sasl_mechanisms.py | 13 | 8730 | # -*- test-case-name: twisted.words.test.test_jabbersaslmechanisms -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Protocol agnostic implementations of SASL authentication mechanisms.
"""
from __future__ import absolute_import, division
import binascii, random, time, os
from hashlib import md5
from zope.interface import Interface, Attribute, implementer
from twisted.python.compat import iteritems, networkString
class ISASLMechanism(Interface):
name = Attribute("""Common name for the SASL Mechanism.""")
def getInitialResponse():
"""
Get the initial client response, if defined for this mechanism.
@return: initial client response string.
@rtype: C{str}.
"""
def getResponse(challenge):
"""
Get the response to a server challenge.
@param challenge: server challenge.
@type challenge: C{str}.
@return: client response.
@rtype: C{str}.
"""
@implementer(ISASLMechanism)
class Anonymous(object):
"""
Implements the ANONYMOUS SASL authentication mechanism.
This mechanism is defined in RFC 2245.
"""
name = 'ANONYMOUS'
def getInitialResponse(self):
return None
@implementer(ISASLMechanism)
class Plain(object):
"""
Implements the PLAIN SASL authentication mechanism.
The PLAIN SASL authentication mechanism is defined in RFC 2595.
"""
name = 'PLAIN'
def __init__(self, authzid, authcid, password):
"""
@param authzid: The authorization identity.
@type authzid: L{unicode}
@param authcid: The authentication identity.
@type authcid: L{unicode}
@param password: The plain-text password.
@type password: L{unicode}
"""
self.authzid = authzid or u''
self.authcid = authcid or u''
self.password = password or u''
def getInitialResponse(self):
return (self.authzid.encode('utf-8') + b"\x00" +
self.authcid.encode('utf-8') + b"\x00" +
self.password.encode('utf-8'))
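# Illustrative (values are hypothetical): the initial response is the
# NUL-separated authzid/authcid/password string, e.g.
#   Plain(None, u'user', u'secret').getInitialResponse() == b'\x00user\x00secret'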
@implementer(ISASLMechanism)
class DigestMD5(object):
"""
Implements the DIGEST-MD5 SASL authentication mechanism.
The DIGEST-MD5 SASL authentication mechanism is defined in RFC 2831.
"""
name = 'DIGEST-MD5'
def __init__(self, serv_type, host, serv_name, username, password):
"""
@param serv_type: An indication of what kind of server authentication
is being attempted against. For example, C{u"xmpp"}.
@type serv_type: C{unicode}
@param host: The authentication hostname. Also known as the realm.
This is used as a scope to help select the right credentials.
@type host: C{unicode}
@param serv_name: An additional identifier for the server.
@type serv_name: C{unicode}
@param username: The authentication username to use to respond to a
challenge.
@type username: C{unicode}
        @param password: The authentication password to use to respond to a
challenge.
@type password: C{unicode}
"""
self.username = username
self.password = password
self.defaultRealm = host
self.digest_uri = u'%s/%s' % (serv_type, host)
if serv_name is not None:
self.digest_uri += u'/%s' % (serv_name,)
def getInitialResponse(self):
return None
def getResponse(self, challenge):
directives = self._parse(challenge)
# Compat for implementations that do not send this along with
# a successful authentication.
if b'rspauth' in directives:
return b''
charset = directives[b'charset'].decode('ascii')
try:
realm = directives[b'realm']
except KeyError:
realm = self.defaultRealm.encode(charset)
return self._genResponse(charset,
realm,
directives[b'nonce'])
def _parse(self, challenge):
"""
Parses the server challenge.
Splits the challenge into a dictionary of directives with values.
@return: challenge directives and their values.
@rtype: C{dict} of C{str} to C{str}.
"""
s = challenge
paramDict = {}
cur = 0
remainingParams = True
while remainingParams:
# Parse a param. We can't just split on commas, because there can
# be some commas inside (quoted) param values, e.g.:
# qop="auth,auth-int"
middle = s.index(b"=", cur)
name = s[cur:middle].lstrip()
middle += 1
if s[middle:middle+1] == b'"':
middle += 1
end = s.index(b'"', middle)
value = s[middle:end]
cur = s.find(b',', end) + 1
if cur == 0:
remainingParams = False
else:
end = s.find(b',', middle)
if end == -1:
value = s[middle:].rstrip()
remainingParams = False
else:
value = s[middle:end].rstrip()
cur = end + 1
paramDict[name] = value
for param in (b'qop', b'cipher'):
if param in paramDict:
paramDict[param] = paramDict[param].split(b',')
return paramDict
def _unparse(self, directives):
"""
Create message string from directives.
@param directives: dictionary of directives (names to their values).
For certain directives, extra quotes are added, as
needed.
@type directives: C{dict} of C{str} to C{str}
@return: message string.
@rtype: C{str}.
"""
directive_list = []
for name, value in iteritems(directives):
if name in (b'username', b'realm', b'cnonce',
b'nonce', b'digest-uri', b'authzid', b'cipher'):
                directive = name + b'="' + value + b'"'
else:
directive = name + b'=' + value
directive_list.append(directive)
return b','.join(directive_list)
def _calculateResponse(self, cnonce, nc, nonce,
username, password, realm, uri):
"""
Calculates response with given encoded parameters.
@return: The I{response} field of a response to a Digest-MD5 challenge
of the given parameters.
@rtype: L{bytes}
"""
def H(s):
return md5(s).digest()
def HEX(n):
return binascii.b2a_hex(n)
def KD(k, s):
return H(k + b':' + s)
a1 = (H(username + b":" + realm + b":" + password) + b":" +
nonce + b":" +
cnonce)
a2 = b"AUTHENTICATE:" + uri
response = HEX(KD(HEX(H(a1)),
nonce + b":" + nc + b":" + cnonce + b":" +
b"auth" + b":" + HEX(H(a2))))
return response
def _genResponse(self, charset, realm, nonce):
"""
Generate response-value.
Creates a response to a challenge according to section 2.1.2.1 of
RFC 2831 using the C{charset}, C{realm} and C{nonce} directives
from the challenge.
"""
try:
username = self.username.encode(charset)
password = self.password.encode(charset)
digest_uri = self.digest_uri.encode(charset)
except UnicodeError:
# TODO - add error checking
raise
nc = networkString('%08x' % (1,)) # TODO: support subsequent auth.
cnonce = self._gen_nonce()
qop = b'auth'
# TODO - add support for authzid
response = self._calculateResponse(cnonce, nc, nonce,
username, password, realm,
digest_uri)
directives = {b'username': username,
b'realm' : realm,
b'nonce' : nonce,
b'cnonce' : cnonce,
b'nc' : nc,
b'qop' : qop,
b'digest-uri': digest_uri,
b'response': response,
b'charset': charset.encode('ascii')}
return self._unparse(directives)
def _gen_nonce(self):
nonceString = "%f:%f:%d" % (random.random(), time.time(), os.getpid())
nonceBytes = networkString(nonceString)
return md5(nonceBytes).hexdigest().encode('ascii')
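# Illustrative usage (hostnames and credentials are hypothetical):
#   mechanism = DigestMD5(u'xmpp', u'example.org', None, u'juliet', u'secret')
#   mechanism.getInitialResponse()   # None -- the server issues the challenge first
#   reply = mechanism.getResponse(challenge_bytes_received_from_server)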
| mit | -7,009,249,901,378,975,000 | 28.795222 | 78 | 0.544101 | false |
ScradFTW/WhoSampledMyLib | WhoSampledScraper.py | 1 | 6314 | """
File: WhoSampledScraper.py
Author: Brad Jobe
Version: 0.0.1
Scrapes relevant HTML content from an artist's track page
"""
import sys
import eyed3
import urllib2
from lxml import html
import httplib
import json
class WhoSampledScraper:
URL_WHOSAMPLED = "www.whosampled.com"
HTTP_PROTO = "http://"
HTTP_REDIRECT = "3"
SONGS_SAMPLED = 0
WHO_SAMPLED = 2
SONGS_SAMPLED_CALL = "songsSampled"
WHO_SAMPLED_CALL = "whoSampled"
def __init__(self, songLoc):
"""
Parses a songfile for the artist and title ID3 tags and creates the
        theoretical path for the song file's sample page.
Param: The directory path to a song file, as a string.
Throws: MissingTagException if tag(s) could not be found.
"""
songfile = eyed3.load(songLoc)
try:
self.whoSampledHTML = None
self.artistName = songfile.tag.artist
self.songTitle = songfile.tag.title
self.sampleJSON = {}
if self.artistName == None or self.songTitle == None:
raise MissingTagException()
except MissingTagException:
print "audiofile at " + songLoc + " has missing tag information"
self.whoSampledPath = ("/" + self.artistName + "/" + \
self.songTitle + "/").replace(" ", "-")
self.sampleJSON[self.whoSampledPath] = { self.SONGS_SAMPLED_CALL:{}, \
self.WHO_SAMPLED_CALL: {} }
def getSongsSampled(self):
"""
Returns a list of songs that were sampled in the given track.
"""
jsonSamples = self.sampleScraper(self.SONGS_SAMPLED_CALL)
return self.convertJsontoList(jsonSamples)
def getWhoSampled(self):
"""
Returns a list of songs that have used the given track as a sample.
"""
jsonSamples = self.sampleScraper(self.WHO_SAMPLED_CALL)
return self.convertJsontoList(jsonSamples)
def getHTMLFromPath(self):
"""
Returns the html content from the song's sample page.
Throws: RedirectException if the url is redirected away from the
predicted path of the songs's sample page.
"""
urlCheck = urllib2.urlopen(self.HTTP_PROTO + \
self.URL_WHOSAMPLED + \
self.whoSampledPath)
try:
if urlCheck.geturl().lower() != (self.HTTP_PROTO + \
self.URL_WHOSAMPLED + \
self.whoSampledPath).lower():
raise RedirectException()
except RedirectException:
print "The URL of " + self.songTitle + " by " + self.artistName + \
" was redirected."
return None
return urlCheck.read()
def sampleScraper(self, calltype):
"""
Scrapes sample data from the song's sample page.
Params: a string of specifying what type of sample data is to be
scraped from the sample page.
Returns: a list of song samples, as strings, or an empty list.
"""
self.cachedSamples = self.loadCachedSampleData()
try:
self.cachedSamples[self.whoSampledPath] == None
except KeyError:
self.sampleJson = self.searchForSampleData(calltype)
else:
self.sampleJson = self.cachedSamples[self.whoSampledPath][calltype]
return self.sampleJson
def searchForSampleData(self, calltype):
"""
loads html of artist's track page on WhoSampled.com and parses it for
the relevant sample data.
args: specific type of sample data to parse for
returns: None if sample data could not be found, returns sample data
in json format if successful page parse
"""
if self.whoSampledHTML == None:
self.whoSampledHTML = self.getHTMLFromPath()
if self.whoSampledHTML == None:
return None
splitHTML = self.whoSampledHTML.split("<span Was sampled")
if calltype == self.SONGS_SAMPLED_CALL:
whoSampledDoc = html.document_fromstring( \
splitHTML[self.SONGS_SAMPLED])
elif calltype == self.WHO_SAMPLED_CALL and len(splitHTML) > 1:
whoSampledDoc = html.document_fromstring( \
splitHTML[self.WHO_SAMPLED])
elif len(splitHTML) <= 1:
return None
artistNamesSamples = whoSampledDoc.find_class("trackArtist")
songTitlesSamples = whoSampledDoc.find_class("trackName")
if len(artistNamesSamples) != len(songTitlesSamples) \
or len(artistNamesSamples) < 1:
return None
for i in range(0, len(artistNamesSamples)):
a = artistNamesSamples[i].text_content()
s = songTitlesSamples[i].text_content()
self.sampleJSON[self.whoSampledPath][calltype][a] = s
self.cacheSampleData()
return self.sampleJSON
def loadCachedSampleData(self):
"""
loads stored sample data from previous lookups
returns: json sample data
"""
with open("samples.json", "r") as inSampleFile:
jsonData = json.load(inSampleFile)
inSampleFile.close()
return jsonData
def cacheSampleData(self):
"""
stores sample data that has not been previously cached
"""
self.cachedSamples[self.whoSampledPath] \
= self.sampleJSON[self.whoSampledPath]
with open('samples.json', 'w') as outSampleFile:
json.dump(self.cachedSamples, outSampleFile)
outSampleFile.close()
def convertJsontoList(self, jsonSampleData):
"""
converts JSON sampled data to a python list
args: json to be converted
returns: python list of converted data
"""
sampleList = []
sampleDict = jsonSampleData
if bool(sampleDict) == False:
return None
for key in sampleDict:
sampleList.append(str(sampleDict[key]) + " " + str(key))
return sampleList
class RedirectException(Exception):
pass
class MissingTagException(Exception):
pass
| gpl-2.0 | 762,494,732,954,445,000 | 30.57 | 79 | 0.59376 | false |
brandsoulmates/incubator-airflow | tests/contrib/hooks/test_jira_hook.py | 44 | 1616 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
jira_hook = JiraHook()
self.assertTrue(jira_mock.called)
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -8,556,632,761,569,997,000 | 30.686275 | 76 | 0.674505 | false |
andyvand/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/chardistribution.py | 168 | 9153 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| lgpl-2.1 | 5,227,694,397,582,705,000 | 38.795652 | 79 | 0.626461 | false |
cloudera/hue | desktop/core/ext-py/SQLAlchemy-1.3.17/test/dialect/test_sqlite.py | 2 | 82848 | #!coding: utf-8
"""SQLite-specific tests."""
import datetime
import json
import os
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import column
from sqlalchemy import Computed
from sqlalchemy import create_engine
from sqlalchemy import DefaultClause
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import extract
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import MetaData
from sqlalchemy import pool
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import sql
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import tuple_
from sqlalchemy import types as sqltypes
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.dialects.sqlite import base as sqlite
from sqlalchemy.dialects.sqlite import pysqlite as pysqlite_dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.url import make_url
from sqlalchemy.schema import CreateTable
from sqlalchemy.schema import FetchedValue
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import combinations
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.types import Boolean
from sqlalchemy.types import Date
from sqlalchemy.types import DateTime
from sqlalchemy.types import Integer
from sqlalchemy.types import String
from sqlalchemy.types import Time
from sqlalchemy.util import u
from sqlalchemy.util import ue
class TestTypes(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = "sqlite"
def test_boolean(self):
"""Test that the boolean only treats 1 as True
"""
meta = MetaData(testing.db)
t = Table(
"bool_table",
meta,
Column("id", Integer, primary_key=True),
Column("boo", Boolean(create_constraint=False)),
)
try:
meta.create_all()
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (1, 'false');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (2, 'true');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (3, '1');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (4, '0');"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (5, 1);"
)
testing.db.execute(
"INSERT INTO bool_table (id, boo) " "VALUES (6, 0);"
)
eq_(
t.select(t.c.boo).order_by(t.c.id).execute().fetchall(),
[(3, True), (5, True)],
)
finally:
meta.drop_all()
def test_string_dates_passed_raise(self):
assert_raises(
exc.StatementError,
testing.db.execute,
select([1]).where(bindparam("date", type_=Date)),
date=str(datetime.date(2007, 10, 30)),
)
def test_cant_parse_datetime_message(self):
for (typ, disp) in [
(Time, "time"),
(DateTime, "datetime"),
(Date, "date"),
]:
assert_raises_message(
ValueError,
"Couldn't parse %s string." % disp,
lambda: testing.db.execute(
text("select 'ASDF' as value").columns(value=typ)
).scalar(),
)
def test_native_datetime(self):
dbapi = testing.db.dialect.dbapi
connect_args = {
"detect_types": dbapi.PARSE_DECLTYPES | dbapi.PARSE_COLNAMES
}
engine = engines.testing_engine(
options={"connect_args": connect_args, "native_datetime": True}
)
t = Table(
"datetest",
MetaData(),
Column("id", Integer, primary_key=True),
Column("d1", Date),
Column("d2", sqltypes.TIMESTAMP),
)
t.create(engine)
try:
engine.execute(
t.insert(),
{
"d1": datetime.date(2010, 5, 10),
"d2": datetime.datetime(2010, 5, 10, 12, 15, 25),
},
)
row = engine.execute(t.select()).first()
eq_(
row,
(
1,
datetime.date(2010, 5, 10),
datetime.datetime(2010, 5, 10, 12, 15, 25),
),
)
r = engine.execute(func.current_date()).scalar()
assert isinstance(r, util.string_types)
finally:
t.drop(engine)
engine.dispose()
@testing.provide_metadata
def test_custom_datetime(self):
sqlite_date = sqlite.DATETIME(
# 2004-05-21T00:00:00
storage_format="%(year)04d-%(month)02d-%(day)02d"
"T%(hour)02d:%(minute)02d:%(second)02d",
regexp=r"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(
t.insert().values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))
)
testing.db.execute("insert into t (d) values ('2004-05-21T00:00:00')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("2004-05-21T00:00:00",), ("2010-10-15T12:37:00",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[
(datetime.datetime(2004, 5, 21, 0, 0),),
(datetime.datetime(2010, 10, 15, 12, 37),),
],
)
@testing.provide_metadata
def test_custom_datetime_text_affinity(self):
sqlite_date = sqlite.DATETIME(
storage_format="%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d",
regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(
t.insert().values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))
)
testing.db.execute("insert into t (d) values ('20040521000000')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("20040521000000",), ("20101015123700",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[
(datetime.datetime(2004, 5, 21, 0, 0),),
(datetime.datetime(2010, 10, 15, 12, 37),),
],
)
@testing.provide_metadata
def test_custom_date_text_affinity(self):
sqlite_date = sqlite.DATE(
storage_format="%(year)04d%(month)02d%(day)02d",
regexp=r"(\d{4})(\d{2})(\d{2})",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(t.insert().values(d=datetime.date(2010, 10, 15)))
testing.db.execute("insert into t (d) values ('20040521')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("20040521",), ("20101015",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[(datetime.date(2004, 5, 21),), (datetime.date(2010, 10, 15),)],
)
@testing.provide_metadata
def test_custom_date(self):
sqlite_date = sqlite.DATE(
            # e.g. 2004|05|21
storage_format="%(year)04d|%(month)02d|%(day)02d",
regexp=r"(\d+)\|(\d+)\|(\d+)",
)
t = Table("t", self.metadata, Column("d", sqlite_date))
self.metadata.create_all(testing.db)
testing.db.execute(t.insert().values(d=datetime.date(2010, 10, 15)))
testing.db.execute("insert into t (d) values ('2004|05|21')")
eq_(
testing.db.execute("select * from t order by d").fetchall(),
[("2004|05|21",), ("2010|10|15",)],
)
eq_(
testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(),
[(datetime.date(2004, 5, 21),), (datetime.date(2010, 10, 15),)],
)
def test_no_convert_unicode(self):
"""test no utf-8 encoding occurs"""
dialect = sqlite.dialect()
for t in (
String(),
sqltypes.CHAR(),
sqltypes.Unicode(),
sqltypes.UnicodeText(),
String(),
sqltypes.CHAR(),
sqltypes.Unicode(),
sqltypes.UnicodeText(),
):
bindproc = t.dialect_impl(dialect).bind_processor(dialect)
assert not bindproc or isinstance(
bindproc(util.u("some string")), util.text_type
)
class JSONTest(fixtures.TestBase):
__requires__ = ("json_type",)
__only_on__ = "sqlite"
@testing.provide_metadata
@testing.requires.reflects_json_type
def test_reflection(self):
Table("json_test", self.metadata, Column("foo", sqlite.JSON))
self.metadata.create_all()
reflected = Table("json_test", MetaData(), autoload_with=testing.db)
is_(reflected.c.foo.type._type_affinity, sqltypes.JSON)
assert isinstance(reflected.c.foo.type, sqlite.JSON)
@testing.provide_metadata
def test_rudimentary_roundtrip(self):
sqlite_json = Table(
"json_test", self.metadata, Column("foo", sqlite.JSON)
)
self.metadata.create_all()
value = {"json": {"foo": "bar"}, "recs": ["one", "two"]}
with testing.db.connect() as conn:
conn.execute(sqlite_json.insert(), foo=value)
eq_(conn.scalar(select([sqlite_json.c.foo])), value)
@testing.provide_metadata
def test_extract_subobject(self):
sqlite_json = Table(
"json_test", self.metadata, Column("foo", sqlite.JSON)
)
self.metadata.create_all()
value = {"json": {"foo": "bar"}}
with testing.db.connect() as conn:
conn.execute(sqlite_json.insert(), foo=value)
eq_(
conn.scalar(select([sqlite_json.c.foo["json"]])), value["json"]
)
@testing.provide_metadata
def test_deprecated_serializer_args(self):
sqlite_json = Table(
"json_test", self.metadata, Column("foo", sqlite.JSON)
)
data_element = {"foo": "bar"}
js = mock.Mock(side_effect=json.dumps)
jd = mock.Mock(side_effect=json.loads)
with testing.expect_deprecated(
"The _json_deserializer argument to the SQLite "
"dialect has been renamed",
"The _json_serializer argument to the SQLite "
"dialect has been renamed",
):
engine = engines.testing_engine(
options=dict(_json_serializer=js, _json_deserializer=jd)
)
self.metadata.create_all(engine)
engine.execute(sqlite_json.insert(), {"foo": data_element})
row = engine.execute(select([sqlite_json.c.foo])).first()
eq_(row, (data_element,))
eq_(js.mock_calls, [mock.call(data_element)])
eq_(jd.mock_calls, [mock.call(json.dumps(data_element))])
class DateTimeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_time_microseconds(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27 12:00:00.000125")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_truncate_microseconds(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
dt_out = datetime.datetime(2008, 6, 27, 12, 0, 0)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME(truncate_microseconds=True)
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27 12:00:00")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt_out)
def test_custom_format_compact(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME(
storage_format=(
"%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
),
regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "20080627120000000125")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
class DateTest(fixtures.TestBase, AssertsCompiledSQL):
def test_default(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_custom_format(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=r"(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "06/27/2008")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
class TimeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_default(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_truncate_microseconds(self):
dt = datetime.time(12, 0, 0, 125)
dt_out = datetime.time(12, 0, 0)
eq_(str(dt), "12:00:00.000125")
sldt = sqlite.TIME(truncate_microseconds=True)
bp = sldt.bind_processor(None)
eq_(bp(dt), "12:00:00")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt_out)
def test_custom_format(self):
dt = datetime.date(2008, 6, 27)
eq_(str(dt), "2008-06-27")
sldt = sqlite.DATE(
storage_format="%(year)04d%(month)02d%(day)02d",
regexp=r"(\d{4})(\d{2})(\d{2})",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "20080627")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
class DefaultsTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "sqlite"
@testing.exclude(
"sqlite",
"<",
(3, 3, 8),
"sqlite3 changesets 3353 and 3440 modified "
"behavior of default displayed in pragma "
"table_info()",
)
def test_default_reflection(self):
# (ask_for, roundtripped_as_if_different)
specs = [
(String(3), '"foo"'),
(sqltypes.NUMERIC(10, 2), "100.50"),
(Integer, "5"),
(Boolean, "False"),
]
columns = [
Column("c%i" % (i + 1), t[0], server_default=text(t[1]))
for (i, t) in enumerate(specs)
]
db = testing.db
m = MetaData(db)
Table("t_defaults", m, *columns)
try:
m.create_all()
m2 = MetaData(db)
rt = Table("t_defaults", m2, autoload=True)
expected = [c[1] for c in specs]
for i, reflected in enumerate(rt.c):
eq_(str(reflected.server_default.arg), expected[i])
finally:
m.drop_all()
@testing.exclude(
"sqlite",
"<",
(3, 3, 8),
"sqlite3 changesets 3353 and 3440 modified "
"behavior of default displayed in pragma "
"table_info()",
)
def test_default_reflection_2(self):
db = testing.db
m = MetaData(db)
expected = ["'my_default'", "0"]
table = """CREATE TABLE r_defaults (
data VARCHAR(40) DEFAULT 'my_default',
val INTEGER NOT NULL DEFAULT 0
)"""
try:
db.execute(table)
rt = Table("r_defaults", m, autoload=True)
for i, reflected in enumerate(rt.c):
eq_(str(reflected.server_default.arg), expected[i])
finally:
db.execute("DROP TABLE r_defaults")
def test_default_reflection_3(self):
db = testing.db
table = """CREATE TABLE r_defaults (
data VARCHAR(40) DEFAULT 'my_default',
val INTEGER NOT NULL DEFAULT 0
)"""
try:
db.execute(table)
m1 = MetaData(db)
t1 = Table("r_defaults", m1, autoload=True)
db.execute("DROP TABLE r_defaults")
t1.create()
m2 = MetaData(db)
t2 = Table("r_defaults", m2, autoload=True)
self.assert_compile(
CreateTable(t2),
"CREATE TABLE r_defaults (data VARCHAR(40) "
"DEFAULT 'my_default', val INTEGER DEFAULT 0 "
"NOT NULL)",
)
finally:
db.execute("DROP TABLE r_defaults")
@testing.provide_metadata
def test_boolean_default(self):
t = Table(
"t",
self.metadata,
Column("x", Boolean, server_default=sql.false()),
)
t.create(testing.db)
with testing.db.connect() as conn:
conn.execute(t.insert())
conn.execute(t.insert().values(x=True))
eq_(
conn.execute(t.select().order_by(t.c.x)).fetchall(),
[(False,), (True,)],
)
@testing.provide_metadata
def test_function_default(self):
t = Table(
"t",
self.metadata,
Column("id", Integer, primary_key=True),
Column("x", DateTime(), server_default=func.now()),
)
t.create(testing.db)
with testing.db.connect() as conn:
now = conn.scalar(func.now())
today = datetime.datetime.today()
conn.execute(t.insert())
conn.execute(t.insert().values(x=today))
eq_(
conn.execute(select([t.c.x]).order_by(t.c.id)).fetchall(),
[(now,), (today,)],
)
@testing.provide_metadata
def test_expression_with_function_default(self):
t = Table(
"t",
self.metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer(), server_default=func.abs(-5) + 17),
)
t.create(testing.db)
with testing.db.connect() as conn:
conn.execute(t.insert())
conn.execute(t.insert().values(x=35))
eq_(
conn.execute(select([t.c.x]).order_by(t.c.id)).fetchall(),
[(22,), (35,)],
)
def test_old_style_default(self):
"""test non-quoted integer value on older sqlite pragma"""
dialect = sqlite.dialect()
info = dialect._get_column_info("foo", "INTEGER", False, 3, False)
eq_(info["default"], "3")
class DialectTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = "sqlite"
def test_extra_reserved_words(self):
"""Tests reserved words in identifiers.
'true', 'false', and 'column' are undocumented reserved words
when used as column identifiers (as of 3.5.1). Covering them
here to ensure they remain in place if the dialect's
        reserved_words set is updated in the future.
        """
meta = MetaData(testing.db)
t = Table(
"reserved",
meta,
Column("safe", Integer),
Column("true", Integer),
Column("false", Integer),
Column("column", Integer),
)
try:
meta.create_all()
t.insert().execute(safe=1)
list(t.select().execute())
finally:
meta.drop_all()
@testing.provide_metadata
def test_quoted_identifiers_functional_one(self):
"""Tests autoload of tables created with quoted column names."""
metadata = self.metadata
testing.db.execute(
"""CREATE TABLE "django_content_type" (
"id" integer NOT NULL PRIMARY KEY,
"django_stuff" text NULL
)
"""
)
testing.db.execute(
"""
CREATE TABLE "django_admin_log" (
"id" integer NOT NULL PRIMARY KEY,
"action_time" datetime NOT NULL,
"content_type_id" integer NULL
REFERENCES "django_content_type" ("id"),
"object_id" text NULL,
"change_message" text NOT NULL
)
"""
)
table1 = Table("django_admin_log", metadata, autoload=True)
table2 = Table("django_content_type", metadata, autoload=True)
j = table1.join(table2)
assert j.onclause.compare(table1.c.content_type_id == table2.c.id)
@testing.provide_metadata
def test_quoted_identifiers_functional_two(self):
""""test the edgiest of edge cases, quoted table/col names
that start and end with quotes.
SQLite claims to have fixed this in
http://www.sqlite.org/src/info/600482d161, however
it still fails if the FK points to a table name that actually
has quotes as part of its name.
"""
metadata = self.metadata
testing.db.execute(
r'''CREATE TABLE """a""" (
"""id""" integer NOT NULL PRIMARY KEY
)
'''
)
# unfortunately, still can't do this; sqlite quadruples
# up the quotes on the table name here for pragma foreign_key_list
# testing.db.execute(r'''
# CREATE TABLE """b""" (
# """id""" integer NOT NULL PRIMARY KEY,
# """aid""" integer NULL
# REFERENCES """a""" ("""id""")
# )
# ''')
table1 = Table(r'"a"', metadata, autoload=True)
assert '"id"' in table1.c
# table2 = Table(r'"b"', metadata, autoload=True)
# j = table1.join(table2)
# assert j.onclause.compare(table1.c['"id"']
# == table2.c['"aid"'])
@testing.provide_metadata
def test_description_encoding(self):
# amazingly, pysqlite seems to still deliver cursor.description
# as encoded bytes in py2k
t = Table(
"x",
self.metadata,
Column(u("méil"), Integer, primary_key=True),
Column(ue("\u6e2c\u8a66"), Integer),
)
self.metadata.create_all(testing.db)
result = testing.db.execute(t.select())
assert u("méil") in result.keys()
assert ue("\u6e2c\u8a66") in result.keys()
def test_pool_class(self):
e = create_engine("sqlite+pysqlite://")
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine("sqlite+pysqlite:///:memory:")
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine("sqlite+pysqlite:///foo.db")
assert e.pool.__class__ is pool.NullPool
@combinations(
(
"sqlite:///foo.db", # file path is absolute
([os.path.abspath("foo.db")], {}),
),
(
"sqlite:////abs/path/to/foo.db",
([os.path.abspath("/abs/path/to/foo.db")], {}),
),
("sqlite://", ([":memory:"], {})),
(
"sqlite:///?check_same_thread=true",
([":memory:"], {"check_same_thread": True}),
),
(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true",
(
["file:path/to/database?mode=ro&nolock=1"],
{"check_same_thread": True, "timeout": 10.0, "uri": True},
),
),
(
"sqlite:///file:path/to/database?" "mode=ro&uri=true",
(["file:path/to/database?mode=ro"], {"uri": True}),
),
(
"sqlite:///file:path/to/database?uri=true",
(["file:path/to/database"], {"uri": True}),
),
)
def test_connect_args(self, url, expected):
"""test create_connect_args scenarios including support for uri=True"""
d = pysqlite_dialect.dialect()
url = make_url(url)
eq_(d.create_connect_args(url), expected)
@testing.combinations(
("no_persisted", "ignore"),
("persisted_none", None),
("persisted_true", True),
("persisted_false", False),
id_="ia",
)
def test_column_computed(self, persisted):
m = MetaData()
kwargs = {"persisted": persisted} if persisted != "ignore" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", **kwargs)),
)
assert_raises_message(
exc.CompileError,
"SQLite does not support computed columns",
schema.CreateTable(t).compile,
dialect=sqlite.dialect(),
)
class AttachedDBTest(fixtures.TestBase):
__only_on__ = "sqlite"
def _fixture(self):
meta = self.metadata
self.conn = testing.db.connect()
Table("created", meta, Column("foo", Integer), Column("bar", String))
Table("local_only", meta, Column("q", Integer), Column("p", Integer))
ct = Table(
"created",
meta,
Column("id", Integer),
Column("name", String),
schema="test_schema",
)
Table(
"another_created",
meta,
Column("bat", Integer),
Column("hoho", String),
schema="test_schema",
)
meta.create_all(self.conn)
return ct
def setup(self):
self.conn = testing.db.connect()
self.metadata = MetaData()
def teardown(self):
self.metadata.drop_all(self.conn)
self.conn.close()
def test_no_tables(self):
insp = inspect(self.conn)
eq_(insp.get_table_names("test_schema"), [])
def test_column_names(self):
self._fixture()
insp = inspect(self.conn)
eq_(
[
d["name"]
for d in insp.get_columns("created", schema="test_schema")
],
["id", "name"],
)
eq_(
[d["name"] for d in insp.get_columns("created", schema=None)],
["foo", "bar"],
)
eq_(
[
d["name"]
for d in insp.get_columns("nonexistent", schema="test_schema")
],
[],
)
eq_(
[
d["name"]
for d in insp.get_columns("another_created", schema=None)
],
[],
)
eq_(
[
d["name"]
for d in insp.get_columns("local_only", schema="test_schema")
],
[],
)
eq_([d["name"] for d in insp.get_columns("local_only")], ["q", "p"])
def test_table_names_present(self):
self._fixture()
insp = inspect(self.conn)
eq_(
set(insp.get_table_names("test_schema")),
{"created", "another_created"},
)
def test_table_names_system(self):
self._fixture()
insp = inspect(self.conn)
eq_(
set(insp.get_table_names("test_schema")),
{"created", "another_created"},
)
def test_schema_names(self):
self._fixture()
insp = inspect(self.conn)
eq_(insp.get_schema_names(), ["main", "test_schema"])
# implicitly creates a "temp" schema
self.conn.execute("select * from sqlite_temp_master")
# we're not including it
insp = inspect(self.conn)
eq_(insp.get_schema_names(), ["main", "test_schema"])
def test_reflect_system_table(self):
meta = MetaData(self.conn)
alt_master = Table(
"sqlite_master",
meta,
autoload=True,
autoload_with=self.conn,
schema="test_schema",
)
assert len(alt_master.c) > 0
def test_reflect_user_table(self):
self._fixture()
m2 = MetaData()
c2 = Table("created", m2, autoload=True, autoload_with=self.conn)
eq_(len(c2.c), 2)
def test_crud(self):
ct = self._fixture()
self.conn.execute(ct.insert(), {"id": 1, "name": "foo"})
eq_(self.conn.execute(ct.select()).fetchall(), [(1, "foo")])
self.conn.execute(ct.update(), {"id": 2, "name": "bar"})
eq_(self.conn.execute(ct.select()).fetchall(), [(2, "bar")])
self.conn.execute(ct.delete())
eq_(self.conn.execute(ct.select()).fetchall(), [])
def test_col_targeting(self):
ct = self._fixture()
self.conn.execute(ct.insert(), {"id": 1, "name": "foo"})
row = self.conn.execute(ct.select()).first()
eq_(row["id"], 1)
eq_(row["name"], "foo")
def test_col_targeting_union(self):
ct = self._fixture()
self.conn.execute(ct.insert(), {"id": 1, "name": "foo"})
row = self.conn.execute(ct.select().union(ct.select())).first()
eq_(row["id"], 1)
eq_(row["name"], "foo")
class SQLTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests SQLite-dialect specific compilation."""
__dialect__ = sqlite.dialect()
def test_extract(self):
t = sql.table("t", sql.column("col1"))
mapping = {
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
}
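        # SQLite has no native EXTRACT(); the dialect renders each field as
        # STRFTIME with the format code above, cast to INTEGER, as the
        # expected SQL below shows.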
for field, subst in mapping.items():
self.assert_compile(
select([extract(field, t.c.col1)]),
"SELECT CAST(STRFTIME('%s', t.col1) AS "
"INTEGER) AS anon_1 FROM t" % subst,
)
def test_true_false(self):
self.assert_compile(sql.false(), "0")
self.assert_compile(sql.true(), "1")
def test_is_distinct_from(self):
self.assert_compile(
sql.column("x").is_distinct_from(None), "x IS NOT NULL"
)
self.assert_compile(
sql.column("x").isnot_distinct_from(False), "x IS 0"
)
def test_localtime(self):
self.assert_compile(
func.localtimestamp(), 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
)
def test_constraints_with_schemas(self):
metadata = MetaData()
Table(
"t1",
metadata,
Column("id", Integer, primary_key=True),
schema="master",
)
t2 = Table(
"t2",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer, ForeignKey("master.t1.id")),
schema="master",
)
t3 = Table(
"t3",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer, ForeignKey("master.t1.id")),
schema="alternate",
)
t4 = Table(
"t4",
metadata,
Column("id", Integer, primary_key=True),
Column("t1_id", Integer, ForeignKey("master.t1.id")),
)
# schema->schema, generate REFERENCES with no schema name
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE master.t2 ("
"id INTEGER NOT NULL, "
"t1_id INTEGER, "
"PRIMARY KEY (id), "
"FOREIGN KEY(t1_id) REFERENCES t1 (id)"
")",
)
# schema->different schema, don't generate REFERENCES
self.assert_compile(
schema.CreateTable(t3),
"CREATE TABLE alternate.t3 ("
"id INTEGER NOT NULL, "
"t1_id INTEGER, "
"PRIMARY KEY (id)"
")",
)
# same for local schema
self.assert_compile(
schema.CreateTable(t4),
"CREATE TABLE t4 ("
"id INTEGER NOT NULL, "
"t1_id INTEGER, "
"PRIMARY KEY (id)"
")",
)
def test_column_defaults_ddl(self):
t = Table(
"t", MetaData(), Column("x", Boolean, server_default=sql.false())
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE t (x BOOLEAN DEFAULT (0), CHECK (x IN (0, 1)))",
)
t = Table(
"t",
MetaData(),
Column("x", String(), server_default=func.sqlite_version()),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE t (x VARCHAR DEFAULT (sqlite_version()))",
)
t = Table(
"t",
MetaData(),
Column("x", Integer(), server_default=func.abs(-5) + 17),
)
self.assert_compile(
CreateTable(t), "CREATE TABLE t (x INTEGER DEFAULT (abs(-5) + 17))"
)
def test_create_partial_index(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx = Index(
"test_idx1",
tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
# test quoting and all that
idx2 = Index(
"test_idx2",
tbl.c.data,
sqlite_where=and_(tbl.c.data > "a", tbl.c.data < "b's"),
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (data) "
"WHERE data > 5 AND data < 10",
dialect=sqlite.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (data) "
"WHERE data > 'a' AND data < 'b''s'",
dialect=sqlite.dialect(),
)
def test_no_autoinc_on_composite_pk(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer, primary_key=True, autoincrement=True),
Column("y", Integer, primary_key=True),
)
assert_raises_message(
exc.CompileError,
"SQLite does not support autoincrement for composite",
CreateTable(t).compile,
dialect=sqlite.dialect(),
)
def test_in_tuple(self):
self.assert_compile(
tuple_(column("q"), column("p")).in_([(1, 2), (3, 4)]),
"(q, p) IN (VALUES (?, ?), (?, ?))",
)
class OnConflictDDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = sqlite.dialect()
def test_on_conflict_clause_column_not_null(self):
c = Column(
"test", Integer, nullable=False, sqlite_on_conflict_not_null="FAIL"
)
self.assert_compile(
schema.CreateColumn(c),
"test INTEGER NOT NULL " "ON CONFLICT FAIL",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_column_many_clause(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"test",
Integer,
nullable=False,
primary_key=True,
sqlite_on_conflict_not_null="FAIL",
sqlite_on_conflict_primary_key="IGNORE",
),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n ("
"test INTEGER NOT NULL ON CONFLICT FAIL, "
"PRIMARY KEY (test) ON CONFLICT IGNORE)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_unique_constraint_from_column(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"x", String(30), unique=True, sqlite_on_conflict_unique="FAIL"
),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (x VARCHAR(30), " "UNIQUE (x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_unique_constraint(self):
meta = MetaData()
t = Table(
"n",
meta,
Column("id", Integer),
Column("x", String(30)),
UniqueConstraint("id", "x", sqlite_on_conflict="FAIL"),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (id INTEGER, x VARCHAR(30), "
"UNIQUE (id, x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_primary_key(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"id",
Integer,
primary_key=True,
sqlite_on_conflict_primary_key="FAIL",
),
sqlite_autoincrement=True,
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (id INTEGER NOT NULL "
"PRIMARY KEY ON CONFLICT FAIL AUTOINCREMENT)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_primary_key_constraint_from_column(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"x",
String(30),
sqlite_on_conflict_primary_key="FAIL",
primary_key=True,
),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (x VARCHAR(30) NOT NULL, "
"PRIMARY KEY (x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_check_constraint(self):
meta = MetaData()
t = Table(
"n",
meta,
Column("id", Integer),
Column("x", Integer),
CheckConstraint("id > x", sqlite_on_conflict="FAIL"),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n (id INTEGER, x INTEGER, "
"CHECK (id > x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_check_constraint_from_column(self):
meta = MetaData()
t = Table(
"n",
meta,
Column(
"x",
Integer,
CheckConstraint("x > 1", sqlite_on_conflict="FAIL"),
),
)
assert_raises_message(
exc.CompileError,
"SQLite does not support on conflict "
"clause for column check constraint",
CreateTable(t).compile,
dialect=sqlite.dialect(),
)
def test_on_conflict_clause_primary_key_constraint(self):
meta = MetaData()
t = Table(
"n",
meta,
Column("id", Integer),
Column("x", String(30)),
PrimaryKeyConstraint("id", "x", sqlite_on_conflict="FAIL"),
)
self.assert_compile(
CreateTable(t),
"CREATE TABLE n ("
"id INTEGER NOT NULL, "
"x VARCHAR(30) NOT NULL, "
"PRIMARY KEY (id, x) ON CONFLICT FAIL)",
dialect=sqlite.dialect(),
)
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
"""Tests inserts and autoincrement."""
__only_on__ = "sqlite"
# empty insert (i.e. INSERT INTO table DEFAULT VALUES) fails on
# 3.3.7 and before
def _test_empty_insert(self, table, expect=1):
try:
table.create()
for wanted in expect, expect * 2:
table.insert().execute()
rows = table.select().execute().fetchall()
eq_(len(rows), wanted)
finally:
table.drop()
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk1(self):
self._test_empty_insert(
Table(
"a",
MetaData(testing.db),
Column("id", Integer, primary_key=True),
)
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk2(self):
# now warns due to [ticket:3216]
with expect_warnings(
"Column 'b.x' is marked as a member of the "
"primary key for table 'b'",
"Column 'b.y' is marked as a member of the "
"primary key for table 'b'",
):
assert_raises(
exc.IntegrityError,
self._test_empty_insert,
Table(
"b",
MetaData(testing.db),
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk2_fv(self):
assert_raises(
exc.DBAPIError,
self._test_empty_insert,
Table(
"b",
MetaData(testing.db),
Column(
"x",
Integer,
primary_key=True,
server_default=FetchedValue(),
),
Column(
"y",
Integer,
primary_key=True,
server_default=FetchedValue(),
),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk3(self):
# now warns due to [ticket:3216]
with expect_warnings(
"Column 'c.x' is marked as a member of the primary key for table"
):
assert_raises(
exc.IntegrityError,
self._test_empty_insert,
Table(
"c",
MetaData(testing.db),
Column("x", Integer, primary_key=True),
Column(
"y", Integer, DefaultClause("123"), primary_key=True
),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk3_fv(self):
assert_raises(
exc.DBAPIError,
self._test_empty_insert,
Table(
"c",
MetaData(testing.db),
Column(
"x",
Integer,
primary_key=True,
server_default=FetchedValue(),
),
Column("y", Integer, DefaultClause("123"), primary_key=True),
),
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_pk4(self):
self._test_empty_insert(
Table(
"d",
MetaData(testing.db),
Column("x", Integer, primary_key=True),
Column("y", Integer, DefaultClause("123")),
)
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_nopk1(self):
self._test_empty_insert(
Table("e", MetaData(testing.db), Column("id", Integer))
)
@testing.exclude("sqlite", "<", (3, 3, 8), "no database support")
def test_empty_insert_nopk2(self):
self._test_empty_insert(
Table(
"f",
MetaData(testing.db),
Column("x", Integer),
Column("y", Integer),
)
)
def test_inserts_with_spaces(self):
tbl = Table(
"tbl",
MetaData("sqlite:///"),
Column("with space", Integer),
Column("without", Integer),
)
tbl.create()
try:
tbl.insert().execute({"without": 123})
assert list(tbl.select().execute()) == [(None, 123)]
tbl.insert().execute({"with space": 456})
assert list(tbl.select().execute()) == [(None, 123), (456, None)]
finally:
tbl.drop()
def full_text_search_missing():
"""Test if full text search is not implemented and return False if
it is and True otherwise."""
try:
testing.db.execute("CREATE VIRTUAL TABLE t using FTS3;")
testing.db.execute("DROP TABLE t;")
return False
except Exception:
return True
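# The MatchTest class below exercises the FTS3 MATCH operator; on builds
# with FTS3 available, matchtable.c.title.match("somstr") renders as
# "matchtable.title MATCH ?" (see MatchTest.test_expression).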
metadata = cattable = matchtable = None
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "sqlite"
__skip_if__ = (full_text_search_missing,)
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
testing.db.execute(
"""
CREATE VIRTUAL TABLE cattable using FTS3 (
id INTEGER NOT NULL,
description VARCHAR(50),
PRIMARY KEY (id)
)
"""
)
cattable = Table("cattable", metadata, autoload=True)
testing.db.execute(
"""
CREATE VIRTUAL TABLE matchtable using FTS3 (
id INTEGER NOT NULL,
title VARCHAR(200),
category_id INTEGER NOT NULL,
PRIMARY KEY (id)
)
"""
)
matchtable = Table("matchtable", metadata, autoload=True)
metadata.create_all()
cattable.insert().execute(
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
]
)
matchtable.insert().execute(
[
{
"id": 1,
"title": "Agile Web Development with Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{
"id": 4,
"title": "The Definitive Guide to Django",
"category_id": 1,
},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
]
)
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_expression(self):
self.assert_compile(
matchtable.c.title.match("somstr"),
"matchtable.title MATCH ?",
dialect=sqlite.dialect(),
)
def test_simple_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([2, 5], [r.id for r in results])
def test_simple_prefix_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("nut*"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results])
def test_or_match(self):
results2 = (
matchtable.select()
.where(matchtable.c.title.match("nutshell OR ruby"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
results2 = (
matchtable.select()
.where(matchtable.c.title.match("python nutshell"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = (
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
cattable.c.description.match("Ruby"),
)
)
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([1, 3], [r.id for r in results])
class AutoIncrementTest(fixtures.TestBase, AssertsCompiledSQL):
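    # SQLite only honors AUTOINCREMENT on a single INTEGER PRIMARY KEY
    # column; the sqlite_autoincrement table keyword controls whether the
    # keyword is rendered (composite PKs raise CompileError, see
    # SQLTest.test_no_autoinc_on_composite_pk above).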
def test_sqlite_autoincrement(self):
table = Table(
"autoinctable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("x", Integer, default=None),
sqlite_autoincrement=True,
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE autoinctable (id INTEGER NOT "
"NULL PRIMARY KEY AUTOINCREMENT, x INTEGER)",
dialect=sqlite.dialect(),
)
def test_sqlite_autoincrement_constraint(self):
table = Table(
"autoinctable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("x", Integer, default=None),
UniqueConstraint("x"),
sqlite_autoincrement=True,
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE autoinctable (id INTEGER NOT "
"NULL PRIMARY KEY AUTOINCREMENT, x "
"INTEGER, UNIQUE (x))",
dialect=sqlite.dialect(),
)
def test_sqlite_no_autoincrement(self):
table = Table(
"noautoinctable",
MetaData(),
Column("id", Integer, primary_key=True),
Column("x", Integer, default=None),
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE noautoinctable (id INTEGER "
"NOT NULL, x INTEGER, PRIMARY KEY (id))",
dialect=sqlite.dialect(),
)
def test_sqlite_autoincrement_int_affinity(self):
class MyInteger(sqltypes.TypeDecorator):
impl = Integer
table = Table(
"autoinctable",
MetaData(),
Column("id", MyInteger, primary_key=True),
sqlite_autoincrement=True,
)
self.assert_compile(
schema.CreateTable(table),
"CREATE TABLE autoinctable (id INTEGER NOT "
"NULL PRIMARY KEY AUTOINCREMENT)",
dialect=sqlite.dialect(),
)
class ReflectHeadlessFKsTest(fixtures.TestBase):
__only_on__ = "sqlite"
def setup(self):
testing.db.execute("CREATE TABLE a (id INTEGER PRIMARY KEY)")
        # this syntax actually works on other DBs; perhaps we'd want to
        # add tests to test_reflection
testing.db.execute(
"CREATE TABLE b (id INTEGER PRIMARY KEY REFERENCES a)"
)
def teardown(self):
testing.db.execute("drop table b")
testing.db.execute("drop table a")
def test_reflect_tables_fk_no_colref(self):
meta = MetaData()
a = Table("a", meta, autoload=True, autoload_with=testing.db)
b = Table("b", meta, autoload=True, autoload_with=testing.db)
assert b.c.id.references(a.c.id)
class KeywordInDatabaseNameTest(fixtures.TestBase):
__only_on__ = "sqlite"
@classmethod
def setup_class(cls):
with testing.db.begin() as conn:
conn.execute('ATTACH %r AS "default"' % conn.engine.url.database)
conn.execute('CREATE TABLE "default".a (id INTEGER PRIMARY KEY)')
@classmethod
def teardown_class(cls):
with testing.db.begin() as conn:
try:
conn.execute('drop table "default".a')
except Exception:
pass
conn.execute('DETACH DATABASE "default"')
def test_reflect(self):
with testing.db.begin() as conn:
meta = MetaData(bind=conn, schema="default")
meta.reflect()
assert "default.a" in meta.tables
class ConstraintReflectionTest(fixtures.TestBase):
__only_on__ = "sqlite"
@classmethod
def setup_class(cls):
with testing.db.begin() as conn:
conn.execute("CREATE TABLE a1 (id INTEGER PRIMARY KEY)")
conn.execute("CREATE TABLE a2 (id INTEGER PRIMARY KEY)")
conn.execute(
"CREATE TABLE b (id INTEGER PRIMARY KEY, "
"FOREIGN KEY(id) REFERENCES a1(id),"
"FOREIGN KEY(id) REFERENCES a2(id)"
")"
)
conn.execute(
"CREATE TABLE c (id INTEGER, "
"CONSTRAINT bar PRIMARY KEY(id),"
"CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id),"
"CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)"
")"
)
conn.execute(
# the lower casing + inline is intentional here
"CREATE TABLE d (id INTEGER, x INTEGER unique)"
)
conn.execute(
# the lower casing + inline is intentional here
"CREATE TABLE d1 "
'(id INTEGER, "some ( STUPID n,ame" INTEGER unique)'
)
conn.execute(
# the lower casing + inline is intentional here
'CREATE TABLE d2 ( "some STUPID n,ame" INTEGER unique)'
)
conn.execute(
# the lower casing + inline is intentional here
'CREATE TABLE d3 ( "some STUPID n,ame" INTEGER NULL unique)'
)
conn.execute(
# lower casing + inline is intentional
"CREATE TABLE e (id INTEGER, x INTEGER references a2(id))"
)
conn.execute(
'CREATE TABLE e1 (id INTEGER, "some ( STUPID n,ame" INTEGER '
'references a2 ("some ( STUPID n,ame"))'
)
conn.execute(
"CREATE TABLE e2 (id INTEGER, "
'"some ( STUPID n,ame" INTEGER NOT NULL '
'references a2 ("some ( STUPID n,ame"))'
)
conn.execute(
"CREATE TABLE f (x INTEGER, CONSTRAINT foo_fx UNIQUE(x))"
)
conn.execute(
"CREATE TEMPORARY TABLE g "
"(x INTEGER, CONSTRAINT foo_gx UNIQUE(x))"
)
conn.execute(
# intentional broken casing
"CREATE TABLE h (x INTEGER, COnstraINT foo_hx unIQUE(x))"
)
conn.execute(
"CREATE TABLE i (x INTEGER, y INTEGER, PRIMARY KEY(x, y))"
)
conn.execute(
"CREATE TABLE j (id INTEGER, q INTEGER, p INTEGER, "
"PRIMARY KEY(id), FOreiGN KEY(q,p) REFERENCes i(x,y))"
)
conn.execute(
"CREATE TABLE k (id INTEGER, q INTEGER, p INTEGER, "
"PRIMARY KEY(id), "
"conSTRAINT my_fk FOreiGN KEY ( q , p ) "
"REFERENCes i ( x , y ))"
)
meta = MetaData()
Table("l", meta, Column("bar", String, index=True), schema="main")
Table(
"m",
meta,
Column("id", Integer, primary_key=True),
Column("x", String(30)),
UniqueConstraint("x"),
)
Table(
"n",
meta,
Column("id", Integer, primary_key=True),
Column("x", String(30)),
UniqueConstraint("x"),
prefixes=["TEMPORARY"],
)
Table(
"p",
meta,
Column("id", Integer),
PrimaryKeyConstraint("id", name="pk_name"),
)
Table("q", meta, Column("id", Integer), PrimaryKeyConstraint("id"))
meta.create_all(conn)
# will contain an "autoindex"
conn.execute("create table o (foo varchar(20) primary key)")
conn.execute(
"CREATE TABLE onud_test (id INTEGER PRIMARY KEY, "
"c1 INTEGER, c2 INTEGER, c3 INTEGER, c4 INTEGER, "
"CONSTRAINT fk1 FOREIGN KEY (c1) REFERENCES a1(id) "
"ON DELETE SET NULL, "
"CONSTRAINT fk2 FOREIGN KEY (c2) REFERENCES a1(id) "
"ON UPDATE CASCADE, "
"CONSTRAINT fk3 FOREIGN KEY (c3) REFERENCES a2(id) "
"ON DELETE CASCADE ON UPDATE SET NULL,"
"CONSTRAINT fk4 FOREIGN KEY (c4) REFERENCES a2(id) "
"ON UPDATE NO ACTION)"
)
conn.execute(
"CREATE TABLE cp ("
"q INTEGER check (q > 1 AND q < 6),\n"
"CONSTRAINT cq CHECK (q == 1 OR (q > 2 AND q < 5))\n"
")"
)
conn.execute(
"CREATE TABLE implicit_referred (pk integer primary key)"
)
# single col foreign key with no referred column given,
# must assume primary key of referred table
conn.execute(
"CREATE TABLE implicit_referrer "
"(id integer REFERENCES implicit_referred)"
)
conn.execute(
"CREATE TABLE implicit_referred_comp "
"(pk1 integer, pk2 integer, primary key (pk1, pk2))"
)
# composite foreign key with no referred columns given,
# must assume primary key of referred table
conn.execute(
"CREATE TABLE implicit_referrer_comp "
"(id1 integer, id2 integer, foreign key(id1, id2) "
"REFERENCES implicit_referred_comp)"
)
            # worst case - an FK that refers to a nonexistent table, so we
            # can't get pks; requires the FK pragma to be turned off
conn.execute(
"CREATE TABLE implicit_referrer_comp_fake "
"(id1 integer, id2 integer, foreign key(id1, id2) "
"REFERENCES fake_table)"
)
@classmethod
def teardown_class(cls):
with testing.db.begin() as conn:
for name in [
"implicit_referrer_comp_fake",
"implicit_referrer",
"implicit_referred",
"implicit_referrer_comp",
"implicit_referred_comp",
"m",
"main.l",
"k",
"j",
"i",
"h",
"g",
"f",
"e",
"e1",
"d",
"d1",
"d2",
"c",
"b",
"a1",
"a2",
]:
try:
conn.execute("drop table %s" % name)
except Exception:
pass
def test_legacy_quoted_identifiers_unit(self):
dialect = sqlite.dialect()
dialect._broken_fk_pragma_quotes = True
for row in [
(0, None, "target", "tid", "id", None),
(0, None, '"target"', "tid", "id", None),
(0, None, "[target]", "tid", "id", None),
(0, None, "'target'", "tid", "id", None),
(0, None, "`target`", "tid", "id", None),
]:
def _get_table_pragma(*arg, **kw):
return [row]
def _get_table_sql(*arg, **kw):
return (
"CREATE TABLE foo "
"(tid INTEGER, "
"FOREIGN KEY(tid) REFERENCES %s (id))" % row[2]
)
with mock.patch.object(
dialect, "_get_table_pragma", _get_table_pragma
):
with mock.patch.object(
dialect, "_get_table_sql", _get_table_sql
):
fkeys = dialect.get_foreign_keys(None, "foo")
eq_(
fkeys,
[
{
"referred_table": "target",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["tid"],
"options": {},
}
],
)
def test_foreign_key_name_is_none(self):
# and not "0"
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("b")
eq_(
fks,
[
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["id"],
"options": {},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["id"],
"options": {},
},
],
)
def test_foreign_key_name_is_not_none(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("c")
eq_(
fks,
[
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": "foo1",
"constrained_columns": ["id"],
"options": {},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": "foo2",
"constrained_columns": ["id"],
"options": {},
},
],
)
def test_foreign_key_implicit_parent(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("implicit_referrer")
eq_(
fks,
[
{
"name": None,
"constrained_columns": ["id"],
"referred_schema": None,
"referred_table": "implicit_referred",
"referred_columns": ["pk"],
"options": {},
}
],
)
def test_foreign_key_composite_implicit_parent(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("implicit_referrer_comp")
eq_(
fks,
[
{
"name": None,
"constrained_columns": ["id1", "id2"],
"referred_schema": None,
"referred_table": "implicit_referred_comp",
"referred_columns": ["pk1", "pk2"],
"options": {},
}
],
)
def test_foreign_key_implicit_missing_parent(self):
        # test when the FK refers to a non-existent table and column names
        # aren't given. Only SQLite allows this case to exist.
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("implicit_referrer_comp_fake")
# the referred table doesn't exist but the operation does not fail
eq_(
fks,
[
{
"name": None,
"constrained_columns": ["id1", "id2"],
"referred_schema": None,
"referred_table": "fake_table",
"referred_columns": [],
"options": {},
}
],
)
def test_foreign_key_implicit_missing_parent_reflection(self):
# full Table reflection fails however, which is not a new behavior
m = MetaData()
assert_raises_message(
exc.NoSuchTableError,
"fake_table",
Table,
"implicit_referrer_comp_fake",
m,
autoload_with=testing.db,
)
def test_unnamed_inline_foreign_key(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("e")
eq_(
fks,
[
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": None,
"constrained_columns": ["x"],
"options": {},
}
],
)
def test_unnamed_inline_foreign_key_quoted(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("e1")
eq_(
fks,
[
{
"referred_table": "a2",
"referred_columns": ["some ( STUPID n,ame"],
"referred_schema": None,
"options": {},
"name": None,
"constrained_columns": ["some ( STUPID n,ame"],
}
],
)
fks = inspector.get_foreign_keys("e2")
eq_(
fks,
[
{
"referred_table": "a2",
"referred_columns": ["some ( STUPID n,ame"],
"referred_schema": None,
"options": {},
"name": None,
"constrained_columns": ["some ( STUPID n,ame"],
}
],
)
def test_foreign_key_composite_broken_casing(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("j")
eq_(
fks,
[
{
"referred_table": "i",
"referred_columns": ["x", "y"],
"referred_schema": None,
"name": None,
"constrained_columns": ["q", "p"],
"options": {},
}
],
)
fks = inspector.get_foreign_keys("k")
eq_(
fks,
[
{
"referred_table": "i",
"referred_columns": ["x", "y"],
"referred_schema": None,
"name": "my_fk",
"constrained_columns": ["q", "p"],
"options": {},
}
],
)
def test_foreign_key_ondelete_onupdate(self):
inspector = Inspector(testing.db)
fks = inspector.get_foreign_keys("onud_test")
eq_(
fks,
[
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk1",
"constrained_columns": ["c1"],
"options": {"ondelete": "SET NULL"},
},
{
"referred_table": "a1",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk2",
"constrained_columns": ["c2"],
"options": {"onupdate": "CASCADE"},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk3",
"constrained_columns": ["c3"],
"options": {"ondelete": "CASCADE", "onupdate": "SET NULL"},
},
{
"referred_table": "a2",
"referred_columns": ["id"],
"referred_schema": None,
"name": "fk4",
"constrained_columns": ["c4"],
"options": {"onupdate": "NO ACTION"},
},
],
)
def test_foreign_key_options_unnamed_inline(self):
with testing.db.connect() as conn:
conn.execute(
"create table foo (id integer, "
"foreign key (id) references bar (id) on update cascade)"
)
insp = inspect(conn)
eq_(
insp.get_foreign_keys("foo"),
[
{
"name": None,
"referred_columns": ["id"],
"referred_table": "bar",
"constrained_columns": ["id"],
"referred_schema": None,
"options": {"onupdate": "CASCADE"},
}
],
)
def test_dont_reflect_autoindex(self):
inspector = Inspector(testing.db)
eq_(inspector.get_indexes("o"), [])
eq_(
inspector.get_indexes("o", include_auto_indexes=True),
[
{
"unique": 1,
"name": "sqlite_autoindex_o_1",
"column_names": ["foo"],
}
],
)
def test_create_index_with_schema(self):
"""Test creation of index with explicit schema"""
inspector = Inspector(testing.db)
eq_(
inspector.get_indexes("l", schema="main"),
[
{
"unique": 0,
"name": u"ix_main_l_bar",
"column_names": [u"bar"],
}
],
)
def test_unique_constraint_named(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("f"),
[{"column_names": ["x"], "name": "foo_fx"}],
)
def test_unique_constraint_named_broken_casing(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("h"),
[{"column_names": ["x"], "name": "foo_hx"}],
)
def test_unique_constraint_named_broken_temp(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("g"),
[{"column_names": ["x"], "name": "foo_gx"}],
)
def test_unique_constraint_unnamed_inline(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("d"),
[{"column_names": ["x"], "name": None}],
)
def test_unique_constraint_unnamed_inline_quoted(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("d1"),
[{"column_names": ["some ( STUPID n,ame"], "name": None}],
)
eq_(
inspector.get_unique_constraints("d2"),
[{"column_names": ["some STUPID n,ame"], "name": None}],
)
eq_(
inspector.get_unique_constraints("d3"),
[{"column_names": ["some STUPID n,ame"], "name": None}],
)
def test_unique_constraint_unnamed_normal(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("m"),
[{"column_names": ["x"], "name": None}],
)
def test_unique_constraint_unnamed_normal_temporary(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_unique_constraints("n"),
[{"column_names": ["x"], "name": None}],
)
def test_primary_key_constraint_named(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_pk_constraint("p"),
{"constrained_columns": ["id"], "name": "pk_name"},
)
def test_primary_key_constraint_unnamed(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_pk_constraint("q"),
{"constrained_columns": ["id"], "name": None},
)
def test_primary_key_constraint_no_pk(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_pk_constraint("d"),
{"constrained_columns": [], "name": None},
)
def test_check_constraint(self):
inspector = Inspector(testing.db)
eq_(
inspector.get_check_constraints("cp"),
[
{"sqltext": "q > 1 AND q < 6", "name": None},
{"sqltext": "q == 1 OR (q > 2 AND q < 5)", "name": "cq"},
],
)
class SavepointTest(fixtures.TablesTest):
"""test that savepoints work when we use the correct event setup"""
__only_on__ = "sqlite"
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", Integer, primary_key=True),
Column("user_name", String),
)
@classmethod
def setup_bind(cls):
engine = engines.testing_engine(options={"use_reaper": False})
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.execute("BEGIN")
return engine
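    # With pysqlite's implicit BEGIN disabled above, begin_nested() in the
    # tests below can emit real SAVEPOINT / RELEASE / ROLLBACK TO SAVEPOINT
    # statements instead of being broken by driver-issued COMMITs.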
def test_nested_subtransaction_rollback(self):
users = self.tables.users
connection = self.bind.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans2.rollback()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
eq_(
connection.execute(
select([users.c.user_id]).order_by(users.c.user_id)
).fetchall(),
[(1,), (3,)],
)
connection.close()
def test_nested_subtransaction_commit(self):
users = self.tables.users
connection = self.bind.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans2.commit()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
eq_(
connection.execute(
select([users.c.user_id]).order_by(users.c.user_id)
).fetchall(),
[(1,), (2,), (3,)],
)
connection.close()
def test_rollback_to_subtransaction(self):
users = self.tables.users
connection = self.bind.connect()
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans3 = connection.begin()
connection.execute(users.insert(), user_id=3, user_name="user3")
trans3.rollback()
connection.execute(users.insert(), user_id=4, user_name="user4")
transaction.commit()
eq_(
connection.execute(
select([users.c.user_id]).order_by(users.c.user_id)
).fetchall(),
[(1,), (4,)],
)
connection.close()
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "sqlite"
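    # Each fixture below pairs a type as declared (left) with the type
    # expected back from reflection (right).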
def _fixed_lookup_fixture(self):
return [
(sqltypes.String(), sqltypes.VARCHAR()),
(sqltypes.String(1), sqltypes.VARCHAR(1)),
(sqltypes.String(3), sqltypes.VARCHAR(3)),
(sqltypes.Text(), sqltypes.TEXT()),
(sqltypes.Unicode(), sqltypes.VARCHAR()),
(sqltypes.Unicode(1), sqltypes.VARCHAR(1)),
(sqltypes.UnicodeText(), sqltypes.TEXT()),
(sqltypes.CHAR(3), sqltypes.CHAR(3)),
(sqltypes.NUMERIC, sqltypes.NUMERIC()),
(sqltypes.NUMERIC(10, 2), sqltypes.NUMERIC(10, 2)),
(sqltypes.Numeric, sqltypes.NUMERIC()),
(sqltypes.Numeric(10, 2), sqltypes.NUMERIC(10, 2)),
(sqltypes.DECIMAL, sqltypes.DECIMAL()),
(sqltypes.DECIMAL(10, 2), sqltypes.DECIMAL(10, 2)),
(sqltypes.INTEGER, sqltypes.INTEGER()),
(sqltypes.BIGINT, sqltypes.BIGINT()),
(sqltypes.Float, sqltypes.FLOAT()),
(sqltypes.TIMESTAMP, sqltypes.TIMESTAMP()),
(sqltypes.DATETIME, sqltypes.DATETIME()),
(sqltypes.DateTime, sqltypes.DATETIME()),
(sqltypes.DateTime(), sqltypes.DATETIME()),
(sqltypes.DATE, sqltypes.DATE()),
(sqltypes.Date, sqltypes.DATE()),
(sqltypes.TIME, sqltypes.TIME()),
(sqltypes.Time, sqltypes.TIME()),
(sqltypes.BOOLEAN, sqltypes.BOOLEAN()),
(sqltypes.Boolean, sqltypes.BOOLEAN()),
(
sqlite.DATE(storage_format="%(year)04d%(month)02d%(day)02d"),
sqltypes.DATE(),
),
(
sqlite.TIME(
storage_format="%(hour)02d%(minute)02d%(second)02d"
),
sqltypes.TIME(),
),
(
sqlite.DATETIME(
storage_format="%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d"
),
sqltypes.DATETIME(),
),
]
def _unsupported_args_fixture(self):
return [
("INTEGER(5)", sqltypes.INTEGER()),
("DATETIME(6, 12)", sqltypes.DATETIME()),
]
def _type_affinity_fixture(self):
return [
("LONGTEXT", sqltypes.TEXT()),
("TINYINT", sqltypes.INTEGER()),
("MEDIUMINT", sqltypes.INTEGER()),
("INT2", sqltypes.INTEGER()),
("UNSIGNED BIG INT", sqltypes.INTEGER()),
("INT8", sqltypes.INTEGER()),
("CHARACTER(20)", sqltypes.TEXT()),
("CLOB", sqltypes.TEXT()),
("CLOBBER", sqltypes.TEXT()),
("VARYING CHARACTER(70)", sqltypes.TEXT()),
("NATIVE CHARACTER(70)", sqltypes.TEXT()),
("BLOB", sqltypes.BLOB()),
("BLOBBER", sqltypes.NullType()),
("DOUBLE PRECISION", sqltypes.REAL()),
("FLOATY", sqltypes.REAL()),
("SOMETHING UNKNOWN", sqltypes.NUMERIC()),
]
def _fixture_as_string(self, fixture):
for from_, to_ in fixture:
if isinstance(from_, sqltypes.TypeEngine):
from_ = str(from_.compile())
elif isinstance(from_, type):
from_ = str(from_().compile())
yield from_, to_
def _test_lookup_direct(self, fixture, warnings=False):
dialect = sqlite.dialect()
for from_, to_ in self._fixture_as_string(fixture):
if warnings:
def go():
return dialect._resolve_type_affinity(from_)
final_type = testing.assert_warnings(
go, ["Could not instantiate"], regex=True
)
else:
final_type = dialect._resolve_type_affinity(from_)
expected_type = type(to_)
is_(type(final_type), expected_type)
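    # Round-trip variant: create a real table from the raw type string, then
    # reflect it with the inspector and compare the resulting column type and
    # its length/precision/scale arguments against the expected type.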
def _test_round_trip(self, fixture, warnings=False):
from sqlalchemy import inspect
conn = testing.db.connect()
for from_, to_ in self._fixture_as_string(fixture):
inspector = inspect(conn)
conn.execute("CREATE TABLE foo (data %s)" % from_)
try:
if warnings:
def go():
return inspector.get_columns("foo")[0]
col_info = testing.assert_warnings(
go, ["Could not instantiate"], regex=True
)
else:
col_info = inspector.get_columns("foo")[0]
expected_type = type(to_)
is_(type(col_info["type"]), expected_type)
# test args
for attr in ("scale", "precision", "length"):
if getattr(to_, attr, None) is not None:
eq_(
getattr(col_info["type"], attr),
getattr(to_, attr, None),
)
finally:
conn.execute("DROP TABLE foo")
def test_lookup_direct_lookup(self):
self._test_lookup_direct(self._fixed_lookup_fixture())
def test_lookup_direct_unsupported_args(self):
self._test_lookup_direct(
self._unsupported_args_fixture(), warnings=True
)
def test_lookup_direct_type_affinity(self):
self._test_lookup_direct(self._type_affinity_fixture())
def test_round_trip_direct_lookup(self):
self._test_round_trip(self._fixed_lookup_fixture())
def test_round_trip_direct_unsupported_args(self):
self._test_round_trip(self._unsupported_args_fixture(), warnings=True)
def test_round_trip_direct_type_affinity(self):
self._test_round_trip(self._type_affinity_fixture())
| apache-2.0 | -256,495,146,527,662,500 | 31.463166 | 79 | 0.48939 | false |
icereval/osf.io | addons/gitlab/models.py | 1 | 14594 | # -*- coding: utf-8 -*-
import os
import urlparse
from django.db import models
import markupsafe
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.gitlab import utils
from addons.gitlab.api import GitLabClient
from addons.gitlab.serializer import GitLabSerializer
from addons.gitlab import settings as gitlab_settings
from addons.gitlab.exceptions import ApiError, NotFoundError, GitLabError
from framework.auth import Auth
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = gitlab_settings.HOOK_DOMAIN or settings.DOMAIN
class GitLabFileNode(BaseFileNode):
_provider = 'gitlab'
class GitLabFolder(GitLabFileNode, Folder):
pass
class GitLabFile(GitLabFileNode, File):
version_identifier = 'commitSha'
@property
def _hashes(self):
try:
return {'commit': self._history[-1]['extra']['commitSha']}
except (IndexError, KeyError):
return None
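    # touch() accepts the revision under several aliases (revision/ref/branch)
    # and collapses them into the single `revision` argument of the base class.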
def touch(self, auth_header, revision=None, ref=None, branch=None, **kwargs):
revision = revision or ref or branch
return super(GitLabFile, self).touch(auth_header, revision=revision, **kwargs)
class GitLabProvider(object):
name = 'GitLab'
short_name = 'gitlab'
serializer = GitLabSerializer
def __init__(self, account=None):
super(GitLabProvider, self).__init__() # this does exactly nothing...
# provide an unauthenticated session by default
self.account = account
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.display_name if self.account else 'anonymous'
)
class UserSettings(BaseOAuthUserSettings):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = GitLabProvider
serializer = GitLabSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
repo_id = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
hook_secret = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True)
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def has_auth(self):
return bool(self.user_settings and self.user_settings.has_auth)
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='gitlab_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.repo_id = None
self.hook_id = None
self.hook_secret = None
def deauthorize(self, auth=None, log=True):
self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='gitlab_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.repo:
return 'https://{0}/{1}'.format(self.external_account.display_name, self.repo)
@property
def short_url(self):
if self.repo:
return self.repo
@property
def is_private(self):
connection = GitLabClient(external_account=self.external_account)
return not connection.repo(repo_id=self.repo_id)['public']
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('gitlab')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.user_settings and self.user_settings.has_auth:
valid_credentials = False
owner = self.user_settings.owner
connection = GitLabClient(external_account=self.external_account)
valid_credentials = True
try:
repos = connection.repos()
except GitLabError:
valid_credentials = False
if owner == user:
ret.update({'repos': repos})
ret.update({
'node_has_auth': True,
'gitlab_user': self.user or '',
'gitlab_repo': self.repo or '',
'gitlab_repo_id': self.repo_id if self.repo_id is not None else '0',
'gitlab_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'gitlab_host': self.external_account.display_name,
'gitlab_user_name': self.external_account.display_name,
'gitlab_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': valid_credentials,
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.external_account.oauth_key}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'host': 'https://{}'.format(self.external_account.oauth_secret),
'owner': self.user,
'repo': self.repo,
'repo_id': self.repo_id
}
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='gitlab')
if not metadata.get('extra'):
sha = None
urls = {}
else:
sha = metadata['extra']['fileSha']
urls = {
'view': '{0}?branch={1}'.format(url, sha),
'download': '{0}?action=download&branch={1}'.format(url, sha)
}
self.owner.add_log(
'gitlab_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'gitlab': {
'host': 'https://{0}'.format(self.external_account.display_name),
'user': self.user,
'repo': self.repo,
'sha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
connect = GitLabClient(external_account=self.external_account)
try:
repo = connect.repo(self.repo_id)
except (ApiError, GitLabError):
return
node_permissions = 'public' if node.is_public else 'private'
repo_permissions = 'private' if not repo['public'] else 'public'
if repo_permissions != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the GitLab '
'repo {user} / {repo} is {repo_perm}.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo_permissions),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo_permissions == 'private':
message += (
' Users can view the contents of this private GitLab '
'repository through this public project.'
)
else:
message += (
' The files in this GitLab repo can be viewed on GitLab '
'<u><a href="{url}">here</a></u>.'
).format(url=repo['http_url_to_repo'])
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'gitlab/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
# backwards compatibility -- TODO: is this necessary?
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the GitLab add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
#
return message
def after_fork(self, node, fork, user, save=True):
"""
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private GitLab repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on GitLab.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
#########
# Hooks #
#########
# TODO: Should Events be added here?
# TODO: Move hook logic to service
def add_hook(self, save=True):
if self.user_settings:
connect = GitLabClient(external_account=self.external_account)
secret = utils.make_hook_secret()
hook = connect.add_hook(
self.user, self.repo,
'web',
{
'url': urlparse.urljoin(
hook_domain,
os.path.join(
self.owner.api_url, 'gitlab', 'hook/'
)
),
'content_type': gitlab_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=gitlab_settings.HOOK_EVENTS,
)
if hook:
self.hook_id = hook.id
self.hook_secret = secret
if save:
self.save()
def delete_hook(self, save=True):
"""
:return bool: Hook was deleted
"""
if self.user_settings and self.hook_id:
connection = GitLabClient(external_account=self.external_account)
try:
response = connection.delete_hook(self.user, self.repo, self.hook_id)
except (GitLabError, NotFoundError):
return False
if response:
self.hook_id = None
if save:
self.save()
return True
return False
| apache-2.0 | 153,512,257,335,121,280 | 31.943567 | 119 | 0.550158 | false |
toabctl/osc | tests/test_revertfiles.py | 15 | 3281 | import osc.core
import osc.oscerr
import os
from common import OscTestCase
FIXTURES_DIR = os.path.join(os.getcwd(), 'revertfile_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestRevertFiles)
class TestRevertFiles(OscTestCase):
def _get_fixtures_dir(self):
return FIXTURES_DIR
def testRevertUnchanged(self):
"""revert an unchanged file (state == ' ')"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.OscIOError, p.revert, 'toadd2')
self._check_status(p, 'toadd2', '?')
def testRevertModified(self):
"""revert a modified file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('nochange')
self.__check_file('nochange')
self._check_status(p, 'nochange', ' ')
def testRevertAdded(self):
"""revert an added file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('toadd1')
self.assertTrue(os.path.exists('toadd1'))
self._check_addlist('replaced\naddedmissing\n')
self._check_status(p, 'toadd1', '?')
def testRevertDeleted(self):
"""revert a deleted file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('somefile')
self.__check_file('somefile')
self._check_deletelist('deleted\n')
self._check_status(p, 'somefile', ' ')
def testRevertMissing(self):
"""revert a missing (state == '!') file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('missing')
self.__check_file('missing')
self._check_status(p, 'missing', ' ')
def testRevertMissingAdded(self):
"""revert a missing file which was added to the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('addedmissing')
self._check_addlist('toadd1\nreplaced\n')
self.assertRaises(osc.oscerr.OscIOError, p.status, 'addedmissing')
def testRevertReplaced(self):
"""revert a replaced (state == 'R') file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('replaced')
self.__check_file('replaced')
self._check_addlist('toadd1\naddedmissing\n')
self._check_status(p, 'replaced', ' ')
def testRevertConflict(self):
"""revert a file which is in the conflict state"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.revert('foo')
self.__check_file('foo')
self.assertFalse(os.path.exists(os.path.join('.osc', '_in_conflict')))
self._check_status(p, 'foo', ' ')
def testRevertSkipped(self):
"""revert a skipped file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.OscIOError, p.revert, 'skipped')
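    # Helper: assert the reverted file exists in the working copy and is
    # byte-identical to the pristine copy kept under .osc/.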
def __check_file(self, fname):
storefile = os.path.join('.osc', fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(storefile))
self.assertEqual(open(fname, 'r').read(), open(storefile, 'r').read())
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-2.0 | 6,173,838,704,352,916,000 | 32.824742 | 78 | 0.587626 | false |
theoryno3/pylearn2 | pylearn2/utils/tests/test_video.py | 44 | 2040 | """Tests for pylearn2.utils.video"""
import numpy
from theano.compat import six
from pylearn2.compat import OrderedDict
from pylearn2.utils.video import FrameLookup, spatiotemporal_cubes
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2011, David Warde-Farley / Universite de Montreal"
__license__ = "BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "wardefar@iro"
# TODO: write a test for get_video_dims, raising SkipTest
# if pyffmpeg can't be imported
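# FrameLookup flattens (filename, frame_count) pairs into one index space:
# lookup[i] yields (filename, frame_count, frame_index_within_that_file), and
# len(lookup) is the total number of frames across all files.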
def test_frame_lookup():
input_data = [('foo', 15), ('bar', 19), ('baz', 26)]
lookup = FrameLookup(input_data)
assert len(lookup) == (15 + 19 + 26)
assert lookup[15] == ('bar', 19, 0)
assert lookup[14] == ('foo', 15, 14)
assert lookup[15 + 19 + 4] == ('baz', 26, 4)
# The test below is crashing on Travis, though not on mkg's machine. Ian
# suggests commenting the test out for now, to fast-track PR #1133.
def test_spatiotemporal_cubes():
def check_patch_coverage(files):
rng = numpy.random.RandomState(1)
inputs = [(name, array.shape) for name, array in six.iteritems(files)]
shape = (5, 7, 7)
for fname, index in spatiotemporal_cubes(inputs, shape, 50000, rng):
cube = files[fname][index]
if len(files[fname].shape) == 3:
assert cube.shape == shape
else:
assert cube.shape[:3] == shape[:3]
cube[...] = True
for fname, array in six.iteritems(files):
assert array.all()
files = OrderedDict(
file1=numpy.zeros((10, 30, 21), dtype=bool),
file2=numpy.zeros((15, 25, 28), dtype=bool),
file3=numpy.zeros((7, 18, 22), dtype=bool),
)
check_patch_coverage(files)
# Check that stuff still works with an extra color channel dimension.
files = OrderedDict(
file1=numpy.zeros((10, 30, 21, 3), dtype=bool),
file2=numpy.zeros((15, 25, 28, 3), dtype=bool),
file3=numpy.zeros((7, 18, 22, 3), dtype=bool),
)
check_patch_coverage(files)
| bsd-3-clause | -6,163,109,224,304,883,000 | 34.172414 | 78 | 0.617157 | false |
tiborsimko/zenodo | tests/unit/auditor/test_records.py | 6 | 13412 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test for Zenodo Auditor Record checks."""
from __future__ import absolute_import, print_function
import logging
import pytest
from invenio_records.models import RecordMetadata
from zenodo.modules.auditor.records import RecordAudit, RecordCheck
from zenodo.modules.records.api import ZenodoRecord
@pytest.fixture()
def record_audit():
return RecordAudit('testAudit', logging.getLogger('auditorTesting'), [])
def test_record_audit(record_audit, full_record, db, communities, users,
oaiid_pid):
# Add the "ecfunded" community since it's usually being added automatically
# after processing a deposit if the record has an EC grant.
    full_record['communities'].append('ecfunded')
# Mint the OAI identifier
oaiid_pid.pid_value = full_record['_oai']['id']
db.session.add(oaiid_pid)
    # Create the record metadata model, to store the record's JSON
record_model = RecordMetadata()
record_model.json = full_record
db.session.add(record_model)
db.session.commit()
record = ZenodoRecord(data=full_record, model=record_model)
check = RecordCheck(record_audit, record)
check.perform()
assert check.issues == {}
assert check.is_ok is True
assert check.dump() == {
'record': {
'recid': record['recid'],
'object_uuid': str(record.id),
},
'issues': {},
}
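# Parameter tuples throughout this module pair an input value with the issue
# the check is expected to report, with None meaning no issue is expected.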
duplicate_community_params = (
([], None),
(['a', 'b'], None),
(['a', 'a', 'a', 'b'], ['a']),
(['a', 'a', 'b', 'b'], ['a', 'b']),
)
@pytest.mark.parametrize(('record_communities', 'issue'),
duplicate_community_params)
def test_duplicate_communities(record_audit, minimal_record,
record_communities, issue):
minimal_record.update({'communities': record_communities})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_communities()
result_issue = check.issues.get('communities', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
unresolvable_communities_params = (
([], None),
(['c1', 'c2', 'c3', 'c4', 'zenodo', 'ecfunded'], None),
(['foo'], ['foo']),
(['c1', 'c2', 'foo'], ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
)
@pytest.mark.parametrize(('record_communities', 'issue'),
unresolvable_communities_params)
def test_unresolvable_communities(record_audit, minimal_record, communities,
record_communities, issue):
minimal_record.update({'communities': record_communities})
check = RecordCheck(record_audit, minimal_record)
check._unresolvable_communities()
result_issue = check.issues.get('communities', {}).get('unresolvable')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
duplicate_owners_params = (
([1], None),
([1, 2, 3], None),
([1, 1, 1, 2], [1]),
([1, 1, 2, 2], [1, 2]),
)
@pytest.mark.parametrize(('record_owners', 'issue'), duplicate_owners_params)
def test_duplicate_owners(record_audit, minimal_record, record_owners, issue):
minimal_record.update({'owners': record_owners})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_owners()
result_issue = check.issues.get('owners', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
unresolvable_owners_params = (
([1], None),
([1, 2, 3], None),
([4], [4]),
([1, 2, 3, 4], [4]),
)
@pytest.mark.parametrize(('record_owners', 'issue'),
unresolvable_owners_params)
def test_unresolvable_owners(record_audit, minimal_record, users,
record_owners, issue):
minimal_record.update({'owners': record_owners})
check = RecordCheck(record_audit, minimal_record)
check._unresolvable_owners()
result_issue = check.issues.get('owners', {}).get('unresolvable')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
duplicate_grants_params = (
([], None),
([{'$ref': '1'}, {'$ref': '2'}], None),
([{'$ref': '1'}, {'$ref': '1'}], ['1']),
([{'$ref': '1'}, {'$ref': '1'}, {'$ref': '2'}], ['1']),
([{'$ref': '1'}, {'$ref': '1'}, {'$ref': '2'}, {'$ref': '2'}], ['1', '2']),
)
@pytest.mark.parametrize(('record_grants', 'issue'), duplicate_grants_params)
def test_duplicate_grants(record_audit, minimal_record, record_grants, issue):
minimal_record.update({'grants': record_grants})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_grants()
result_issue = check.issues.get('grants', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
duplicate_files_params = [
([{'key': 'a', 'version_id': 1}], None),
([{'key': 'a', 'version_id': 1},
{'key': 'b', 'version_id': 2},
{'key': 'c', 'version_id': 3}],
None),
([{'key': 'a', 'version_id': 1},
{'key': 'a', 'version_id': 2},
{'key': 'a', 'version_id': 3},
{'key': 'b', 'version_id': 4}],
[{'key': 'a', 'version_id': 1},
{'key': 'a', 'version_id': 2},
{'key': 'a', 'version_id': 3}]),
([{'key': 'a', 'version_id': 1},
{'key': 'b', 'version_id': 1},
{'key': 'c', 'version_id': 1},
{'key': 'd', 'version_id': 2}],
[{'key': 'a', 'version_id': 1},
{'key': 'b', 'version_id': 1},
{'key': 'c', 'version_id': 1}]),
]
@pytest.mark.parametrize(('record_files', 'issue'), duplicate_files_params)
def test_duplicate_files(record_audit, minimal_record, record_files, issue):
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._duplicate_files()
result_issue = check.issues.get('files', {}).get('duplicates')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
missing_files_params = [
([{'key': 'a'}], False),
([{'key': 'a'}, {'key': 'b'}], False),
(None, True),
([], True),
]
@pytest.mark.parametrize(('record_files', 'issue'), missing_files_params)
def test_missing_files(record_audit, minimal_record, record_files, issue):
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._missing_files()
result_issue = check.issues.get('files', {}).get('missing')
assert bool(result_issue) == bool(issue)
multiple_buckets_params = [
([{'bucket': 'a'}], None),
([{'bucket': 'a'}, {'bucket': 'a'}, {'bucket': 'a'}], None),
([{'bucket': 'a'}, {'bucket': 'a'}, {'bucket': 'b'}], ['a', 'b']),
([{'bucket': 'a'}, {'bucket': 'b'}, {'bucket': 'c'}], ['a', 'b', 'c']),
]
@pytest.mark.parametrize(('record_files', 'issue'), multiple_buckets_params)
def test_multiple_buckets(record_audit, minimal_record, record_files, issue):
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._multiple_buckets()
result_issue = check.issues.get('files', {}).get('multiple_buckets')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
bucket_mismatch_params = [
('a', [{'bucket': 'a'}], None),
('a', [{'key': 'f1', 'bucket': 'a'}, {'key': 'f2', 'bucket': 'a'}], None),
('a', [{'key': 'f1', 'bucket': 'b'}], [{'key': 'f1', 'bucket': 'b'}]),
('a', [{'key': 'f1', 'bucket': 'a'}, {'key': 'f2', 'bucket': 'b'}],
[{'key': 'f2', 'bucket': 'b'}]),
]
@pytest.mark.parametrize(('record_bucket', 'record_files', 'issue'),
bucket_mismatch_params)
def test_bucket_mismatch(record_audit, minimal_record, record_bucket,
record_files, issue):
minimal_record.update({'_buckets': {'record': record_bucket}})
minimal_record.update({'_files': record_files})
check = RecordCheck(record_audit, minimal_record)
check._bucket_mismatch()
result_issue = check.issues.get('files', {}).get('bucket_mismatch')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert result_issue == issue
oai_required_params = [
({'id': 'oai:zenodo.org:1', 'updated': '2016-01-01T12:00:00Z'}, None),
({}, {'id': True, 'updated': True}),
({'id': 'oai:zenodo.org:1'}, {'updated': True}),
({'updated': '2016-01-01T12:00:00Z'}, {'id': True}),
]
@pytest.mark.parametrize(('record_oai', 'issue'), oai_required_params)
def test_oai_required(record_audit, minimal_record, record_oai, issue):
minimal_record.update({'_oai': record_oai})
check = RecordCheck(record_audit, minimal_record)
check._oai_required()
result_issue = check.issues.get('oai', {}).get('missing')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
oai_non_minted_pid_params = [
({'id': 'oai:zenodo.org:123'}, None),
({'id': 'oai:zenodo.org:invalid'}, 'oai:zenodo.org:invalid'),
]
@pytest.mark.parametrize(('record_oai', 'issue'), oai_non_minted_pid_params)
def test_oai_non_minted_pid(record_audit, minimal_record, db, oaiid_pid,
record_oai, issue):
db.session.add(oaiid_pid)
db.session.commit()
minimal_record.update({'_oai': record_oai})
check = RecordCheck(record_audit, minimal_record)
check._oai_non_minted_pid()
result_issue = check.issues.get('oai', {}).get('non_minted_pid')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
oai_duplicate_sets_params = [
({}, None),
({'sets': ['a', 'b']}, None),
({'sets': ['a', 'a', 'a', 'b']}, ['a']),
({'sets': ['a', 'a', 'b', 'b']}, ['a', 'b']),
]
@pytest.mark.parametrize(('record_oai', 'issue'), oai_duplicate_sets_params)
def test_oai_duplicate_sets(record_audit, minimal_record, record_oai, issue):
minimal_record.update({'_oai': record_oai})
check = RecordCheck(record_audit, minimal_record)
check._oai_duplicate_sets()
result_issue = check.issues.get('oai', {}).get('duplicate_oai_sets')
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert len(result_issue) == len(issue)
assert set(result_issue) == set(issue)
oai_community_correspondence = [
([], [], None),
(['a'], ['user-a'], None),
(['a', 'b'], ['user-a', 'user-b'], None),
(['a'], [], {'missing_oai_sets': ['user-a']}),
(['a', 'b'], ['user-a'], {'missing_oai_sets': ['user-b'], }),
([], ['user-a'], {'redundant_oai_sets': ['user-a']}),
(['a'], ['user-a', 'user-b'], {'redundant_oai_sets': ['user-b']}),
(['a'], ['user-b'],
{'redundant_oai_sets': ['user-b'], 'missing_oai_sets': ['user-a']}),
]
@pytest.mark.parametrize(('record_communities', 'record_oai', 'issue'),
oai_community_correspondence)
def test_oai_community_correspondence(record_audit, minimal_record, db,
record_communities, record_oai, issue):
minimal_record.update({'communities': record_communities})
minimal_record.update({'_oai': {'sets': record_oai}})
check = RecordCheck(record_audit, minimal_record)
check._oai_community_correspondence()
result_issue = check.issues.get('oai', {})
assert bool(result_issue) == bool(issue)
if result_issue and issue:
assert result_issue == issue
def test_jsonschema(app, record_audit, minimal_record):
check = RecordCheck(record_audit, ZenodoRecord(minimal_record))
check.jsonschema()
assert check.issues.get('jsonschema') is None
minimal_record['invalid_key'] = 'should not be here'
check = RecordCheck(record_audit, ZenodoRecord(minimal_record))
check.jsonschema()
assert check.issues.get('jsonschema')
| gpl-2.0 | -510,509,355,662,428,600 | 33.30179 | 79 | 0.604682 | false |
drpngx/tensorflow | tensorflow/python/client/device_lib.py | 42 | 1459 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for listing the devices available in the local process."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow
def list_local_devices(session_config=None):
"""List the available devices available in the local process.
Args:
session_config: a session config proto or None to use the default config.
Returns:
A list of `DeviceAttribute` protocol buffers.
"""
def _convert(pb_str):
m = device_attributes_pb2.DeviceAttributes()
m.ParseFromString(pb_str)
return m
return [
_convert(s)
for s in pywrap_tensorflow.list_devices(session_config=session_config)
]
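# Illustrative usage (not part of the original module): print the name of each
# device visible to the local process.
#
#   for device in list_local_devices():
#       print(device.name)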
| apache-2.0 | 6,409,628,989,750,781,000 | 33.738095 | 80 | 0.706648 | false |
openstack/python-zaqarclient | zaqarclient/auth/__init__.py | 1 | 1268 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zaqarclient.auth import base
from zaqarclient.auth import keystone
from zaqarclient.auth import signed_url
_BACKENDS = {
'noauth': base.NoAuth,
'keystone': keystone.KeystoneAuth,
'signed-url': signed_url.SignedURLAuth,
}
def get_backend(backend='keystone', options=None):
    """Loads the auth backend `backend`.
    :param backend: The backend name to load.
Default: `keystone`
:type backend: `six.string_types`
:param options: Options to pass to the Auth
backend. Refer to the backend for more info.
:type options: `dict`
"""
if options is None:
options = {}
backend = _BACKENDS[backend](options)
return backend
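# Illustrative usage (not part of the original module): load the no-op backend,
# e.g. for local testing without Keystone.
#
#   auth_backend = get_backend('noauth')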
| apache-2.0 | -6,660,981,345,585,998,000 | 29.190476 | 69 | 0.710568 | false |
palerdot/calibre | src/calibre/library/database2.py | 4 | 164046 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import os, sys, shutil, cStringIO, glob, time, functools, traceback, re, \
json, uuid, hashlib, copy, types
from collections import defaultdict, namedtuple
import threading, random
from itertools import repeat
from calibre import prints, force_unicode
from calibre.ebooks.metadata import (title_sort, author_to_author_sort,
string_to_authors, get_title_sort_pat)
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.database import LibraryDatabase
from calibre.library.field_metadata import FieldMetadata, TagsIcons
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.library.caches import ResultCache
from calibre.library.custom_columns import CustomColumns
from calibre.library.sqlite import connect, IntegrityError
from calibre.library.prefs import DBPrefs
from calibre.ebooks.metadata.book.base import Metadata
from calibre.constants import preferred_encoding, iswindows, filesystem_encoding
from calibre.ptempfile import (PersistentTemporaryFile,
base_dir, SpooledTemporaryFile)
from calibre.customize.ui import (run_plugins_on_import,
run_plugins_on_postimport)
from calibre import isbytestring
from calibre.utils.filenames import (ascii_filename, samefile,
WindowsAtomicFolderMove, hardlink_file)
from calibre.utils.date import (utcnow, now as nowf, utcfromtimestamp,
parse_only_date, UNDEFINED_DATE, parse_date)
from calibre.utils.config import prefs, tweaks, from_json, to_json
from calibre.utils.icu import sort_key, strcmp, lower
from calibre.utils.search_query_parser import saved_searches, set_saved_searches
from calibre.ebooks import check_ebook_format
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import find_books_in_directory, import_book_directory_multiple, import_book_directory, recursive_import
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.db.categories import Tag, CATEGORY_SORTS
from calibre.utils.localization import (canonicalize_lang,
calibre_langcode_to_name)
copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
SPOOL_SIZE = 30*1024*1024
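# Threshold (30 MB) for spooled temporary files: data is presumably kept in
# memory up to this size and rolled over to disk beyond it.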
ProxyMetadata = namedtuple('ProxyMetadata', 'book_size ondevice_col db_approx_formats')
class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
An ebook metadata database that stores references to ebook files on disk.
'''
PATH_LIMIT = 40 if 'win32' in sys.platform else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
def fget(self):
return self.conn.get('pragma user_version;', all=False)
def fset(self, val):
self.conn.execute('pragma user_version=%d'%int(val))
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
@dynamic_property
def library_id(self):
doc = ('The UUID for this library. As long as the user only operates'
' on libraries with calibre, it will be unique')
def fget(self):
if self._library_id_ is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
def fset(self, val):
self._library_id_ = unicode(val)
self.conn.executescript('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES ("%s");
'''%self._library_id_)
self.conn.commit()
return property(doc=doc, fget=fget, fset=fset)
def connect(self):
if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%(259-4*self.PATH_LIMIT-10))
exists = os.path.exists(self.dbpath)
if not exists:
# Be more strict when creating new libraries as the old calculation
# allowed for max path lengths of 265 chars.
if (iswindows and len(self.library_path) >
self.WINDOWS_LIBRARY_PATH_LIMIT):
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)
self.conn = connect(self.dbpath, self.row_factory)
if exists and self.user_version == 0:
self.conn.close()
os.remove(self.dbpath)
self.conn = connect(self.dbpath, self.row_factory)
if self.user_version == 0:
self.initialize_database()
# remember to add any filter to the connect method in sqlite.py as well
# so that various code that connects directly will not complain about
# missing functions
self.books_list_filter = self.conn.create_dynamic_filter('books_list_filter')
# Store temporary tables in memory
self.conn.execute('pragma temp_store=2')
self.conn.commit()
@classmethod
def exists_at(cls, path):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False, is_second_db=False, progress_callback=None,
restore_all_prefs=False):
self.is_second_db = is_second_db
self.get_data_as_dict = types.MethodType(get_data_as_dict, self, LibraryDatabase2)
try:
if isbytestring(library_path):
library_path = library_path.decode(filesystem_encoding)
except:
traceback.print_exc()
self.field_metadata = FieldMetadata()
self.format_filename_cache = defaultdict(dict)
self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
if not os.path.exists(library_path):
os.makedirs(library_path)
self.listeners = set([])
self.library_path = os.path.abspath(library_path)
self.row_factory = row_factory
self.dbpath = os.path.join(library_path, 'metadata.db')
self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
self.dbpath)
if read_only and os.path.exists(self.dbpath):
# Work on only a copy of metadata.db to ensure that
# metadata.db is not changed
pt = PersistentTemporaryFile('_metadata_ro.db')
pt.close()
shutil.copyfile(self.dbpath, pt.name)
self.dbpath = pt.name
apply_default_prefs = not os.path.exists(self.dbpath)
self.connect()
self.is_case_sensitive = (not iswindows and
not os.path.exists(self.dbpath.replace('metadata.db',
'MeTAdAtA.dB')))
SchemaUpgrade.__init__(self)
# Guarantee that the library_id is set
self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
if apply_default_prefs and default_prefs is not None:
if progress_callback is None:
progress_callback = lambda x, y: True
dbprefs = DBPrefs(self)
progress_callback(None, len(default_prefs))
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if not restore_all_prefs and key in frozenset(['news_to_be_synced']):
continue
dbprefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'], f['datatype'],
f['is_multiple'] is not None and len(f['is_multiple']) > 0,
f['is_editable'], f['display'])
self.initialize_template_cache()
self.initialize_dynamic()
def initialize_template_cache(self):
self.formatter_template_cache = {}
def get_property(self, idx, index_is_id=False, loc=-1):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
return row[loc]
def initialize_dynamic(self):
self.field_metadata = FieldMetadata() # Ensure we start with a clean copy
self.prefs = DBPrefs(self)
defs = self.prefs.defaults
defs['gui_restriction'] = defs['cs_restriction'] = ''
defs['categories_using_hierarchy'] = []
defs['column_color_rules'] = []
defs['column_icon_rules'] = []
defs['grouped_search_make_user_categories'] = []
defs['similar_authors_search_key'] = 'authors'
defs['similar_authors_match_kind'] = 'match_any'
defs['similar_publisher_search_key'] = 'publisher'
defs['similar_publisher_match_kind'] = 'match_any'
defs['similar_tags_search_key'] = 'tags'
defs['similar_tags_match_kind'] = 'match_all'
defs['similar_series_search_key'] = 'series'
defs['similar_series_match_kind'] = 'match_any'
defs['book_display_fields'] = [
('title', False), ('authors', True), ('formats', True),
('series', True), ('identifiers', True), ('tags', True),
('path', True), ('publisher', False), ('rating', False),
('author_sort', False), ('sort', False), ('timestamp', False),
('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
('last_modified', False), ('size', False), ('languages', False),
]
defs['virtual_libraries'] = {}
defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
defs['virt_libs_hidden'] = defs['virt_libs_order'] = ()
# Migrate the bool tristate tweak
defs['bools_are_tristate'] = \
tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
if self.prefs.get('bools_are_tristate') is None:
self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])
# Migrate column coloring rules
if self.prefs.get('column_color_name_1', None) is not None:
from calibre.library.coloring import migrate_old_rule
old_rules = []
for i in range(1, 6):
col = self.prefs.get('column_color_name_'+str(i), None)
templ = self.prefs.get('column_color_template_'+str(i), None)
if col and templ:
try:
del self.prefs['column_color_name_'+str(i)]
rules = migrate_old_rule(self.field_metadata, templ)
for templ in rules:
old_rules.append((col, templ))
except:
pass
if old_rules:
self.prefs['column_color_rules'] += old_rules
# Migrate saved search and user categories to db preference scheme
def migrate_preference(key, default):
oldval = prefs[key]
if oldval != default:
self.prefs[key] = oldval
prefs[key] = default
if key not in self.prefs:
self.prefs[key] = default
migrate_preference('user_categories', {})
migrate_preference('saved_searches', {})
if not self.is_second_db:
set_saved_searches(self, 'saved_searches')
# migrate grouped_search_terms
if self.prefs.get('grouped_search_terms', None) is None:
try:
ogst = tweaks.get('grouped_search_terms', {})
ngst = {}
for t in ogst:
ngst[icu_lower(t)] = ogst[t]
self.prefs.set('grouped_search_terms', ngst)
except:
pass
# migrate the gui_restriction preference to a virtual library
gr_pref = self.prefs.get('gui_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['gui_restriction'] = ''
self.prefs['virtual_lib_on_startup'] = gr_pref
# migrate the cs_restriction preference to a virtual library
gr_pref = self.prefs.get('cs_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['cs_restriction'] = ''
self.prefs['cs_virtual_lib_on_startup'] = gr_pref
# Rename any user categories with names that differ only in case
user_cats = self.prefs.get('user_categories', [])
catmap = {}
for uc in user_cats:
ucl = icu_lower(uc)
if ucl not in catmap:
catmap[ucl] = []
catmap[ucl].append(uc)
cats_changed = False
for uc in catmap:
if len(catmap[uc]) > 1:
prints('found user category case overlap', catmap[uc])
cat = catmap[uc][0]
suffix = 1
while icu_lower((cat + unicode(suffix))) in catmap:
suffix += 1
prints('Renaming user category %s to %s'%(cat, cat+unicode(suffix)))
user_cats[cat + unicode(suffix)] = user_cats[cat]
del user_cats[cat]
cats_changed = True
if cats_changed:
self.prefs.set('user_categories', user_cats)
if not self.is_second_db:
load_user_template_functions(self.library_id,
self.prefs.get('user_template_functions', []))
# Load the format filename cache
self.refresh_format_cache()
self.conn.executescript('''
DROP TRIGGER IF EXISTS author_insert_trg;
CREATE TEMP TRIGGER author_insert_trg
AFTER INSERT ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
END;
DROP TRIGGER IF EXISTS author_update_trg;
CREATE TEMP TRIGGER author_update_trg
BEFORE UPDATE ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name)
WHERE id=NEW.id AND name <> NEW.name;
END;
''')
self.conn.execute(
'UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL')
self.conn.executescript(u'''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.executescript(u'''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_filtered_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id and books_list_filter(book)) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.commit()
CustomColumns.__init__(self)
template = '''\
(SELECT {query} FROM books_{table}_link AS link INNER JOIN
{table} ON(link.{link_col}={table}.id) WHERE link.book=books.id)
{col}
'''
columns = ['id', 'title',
# col table link_col query
('authors', 'authors', 'author', 'sortconcat(link.id, name)'),
'timestamp',
'(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
('rating', 'ratings', 'rating', 'ratings.rating'),
('tags', 'tags', 'tag', 'group_concat(name)'),
'(SELECT text FROM comments WHERE book=books.id) comments',
('series', 'series', 'series', 'name'),
('publisher', 'publishers', 'publisher', 'name'),
'series_index',
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
'path',
'pubdate',
'uuid',
'has_cover',
('au_map', 'authors', 'author',
'aum_sortconcat(link.id, authors.name, authors.sort, authors.link)'),
'last_modified',
'(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
('languages', 'languages', 'lang_code',
'sortconcat(link.id, languages.lang_code)'),
]
lines = []
for col in columns:
line = col
if isinstance(col, tuple):
line = template.format(col=col[0], table=col[1],
link_col=col[2], query=col[3])
lines.append(line)
custom_map = self.custom_columns_in_meta()
# custom col labels are numbers (the id in the custom_columns table)
custom_cols = list(sorted(custom_map.keys()))
lines.extend([custom_map[x] for x in custom_cols])
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
'au_map':18, 'last_modified':19, 'identifiers':20, 'languages':21}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.values())
for col in custom_cols:
self.FIELD_MAP[col] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label'],
base,
prefer_custom=True)
if self.custom_column_num_map[col]['datatype'] == 'series':
# account for the series index column. Field_metadata knows that
# the series index is one larger than the series. If you change
# it here, be sure to change it there as well.
self.FIELD_MAP[str(col)+'_index'] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label']+'_index',
base,
prefer_custom=True)
self.FIELD_MAP['ondevice'] = base = base+1
self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
self.FIELD_MAP['marked'] = base = base+1
self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
self.FIELD_MAP['series_sort'] = base = base+1
self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)
script = '''
DROP VIEW IF EXISTS meta2;
CREATE TEMP VIEW meta2 AS
SELECT
{0}
FROM books;
'''.format(', \n'.join(lines))
self.conn.executescript(script)
self.conn.commit()
# Reconstruct the user categories, putting them into field_metadata
# Assumption is that someone else will fix them if they change.
self.field_metadata.remove_dynamic_categories()
for user_cat in sorted(self.prefs.get('user_categories', {}).keys(), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
self.field_metadata.add_user_category(label=cat_name, name=user_cat)
# add grouped search term user categories
muc = self.prefs.get('grouped_search_make_user_categories', [])
for cat in sorted(self.prefs.get('grouped_search_terms', {}).keys(), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
try:
self.field_metadata.add_user_category(label=u'@' + cat, name=cat)
except:
traceback.print_exc()
if len(saved_searches().names()):
self.field_metadata.add_search_category(label='search', name=_('Searches'))
self.field_metadata.add_grouped_search_terms(
self.prefs.get('grouped_search_terms', {}))
self.book_on_device_func = None
self.data = ResultCache(self.FIELD_MAP, self.field_metadata, db_prefs=self.prefs)
self.search = self.data.search
self.search_getting_ids = self.data.search_getting_ids
self.refresh = functools.partial(self.data.refresh, self)
self.sort = self.data.sort
self.multisort = self.data.multisort
self.index = self.data.index
self.refresh_ids = functools.partial(self.data.refresh_ids, self)
self.row = self.data.row
self.has_id = self.data.has_id
self.count = self.data.count
self.set_marked_ids = self.data.set_marked_ids
for prop in (
'author_sort', 'authors', 'comment', 'comments',
'publisher', 'rating', 'series', 'series_index', 'tags',
'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified', 'languages',
):
fm = {'comment':'comments', 'metadata_last_modified':
'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
d = self.conn.get('SELECT book FROM metadata_dirtied', all=True)
with self.dirtied_lock:
self.dirtied_sequence = 0
self.dirtied_cache = {}
for x in d:
self.dirtied_cache[x[0]] = self.dirtied_sequence
self.dirtied_sequence += 1
self.refresh_ondevice = functools.partial(self.data.refresh_ondevice, self)
self.refresh()
self.last_update_check = self.last_modified()
def break_cycles(self):
self.data.break_cycles()
self.data = self.field_metadata = self.prefs = self.listeners = \
self.refresh_ondevice = None
def initialize_database(self):
metadata_sqlite = P('metadata_sqlite.sql', data=True,
allow_user_override=False).decode('utf-8')
self.conn.executescript(metadata_sqlite)
self.conn.commit()
if self.user_version == 0:
self.user_version = 1
def saved_search_names(self):
return saved_searches().names()
def saved_search_rename(self, old_name, new_name):
saved_searches().rename(old_name, new_name)
def saved_search_lookup(self, name):
return saved_searches().lookup(name)
def saved_search_add(self, name, val):
saved_searches().add(name, val)
def saved_search_delete(self, name):
saved_searches().delete(name)
def saved_search_set_all(self, smap):
saved_searches().set_all(smap)
def last_modified(self):
''' Return last modified time as a UTC datetime object'''
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
def refresh_format_cache(self):
self.format_filename_cache = defaultdict(dict)
for book_id, fmt, name in self.conn.get(
'SELECT book,format,name FROM data'):
self.format_filename_cache[book_id][fmt.upper() if fmt else ''] = name
self.format_metadata_cache = defaultdict(dict)
def check_if_modified(self):
if self.last_modified() > self.last_update_check:
self.refresh()
self.refresh_format_cache()
self.last_update_check = utcnow()
def path(self, index, index_is_id=False):
"Return the relative path to the directory containing this book's files as a unicode string."
row = self.data._data[index] if index_is_id else self.data[index]
return row[self.FIELD_MAP['path']].replace('/', os.sep)
def abspath(self, index, index_is_id=False, create_dirs=True):
"Return the absolute path to the directory containing this book's files as a unicode string."
path = os.path.join(self.library_path, self.path(index, index_is_id=index_is_id))
if create_dirs and not os.path.exists(path):
os.makedirs(path)
return path
def construct_path_name(self, id):
'''
Construct the directory name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT].decode('ascii', 'replace')
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT].decode('ascii', 'replace')
while author[-1] in (' ', '.'):
author = author[:-1]
if not author:
author = ascii_filename(_('Unknown')).decode(
'ascii', 'replace')
path = author + '/' + title + ' (%d)'%id
return path
def construct_file_name(self, id):
'''
Construct the file name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT].decode('ascii', 'replace')
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT].decode('ascii', 'replace')
name = title + ' - ' + author
while name.endswith('.'):
name = name[:-1]
return name
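# A minimal sketch of how the two helpers above combine (assuming `db` is an
# open LibraryDatabase2 instance and 123 is an existing book id; the values
# shown are illustrative only):
#
#   rel_dir = db.construct_path_name(123)   # e.g. 'Author Name/Title (123)'
#   fname = db.construct_file_name(123)     # e.g. 'Title - Author Name'
#   # a format file would then live at <library>/<rel_dir>/<fname>.epub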
def rmtree(self, path, permanent=False):
if not self.normpath(self.library_path).startswith(self.normpath(path)):
delete_tree(path, permanent=permanent)
def normpath(self, path):
path = os.path.abspath(os.path.realpath(path))
if not self.is_case_sensitive:
path = os.path.normcase(path).lower()
return path
def set_path(self, index, index_is_id=False):
'''
Set the path to the directory containing this books files based on its
current title and author. If there was a previous directory, its contents
are copied and it is deleted.
'''
id = index if index_is_id else self.id(index)
path = self.construct_path_name(id)
current_path = self.path(id, index_is_id=True).replace(os.sep, '/')
formats = self.formats(id, index_is_id=True)
formats = formats.split(',') if formats else []
# Check if the metadata used to construct paths has changed
fname = self.construct_file_name(id)
changed = False
for format in formats:
name = self.format_filename_cache[id].get(format.upper(), None)
if name and name != fname:
changed = True
break
if path == current_path and not changed:
return
spath = os.path.join(self.library_path, *current_path.split('/'))
tpath = os.path.join(self.library_path, *path.split('/'))
source_ok = current_path and os.path.exists(spath)
wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
try:
if not os.path.exists(tpath):
os.makedirs(tpath)
if source_ok: # Migrate existing files
self.copy_cover_to(id, os.path.join(tpath, 'cover.jpg'),
index_is_id=True, windows_atomic_move=wam,
use_hardlink=True)
for format in formats:
copy_function = functools.partial(self.copy_format_to, id,
format, index_is_id=True, windows_atomic_move=wam,
use_hardlink=True)
try:
self.add_format(id, format, None, index_is_id=True,
path=tpath, notify=False, copy_function=copy_function)
except NoSuchFormat:
continue
self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
self.dirtied([id], commit=False)
self.conn.commit()
self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
# Delete not needed directories
if source_ok:
if not samefile(spath, tpath):
if wam is not None:
wam.delete_originals()
self.rmtree(spath, permanent=True)
parent = os.path.dirname(spath)
if len(os.listdir(parent)) == 0:
self.rmtree(parent, permanent=True)
finally:
if wam is not None:
wam.close_handles()
curpath = self.library_path
c1, c2 = current_path.split('/'), path.split('/')
if not self.is_case_sensitive and len(c1) == len(c2):
# On case-insensitive systems, title and author renames that only
# change case don't cause any changes to the directories in the file
# system. This can lead to having the directory names not match the
# title/author, which leads to trouble when libraries are copied to
# a case-sensitive system. The following code attempts to fix this
# by checking each segment. If they are different because of case,
# then rename the segment to some temp file name, then rename it
# back to the correct name. Note that the code above correctly
# handles files in the directories, so no need to do them here.
for oldseg, newseg in zip(c1, c2):
if oldseg.lower() == newseg.lower() and oldseg != newseg:
try:
os.rename(os.path.join(curpath, oldseg),
os.path.join(curpath, newseg))
except:
break # Fail silently since nothing catastrophic has happened
curpath = os.path.join(curpath, newseg)
def add_listener(self, listener):
'''
Add a listener. It will be called on change events with two arguments:
the event name and the list of affected ids.
'''
self.listeners.add(listener)
def notify(self, event, ids=[]):
'Notify all listeners'
for listener in self.listeners:
try:
listener(event, ids)
except:
traceback.print_exc()
continue
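# Hedged usage sketch for the listener mechanism above; the callback signature
# follows the add_listener docstring and `db` is an assumed open instance:
#
#   def on_change(event, ids):
#       print 'library event:', event, 'affected ids:', ids
#
#   db.add_listener(on_change)
#   db.notify('metadata', [1, 2, 3])   # firing an event manually, for illustration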
def cover(self, index, index_is_id=False, as_file=False, as_image=False,
as_path=False):
'''
Return the cover image as a bytestring (in JPEG format) or None.
WARNING: Using as_path will copy the cover to a temp file and return
the path to the temp file. You should delete the temp file when you are
done with it.
:param as_file: If True return the image as an open file object (a SpooledTemporaryFile)
:param as_image: If True return the image as a QImage object
'''
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if as_path:
pt = PersistentTemporaryFile('_dbcover.jpg')
with pt:
shutil.copyfileobj(f, pt)
return pt.name
if as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
else:
ret = f.read()
if as_image:
from PyQt4.Qt import QImage
i = QImage()
i.loadFromData(ret)
ret = i
return ret
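# Example sketch for cover(): the keyword arguments select the return type
# (assuming `db` is an open instance and 42 is a valid book id):
#
#   raw = db.cover(42, index_is_id=True)                       # JPEG bytes or None
#   tmp = db.cover(42, index_is_id=True, as_path=True)         # temp file, caller deletes
#   fobj = db.cover(42, index_is_id=True, as_file=True)        # SpooledTemporaryFile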
def cover_last_modified(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
try:
return utcfromtimestamp(os.stat(path).st_mtime)
except:
# Cover doesn't exist
pass
return self.last_modified()
### The field-style interface. These use field keys.
def get_field(self, idx, key, default=None, index_is_id=False):
mi = self.get_metadata(idx, index_is_id=index_is_id,
get_cover=key == 'cover')
return mi.get(key, default)
def standard_field_keys(self):
return self.field_metadata.standard_field_keys()
def custom_field_keys(self, include_composites=True):
return self.field_metadata.custom_field_keys(include_composites)
def all_field_keys(self):
return self.field_metadata.all_field_keys()
def sortable_field_keys(self):
return self.field_metadata.sortable_field_keys()
def searchable_fields(self):
return self.field_metadata.searchable_fields()
def search_term_to_field_key(self, term):
return self.field_metadata.search_term_to_field_key(term)
def custom_field_metadata(self, include_composites=True):
return self.field_metadata.custom_field_metadata(include_composites)
def all_metadata(self):
return self.field_metadata.all_metadata()
def metadata_for_field(self, key):
return self.field_metadata[key]
def clear_dirtied(self, book_id, sequence):
'''
Clear the dirtied indicator for the books. This is used when fetching
metadata, creating an OPF, and writing a file are separated into steps.
The last step is clearing the indicator.
'''
with self.dirtied_lock:
dc_sequence = self.dirtied_cache.get(book_id, None)
# print 'clear_dirty: check book', book_id, dc_sequence
if dc_sequence is None or sequence is None or dc_sequence == sequence:
# print 'needs to be cleaned'
self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
(book_id,))
self.conn.commit()
try:
del self.dirtied_cache[book_id]
except:
pass
elif dc_sequence is not None:
# print 'book needs to be done again'
pass
def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
commit=True, callback=None):
'''
Write metadata for each record to an individual OPF file. If callback
is not None, it is called once at the start with the number of book_ids
being processed. And once for every book_id, with arguments (book_id,
mi, ok).
'''
if book_ids is None:
book_ids = [x[0] for x in self.conn.get(
'SELECT book FROM metadata_dirtied', all=True)]
if callback is not None:
book_ids = tuple(book_ids)
callback(len(book_ids), True, False)
for book_id in book_ids:
if not self.data.has_id(book_id):
if callback is not None:
callback(book_id, None, False)
continue
path, mi, sequence = self.get_metadata_for_dump(book_id)
if path is None:
if callback is not None:
callback(book_id, mi, False)
continue
try:
raw = metadata_to_opf(mi)
with lopen(path, 'wb') as f:
f.write(raw)
if remove_from_dirtied:
self.clear_dirtied(book_id, sequence)
except:
pass
if callback is not None:
callback(book_id, mi, True)
if commit:
self.conn.commit()
def update_last_modified(self, book_ids, commit=False, now=None):
if now is None:
now = nowf()
if book_ids:
self.conn.executemany(
'UPDATE books SET last_modified=? WHERE id=?',
[(now, book) for book in book_ids])
for book_id in book_ids:
self.data.set(book_id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if commit:
self.conn.commit()
def dirtied(self, book_ids, commit=True):
self.update_last_modified(book_ids)
for book in book_ids:
with self.dirtied_lock:
# print 'dirtied: check id', book
if book in self.dirtied_cache:
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
continue
# print 'book not already dirty'
self.conn.execute(
'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
(book,))
self.dirtied_cache[book] = self.dirtied_sequence
self.dirtied_sequence += 1
# If the commit doesn't happen, then the DB table will be wrong. This
# could lead to a problem because on restart, we won't put the book back
# into the dirtied_cache. We deal with this by writing the dirtied_cache
# back to the table on GUI exit. Not perfect, but probably OK
if book_ids and commit:
self.conn.commit()
def get_a_dirtied_book(self):
with self.dirtied_lock:
l = len(self.dirtied_cache)
if l > 0:
# The random stuff is here to prevent a single book from
# blocking progress if its metadata cannot be written for some
# reason.
id_ = self.dirtied_cache.keys()[random.randint(0, l-1)]
sequence = self.dirtied_cache[id_]
return (id_, sequence)
return (None, None)
def dirty_queue_length(self):
return len(self.dirtied_cache)
def commit_dirty_cache(self):
'''
Set the dirty indication for every book in the cache. The vast majority
of the time, the indication will already be set. However, sometimes
exceptions may have prevented a commit, which may remove some dirty
indications from the DB. This call will put them back. Note that there
is no problem with setting a dirty indication for a book that isn't in
fact dirty. Just wastes a few cycles.
'''
with self.dirtied_lock:
book_ids = list(self.dirtied_cache.keys())
self.dirtied_cache = {}
self.dirtied(book_ids)
def get_metadata_for_dump(self, idx):
path, mi = (None, None)
# get the current sequence number for this book to pass back to the
# backup thread. This will avoid double calls in the case where the
# thread has not done the work between the put and the get_metadata
with self.dirtied_lock:
sequence = self.dirtied_cache.get(idx, None)
# print 'get_md_for_dump', idx, sequence
try:
# While a book is being created, the path is empty. Don't bother to
# try to write the opf, because it will go to the wrong folder.
if self.path(idx, index_is_id=True):
path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
mi = self.get_metadata(idx, index_is_id=True)
# Always set cover to cover.jpg. Even if cover doesn't exist,
# no harm done. This way no need to call dirtied when
# cover is set/removed
mi.cover = 'cover.jpg'
except:
# This almost certainly means that the book has been deleted while
# the backup operation sat in the queue.
pass
return (path, mi, sequence)
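# Sketch of the backup cycle built from the dirtied-cache helpers above.
# Hedged: this mirrors what the metadata backup thread is expected to do,
# with `db` as an assumed open instance:
#
#   book_id, sequence = db.get_a_dirtied_book()
#   if book_id is not None:
#       path, mi, sequence = db.get_metadata_for_dump(book_id)
#       if path is not None:
#           # ... write metadata_to_opf(mi) to path ...
#           db.clear_dirtied(book_id, sequence)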
def get_metadata(self, idx, index_is_id=False, get_cover=False,
get_user_categories=True, cover_as_data=False):
'''
Convenience method to return metadata as a :class:`Metadata` object.
Note that the list of formats is not verified.
'''
idx = idx if index_is_id else self.id(idx)
try:
row = self.data._data[idx]
except:
row = None
if row is None:
raise ValueError('No book with id: %d'%idx)
fm = self.FIELD_MAP
mi = Metadata(None, template_cache=self.formatter_template_cache)
aut_list = row[fm['au_map']]
if aut_list:
aut_list = [p.split(':::') for p in aut_list.split(':#:') if p]
else:
aut_list = []
aum = []
aus = {}
aul = {}
try:
for (author, author_sort, link) in aut_list:
aut = author.replace('|', ',')
aum.append(aut)
aus[aut] = author_sort.replace('|', ',')
aul[aut] = link
except ValueError:
# Author has either ::: or :#: in it
for x in row[fm['authors']].split(','):
aum.append(x.replace('|', ','))
aul[aum[-1]] = ''
aus[aum[-1]] = aum[-1]
mi.title = row[fm['title']]
mi.authors = aum
mi.author_sort = row[fm['author_sort']]
mi.author_sort_map = aus
mi.author_link_map = aul
mi.comments = row[fm['comments']]
mi.publisher = row[fm['publisher']]
mi.timestamp = row[fm['timestamp']]
mi.pubdate = row[fm['pubdate']]
mi.uuid = row[fm['uuid']]
mi.title_sort = row[fm['sort']]
mi.last_modified = row[fm['last_modified']]
formats = row[fm['formats']]
mi.format_metadata = {}
if not formats:
good_formats = None
else:
formats = sorted(formats.split(','))
mi.format_metadata = FormatMetadata(self, idx, formats)
good_formats = FormatsList(formats, mi.format_metadata)
mi.formats = good_formats
mi.db_approx_formats = formats
mi._proxy_metadata = p = ProxyMetadata(row[fm['size']], row[fm['ondevice']], formats)
mi.book_size = p.book_size
mi.ondevice_col= p.ondevice_col
tags = row[fm['tags']]
if tags:
mi.tags = [i.strip() for i in tags.split(',')]
languages = row[fm['languages']]
if languages:
mi.languages = [i.strip() for i in languages.split(',')]
mi.series = row[fm['series']]
if mi.series:
mi.series_index = row[fm['series_index']]
mi.rating = row[fm['rating']]
mi.set_identifiers(self.get_identifiers(idx, index_is_id=True))
mi.application_id = idx
mi.id = idx
mi.set_all_user_metadata(self.field_metadata.custom_field_metadata())
for key, meta in self.field_metadata.custom_iteritems():
if meta['datatype'] == 'composite':
mi.set(key, val=row[meta['rec_index']])
else:
val, extra = self.get_custom_and_extra(idx, label=meta['label'],
index_is_id=True)
mi.set(key, val=val, extra=extra)
user_cats = self.prefs['user_categories']
user_cat_vals = {}
if get_user_categories:
for ucat in user_cats:
res = []
for name,cat,ign in user_cats[ucat]:
v = mi.get(cat, None)
if isinstance(v, list):
if name in v:
res.append([name,cat])
elif name == v:
res.append([name,cat])
user_cat_vals[ucat] = res
mi.user_categories = user_cat_vals
if get_cover:
if cover_as_data:
cdata = self.cover(idx, index_is_id=True)
if cdata:
mi.cover_data = ('jpeg', cdata)
else:
mi.cover = self.cover(idx, index_is_id=True, as_path=True)
mi.has_cover = _('Yes') if self.has_cover(idx) else ''
return mi
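# Usage sketch for get_metadata() (book id 42 is illustrative):
#
#   mi = db.get_metadata(42, index_is_id=True, get_cover=True, cover_as_data=True)
#   print mi.title, mi.authors, mi.tags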
def has_book(self, mi):
title = mi.title
if title:
if not isinstance(title, unicode):
title = title.decode(preferred_encoding, 'replace')
return bool(self.conn.get('SELECT id FROM books where title=?', (title,), all=False))
return False
def has_id(self, id_):
return self.data._data[id_] is not None
def books_with_same_title(self, mi, all_matches=True):
title = mi.title
ans = set()
if title:
title = lower(force_unicode(title))
for book_id in self.all_ids():
x = self.title(book_id, index_is_id=True)
if lower(x) == title:
ans.add(book_id)
if not all_matches:
break
return ans
def find_identical_books(self, mi):
fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
isinstance(pat, basestring) else pat, repl) for pat, repl in
[
(r'[\[\](){}<>\'";,:#]', ''),
(get_title_sort_pat(), ''),
(r'[-._]', ' '),
(r'\s+', ' ')
]
]
def fuzzy_title(title):
title = title.strip().lower()
for pat, repl in fuzzy_title_patterns:
title = pat.sub(repl, title)
return title
identical_book_ids = set([])
if mi.authors:
try:
quathors = mi.authors[:10] # Too many authors causes parsing of
# the search expression to fail
query = u' and '.join([u'author:"=%s"'%(a.replace('"', '')) for a in
quathors])
qauthors = mi.authors[10:]
except ValueError:
return identical_book_ids
try:
book_ids = self.data.parse(query)
except:
traceback.print_exc()
return identical_book_ids
if qauthors and book_ids:
matches = set()
qauthors = {lower(x) for x in qauthors}
for book_id in book_ids:
aut = self.authors(book_id, index_is_id=True)
if aut:
aut = {lower(x.replace('|', ',')) for x in
aut.split(',')}
if aut.issuperset(qauthors):
matches.add(book_id)
book_ids = matches
for book_id in book_ids:
fbook_title = self.title(book_id, index_is_id=True)
fbook_title = fuzzy_title(fbook_title)
mbook_title = fuzzy_title(mi.title)
if fbook_title == mbook_title:
identical_book_ids.add(book_id)
return identical_book_ids
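# Hedged usage sketch for find_identical_books(): titles are compared after the
# fuzzy normalisation above (punctuation stripped, separators collapsed) and
# authors are matched via the search engine. The Metadata import path and the
# sample values are assumptions:
#
#   from calibre.ebooks.metadata.book.base import Metadata
#   candidate = Metadata('The Hobbit', ['J. R. R. Tolkien'])
#   duplicate_ids = db.find_identical_books(candidate)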
def remove_cover(self, id, notify=True, commit=True):
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError):
time.sleep(0.2)
os.remove(path)
self.conn.execute('UPDATE books SET has_cover=0 WHERE id=?', (id,))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], False, row_is_id=True)
if notify:
self.notify('cover', [id])
def set_cover(self, id, data, notify=True, commit=True):
'''
Set the cover for this book.
`data`: Can be either a QImage, QPixmap, file object or bytestring
'''
base_path = os.path.join(self.library_path, self.path(id,
index_is_id=True))
if not os.path.exists(base_path):
self.set_path(id, index_is_id=True)
base_path = os.path.join(self.library_path, self.path(id,
index_is_id=True))
self.dirtied([id])
if not os.path.exists(base_path):
os.makedirs(base_path)
path = os.path.join(base_path, 'cover.jpg')
if callable(getattr(data, 'save', None)):
data.save(path)
else:
if callable(getattr(data, 'read', None)):
data = data.read()
try:
save_cover_data_to(data, path)
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=1,last_modified=? WHERE id=?',
(now, id))
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
if notify:
self.notify('cover', [id])
def has_cover(self, id):
return self.data.get(id, self.FIELD_MAP['cover'], row_is_id=True)
def set_has_cover(self, id, val):
dval = 1 if val else 0
now = nowf()
self.conn.execute(
'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
(dval, now, id))
self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
return self.book_on_device_func(id)
return None
def book_on_device_string(self, id):
loc = []
count = 0
on = self.book_on_device(id)
if on is not None:
m, a, b, count = on[:4]
if m is not None:
loc.append(_('Main'))
if a is not None:
loc.append(_('Card A'))
if b is not None:
loc.append(_('Card B'))
return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
def set_book_on_device_func(self, func):
self.book_on_device_func = func
def all_formats(self):
formats = self.conn.get('SELECT DISTINCT format from data')
if not formats:
return set([])
return set([f[0] for f in formats])
def format_files(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
return [(v, k) for k, v in self.format_filename_cache[id].iteritems()]
def formats(self, index, index_is_id=False, verify_formats=True):
''' Return available formats as a comma separated list or None if there are no available formats '''
id_ = index if index_is_id else self.id(index)
formats = self.data.get(id_, self.FIELD_MAP['formats'], row_is_id=True)
if not formats:
return None
if not verify_formats:
return formats
formats = formats.split(',')
ans = []
for fmt in formats:
if self.format_abspath(id_, fmt, index_is_id=True) is not None:
ans.append(fmt)
if not ans:
return None
return ','.join(ans)
def has_format(self, index, format, index_is_id=False):
return self.format_abspath(index, format, index_is_id) is not None
def format_last_modified(self, id_, fmt):
m = self.format_metadata(id_, fmt)
if m:
return m['mtime']
def format_metadata(self, id_, fmt, allow_cache=True, update_db=False,
commit=False):
if not fmt:
return {}
fmt = fmt.upper()
if allow_cache:
x = self.format_metadata_cache[id_].get(fmt, None)
if x is not None:
return x
path = self.format_abspath(id_, fmt, index_is_id=True)
ans = {}
if path is not None:
stat = os.stat(path)
ans['path'] = path
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
self.format_metadata_cache[id_][fmt] = ans
if update_db:
self.conn.execute(
'UPDATE data SET uncompressed_size=? WHERE format=? AND'
' book=?', (stat.st_size, fmt, id_))
if commit:
self.conn.commit()
return ans
def format_hash(self, id_, fmt):
path = self.format_abspath(id_, fmt, index_is_id=True)
if path is None:
raise NoSuchFormat('Record %d has no fmt: %s'%(id_, fmt))
sha = hashlib.sha256()
with lopen(path, 'rb') as f:
while True:
raw = f.read(SPOOL_SIZE)
sha.update(raw)
if len(raw) < SPOOL_SIZE:
break
return sha.hexdigest()
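# Example: hashing a book's EPUB file in SPOOL_SIZE chunks (id and format are
# illustrative; NoSuchFormat is raised when the file is missing):
#
#   try:
#       digest = db.format_hash(42, 'EPUB')
#   except NoSuchFormat:
#       digest = None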
def format_path(self, index, fmt, index_is_id=False):
'''
This method is intended to be used only in those rare situations, like
Drag'n Drop, when you absolutely need the path to the original file.
Otherwise, use format(..., as_path=True).
Note that a networked backend will always return None.
'''
path = self.format_abspath(index, fmt, index_is_id=index_is_id)
if path is None:
id_ = index if index_is_id else self.id(index)
raise NoSuchFormat('Record %d has no format: %s'%(id_, fmt))
return path
def format_abspath(self, index, format, index_is_id=False):
'''
Return absolute path to the ebook file of format `format`
WARNING: This method will return a dummy path for a network backend DB,
so do not rely on it, use format(..., as_path=True) instead.
Currently used only in calibredb list, the viewer and the catalogs (via
get_data_as_dict()).
Apart from the viewer, I don't believe any of the others do any file
I/O with the results of this call.
'''
id = index if index_is_id else self.id(index)
try:
name = self.format_filename_cache[id][format.upper()]
except:
return None
if name:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
format = ('.' + format.lower()) if format else ''
fmt_path = os.path.join(path, name+format)
if os.path.exists(fmt_path):
return fmt_path
try:
candidates = glob.glob(os.path.join(path, '*'+format))
except: # If path contains strange characters this throws an exc
candidates = []
if format and candidates and os.path.exists(candidates[0]):
try:
shutil.copyfile(candidates[0], fmt_path)
except:
# This can happen if candidates[0] or fmt_path is too long,
# which can happen if the user copied the library from a
# non windows machine to a windows machine.
return None
return fmt_path
def copy_format_to(self, index, fmt, dest, index_is_id=False,
windows_atomic_move=None, use_hardlink=False):
'''
Copy the format ``fmt`` to the file-like object ``dest``. If the
specified format does not exist, raises :class:`NoSuchFormat` error.
dest can also be a path, in which case the format is copied to it, iff
the path is different from the current path (taking case sensitivity
into account).
If use_hardlink is True, a hard link will be created instead of the
file being copied. Use with care, because a hard link means that
modifying any one file will cause both files to be modified.
windows_atomic_move is an internally used parameter. You should not use
it in any code outside this module.
'''
path = self.format_abspath(index, fmt, index_is_id=index_is_id)
if path is None:
id_ = index if index_is_id else self.id(index)
raise NoSuchFormat('Record %d has no %s file'%(id_, fmt))
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if dest:
if samefile(path, dest):
# Ensure that the file has the same case as dest
try:
if path != dest:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
windows_atomic_move.copy_path_to(path, dest)
else:
if hasattr(dest, 'write'):
with lopen(path, 'rb') as f:
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
elif dest:
if samefile(dest, path):
if not self.is_case_sensitive and path != dest:
# Ensure that the file has the same case as dest
try:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
if use_hardlink:
try:
hardlink_file(path, dest)
return
except:
pass
with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
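# Usage sketch for copy_format_to() with both destination kinds (book id,
# format and paths are illustrative):
#
#   with lopen('/tmp/book.epub', 'wb') as out:
#       db.copy_format_to(42, 'EPUB', out, index_is_id=True)            # file-like dest
#   db.copy_format_to(42, 'EPUB', '/tmp/copy.epub', index_is_id=True)   # path dest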
def copy_cover_to(self, index, dest, index_is_id=False,
windows_atomic_move=None, use_hardlink=False):
'''
Copy the cover to the file-like object ``dest``. Returns False
if no cover exists or dest is the same file as the current cover.
dest can also be a path in which case the cover is
copied to it iff the path is different from the current path (taking
case sensitivity into account).
If use_hardlink is True, a hard link will be created instead of the
file being copied. Use with care, because a hard link means that
modifying any one file will cause both files to be modified.
windows_atomic_move is an internally used parameter. You should not use
it in any code outside this module.
'''
id = index if index_is_id else self.id(index)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if os.access(path, os.R_OK) and dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
return True
else:
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
if hasattr(dest, 'write'):
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
return True
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
return False
def format(self, index, format, index_is_id=False, as_file=False,
mode='r+b', as_path=False, preserve_filename=False):
'''
Return the ebook format as a bytestring or `None` if the format doesn't exist,
or we don't have permission to write to the ebook file.
:param as_file: If True the ebook format is returned as a file object. Note
that the file object is a SpooledTemporaryFile, so if what you want to
do is copy the format to another file, use :method:`copy_format_to`
instead for performance.
:param as_path: Copies the format file to a temp file and returns the
path to the temp file
:param preserve_filename: If True and returning a path the filename is
the same as that used in the library. Note that using
this means that repeated calls yield the same
temp file (which is re-created each time)
:param mode: This is ignored (present for legacy compatibility)
'''
path = self.format_abspath(index, format, index_is_id=index_is_id)
if path is not None:
with lopen(path, mode) as f:
if as_path:
if preserve_filename:
bd = base_dir()
d = os.path.join(bd, 'format_abspath')
try:
os.makedirs(d)
except:
pass
fname = os.path.basename(path)
ret = os.path.join(d, fname)
with lopen(ret, 'wb') as f2:
shutil.copyfileobj(f, f2)
else:
with PersistentTemporaryFile('.'+format.lower()) as pt:
shutil.copyfileobj(f, pt)
ret = pt.name
elif as_file:
ret = SpooledTemporaryFile(SPOOL_SIZE)
shutil.copyfileobj(f, ret)
ret.seek(0)
# Various bits of code try to use the name as the default
# title when reading metadata, so set it
ret.name = f.name
else:
ret = f.read()
return ret
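# Usage sketch for format(): the return type depends on the keyword arguments,
# and `mode` is ignored (book id and format are illustrative):
#
#   raw = db.format(42, 'EPUB', index_is_id=True)                  # bytestring or None
#   tmp = db.format(42, 'EPUB', index_is_id=True, as_path=True)    # path to a temp copy
#   fobj = db.format(42, 'EPUB', index_is_id=True, as_file=True)   # SpooledTemporaryFile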
def add_format_with_hooks(self, index, format, fpath, index_is_id=False,
path=None, notify=True, replace=True):
npath = self.run_import_plugins(fpath, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = lopen(npath, 'rb')
format = check_ebook_format(stream, format)
id = index if index_is_id else self.id(index)
retval = self.add_format(id, format, stream, replace=replace,
index_is_id=True, path=path, notify=notify)
run_plugins_on_postimport(self, id, format)
return retval
def add_format(self, index, format, stream, index_is_id=False, path=None,
notify=True, replace=True, copy_function=None):
id = index if index_is_id else self.id(index)
if not format:
format = ''
self.format_metadata_cache[id].pop(format.upper(), None)
name = self.format_filename_cache[id].get(format.upper(), None)
if path is None:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
if name and not replace:
return False
name = self.construct_file_name(id)
ext = ('.' + format.lower()) if format else ''
dest = os.path.join(path, name+ext)
pdir = os.path.dirname(dest)
if not os.path.exists(pdir):
os.makedirs(pdir)
size = 0
if copy_function is not None:
copy_function(dest)
size = os.path.getsize(dest)
else:
if (not getattr(stream, 'name', False) or not samefile(dest,
stream.name)):
with lopen(dest, 'wb') as f:
shutil.copyfileobj(stream, f)
size = f.tell()
elif os.path.exists(dest):
size = os.path.getsize(dest)
self.conn.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
(id, format.upper(), size, name))
self.update_last_modified([id], commit=False)
self.conn.commit()
self.format_filename_cache[id][format.upper()] = name
self.refresh_ids([id])
if notify:
self.notify('metadata', [id])
return True
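# Hedged sketch for adding a format from disk: add_format_with_hooks() runs the
# import plugins first, while plain add_format() just copies the stream (paths
# and book id are illustrative):
#
#   db.add_format_with_hooks(42, 'EPUB', '/tmp/new.epub', index_is_id=True)
#   with lopen('/tmp/new.epub', 'rb') as stream:
#       db.add_format(42, 'EPUB', stream, index_is_id=True, replace=True)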
def save_original_format(self, book_id, fmt, notify=True):
fmt = fmt.upper()
if 'ORIGINAL' in fmt:
raise ValueError('Cannot save original of an original fmt')
opath = self.format_abspath(book_id, fmt, index_is_id=True)
if opath is None:
return False
nfmt = 'ORIGINAL_'+fmt
with lopen(opath, 'rb') as f:
return self.add_format(book_id, nfmt, f, index_is_id=True, notify=notify)
def original_fmt(self, book_id, fmt):
fmt = fmt
nfmt = ('ORIGINAL_%s'%fmt).upper()
opath = self.format_abspath(book_id, nfmt, index_is_id=True)
return fmt if opath is None else nfmt
def restore_original_format(self, book_id, original_fmt, notify=True):
opath = self.format_abspath(book_id, original_fmt, index_is_id=True)
if opath is not None:
fmt = original_fmt.partition('_')[2]
with lopen(opath, 'rb') as f:
self.add_format(book_id, fmt, f, index_is_id=True, notify=False)
self.remove_format(book_id, original_fmt, index_is_id=True, notify=notify)
return True
return False
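# Sketch of the ORIGINAL_* round trip provided by the three methods above
# (format name is illustrative):
#
#   db.save_original_format(42, 'EPUB')               # keeps a copy as ORIGINAL_EPUB
#   # ... conversion or polishing overwrites the EPUB ...
#   db.restore_original_format(42, 'ORIGINAL_EPUB')   # puts the saved copy back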
def delete_book(self, id, notify=True, commit=True, permanent=False,
do_clean=True):
'''
Removes book from the result cache and the underlying database.
If you set commit to False, you must call clean() manually afterwards
'''
try:
path = os.path.join(self.library_path, self.path(id, index_is_id=True))
except:
path = None
if path and os.path.exists(path):
self.rmtree(path, permanent=permanent)
parent = os.path.dirname(path)
if len(os.listdir(parent)) == 0:
self.rmtree(parent, permanent=permanent)
self.conn.execute('DELETE FROM books WHERE id=?', (id,))
if commit:
self.conn.commit()
if do_clean:
self.clean()
self.data.books_deleted([id])
if notify:
self.notify('delete', [id])
def remove_format(self, index, format, index_is_id=False, notify=True,
commit=True, db_only=False):
id = index if index_is_id else self.id(index)
if not format:
format = ''
self.format_metadata_cache[id].pop(format.upper(), None)
name = self.format_filename_cache[id].get(format.upper(), None)
if name:
if not db_only:
try:
path = self.format_abspath(id, format, index_is_id=True)
if path:
delete_file(path)
except:
traceback.print_exc()
self.format_filename_cache[id].pop(format.upper(), None)
self.conn.execute('DELETE FROM data WHERE book=? AND format=?', (id, format.upper()))
if commit:
self.conn.commit()
self.refresh_ids([id])
if notify:
self.notify('metadata', [id])
def clean_standard_field(self, field, commit=False):
# Don't bother with validity checking. Let the exception fly out so
# we can see what happened
def doit(table, ltable_col):
st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
'FROM books WHERE id=book) < 1;')%table
self.conn.execute(st)
st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
'FROM books_%(table)s_link WHERE '
'%(ltable_col)s=%(table)s.id) < 1;') % dict(
table=table, ltable_col=ltable_col)
self.conn.execute(st)
fm = self.field_metadata[field]
doit(fm['table'], fm['link_column'])
if commit:
self.conn.commit()
def clean(self):
'''
Remove orphaned entries.
'''
def doit(ltable, table, ltable_col):
st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
'FROM books WHERE id=book) < 1;')%ltable
self.conn.execute(st)
st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
'FROM books_%(ltable)s_link WHERE '
'%(ltable_col)s=%(table)s.id) < 1;') % dict(
ltable=ltable, table=table, ltable_col=ltable_col)
self.conn.execute(st)
for ltable, table, ltable_col in [
('authors', 'authors', 'author'),
('publishers', 'publishers', 'publisher'),
('tags', 'tags', 'tag'),
('series', 'series', 'series'),
('languages', 'languages', 'lang_code'),
]:
doit(ltable, table, ltable_col)
for id_, tag in self.conn.get('SELECT id, name FROM tags', all=True):
if not tag.strip():
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?',
(id_,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id_,))
self.clean_custom()
self.conn.commit()
def get_books_for_category(self, category, id_):
ans = set([])
if category not in self.field_metadata:
return ans
field = self.field_metadata[category]
if field['datatype'] == 'composite':
dex = field['rec_index']
for book in self.data.iterall():
if field['is_multiple']:
vals = [v.strip() for v in
book[dex].split(field['is_multiple']['cache_to_list'])
if v.strip()]
if id_ in vals:
ans.add(book[0])
elif book[dex] == id_:
ans.add(book[0])
return ans
ans = self.conn.get(
'SELECT book FROM books_{tn}_link WHERE {col}=?'.format(
tn=field['table'], col=field['link_column']), (id_,))
return set(x[0] for x in ans)
########## data structures for get_categories
CATEGORY_SORTS = CATEGORY_SORTS
MATCH_TYPE = ('any', 'all')
class TCat_Tag(object):
def __init__(self, name, sort):
self.n = name
self.s = sort
self.c = 0
self.id_set = set()
self.rt = 0
self.rc = 0
self.id = None
def set_all(self, c, rt, rc, id):
self.c = c
self.rt = rt
self.rc = rc
self.id = id
def __str__(self):
return unicode(self)
def __unicode__(self):
return 'n=%s s=%s c=%d rt=%d rc=%d id=%s'%\
(self.n, self.s, self.c, self.rt, self.rc, self.id)
def clean_user_categories(self):
user_cats = self.prefs.get('user_categories', {})
new_cats = {}
for k in user_cats:
comps = [c.strip() for c in k.split('.') if c.strip()]
if len(comps) == 0:
i = 1
while True:
if unicode(i) not in user_cats:
new_cats[unicode(i)] = user_cats[k]
break
i += 1
else:
new_cats['.'.join(comps)] = user_cats[k]
try:
if new_cats != user_cats:
self.prefs.set('user_categories', new_cats)
except:
pass
return new_cats
def get_categories(self, sort='name', ids=None, icon_map=None):
#start = last = time.clock()
if icon_map is not None and type(icon_map) != TagsIcons:
raise TypeError('icon_map passed to get_categories must be of type TagsIcons')
if sort not in self.CATEGORY_SORTS:
raise ValueError('sort ' + sort + ' not a valid value')
self.books_list_filter.change([] if not ids else ids)
id_filter = None if ids is None else frozenset(ids)
tb_cats = self.field_metadata
tcategories = {}
tids = {}
md = []
# First, build the maps. We need a category->items map and an
# item -> (item_id, sort_val) map to use in the books loop
for category in tb_cats.iterkeys():
cat = tb_cats[category]
if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
or category in ['news', 'formats'] or cat.get('is_csp',
False):
continue
# Get the ids for the item values
if not cat['is_custom']:
funcs = {
'authors': self.get_authors_with_ids,
'series': self.get_series_with_ids,
'publisher': self.get_publishers_with_ids,
'tags': self.get_tags_with_ids,
'languages': self.get_languages_with_ids,
'rating': self.get_ratings_with_ids,
}
func = funcs.get(category, None)
if func:
list = func()
else:
raise ValueError(category + ' has no get with ids function')
else:
list = self.get_custom_items_with_ids(label=cat['label'])
tids[category] = {}
if category == 'authors':
for l in list:
(id, val, sort_val) = (l[0], l[1], l[2])
tids[category][val] = (id, sort_val)
elif category == 'languages':
for l in list:
id, val = l[0], calibre_langcode_to_name(l[1])
tids[category][l[1]] = (id, val)
elif cat['datatype'] == 'series':
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, title_sort(val))
elif cat['datatype'] == 'rating':
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, '{0:05.2f}'.format(val))
elif cat['datatype'] == 'text' and cat['is_multiple'] and \
cat['display'].get('is_names', False):
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, author_to_author_sort(val))
else:
for l in list:
(id, val) = (l[0], l[1])
tids[category][val] = (id, val)
# add an empty category to the category map
tcategories[category] = {}
# create a list of category/field_index for the books scan to use.
# This saves iterating through field_metadata for each book
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None), False))
for category in tb_cats.iterkeys():
cat = tb_cats[category]
if cat['datatype'] == 'composite' and \
cat['display'].get('make_category', False):
tids[category] = {}
tcategories[category] = {}
md.append((category, cat['rec_index'],
cat['is_multiple'].get('cache_to_list', None),
cat['datatype'] == 'composite'))
#print 'end phase "collection":', time.clock() - last, 'seconds'
#last = time.clock()
# Now scan every book looking for category items.
# Code below is duplicated because it shaves off 10% of the loop time
id_dex = self.FIELD_MAP['id']
rating_dex = self.FIELD_MAP['rating']
tag_class = LibraryDatabase2.TCat_Tag
for book in self.data.iterall():
if id_filter is not None and book[id_dex] not in id_filter:
continue
rating = book[rating_dex]
# We kept track of all possible category field_map positions above
for (cat, dex, mult, is_comp) in md:
if not book[dex]:
continue
tid_cat = tids[cat]
tcats_cat = tcategories[cat]
if not mult:
val = book[dex]
if is_comp:
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, val)
tcats_cat[val] = item
item.c += 1
item.id = val
if rating > 0:
item.rt += rating
item.rc += 1
continue
try:
(item_id, sort_val) = tid_cat[val] # let exceptions fly
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, sort_val)
tcats_cat[val] = item
item.c += 1
item.id_set.add(book[0])
item.id = item_id
if rating > 0:
item.rt += rating
item.rc += 1
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
else:
vals = book[dex].split(mult)
if is_comp:
vals = [v.strip() for v in vals if v.strip()]
for val in vals:
if val not in tid_cat:
tid_cat[val] = (val, val)
for val in vals:
try:
(item_id, sort_val) = tid_cat[val] # let exceptions fly
item = tcats_cat.get(val, None)
if not item:
item = tag_class(val, sort_val)
tcats_cat[val] = item
item.c += 1
item.id_set.add(book[0])
item.id = item_id
if rating > 0:
item.rt += rating
item.rc += 1
except:
prints('get_categories: item', val, 'is not in', cat, 'list!')
#print 'end phase "books":', time.clock() - last, 'seconds'
#last = time.clock()
# Now do news
tcategories['news'] = {}
cat = tb_cats['news']
tn = cat['table']
cn = cat['column']
if ids is None:
query = '''SELECT id, {0}, count, avg_rating, sort
FROM tag_browser_{1}'''.format(cn, tn)
else:
query = '''SELECT id, {0}, count, avg_rating, sort
FROM tag_browser_filtered_{1}'''.format(cn, tn)
# results will be sorted later
data = self.conn.get(query)
for r in data:
item = LibraryDatabase2.TCat_Tag(r[1], r[1])
item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
tcategories['news'][r[1]] = item
#print 'end phase "news":', time.clock() - last, 'seconds'
#last = time.clock()
# Build the real category list by iterating over the temporary copy
# and building the Tag instances.
categories = {}
tag_class = Tag
for category in tb_cats.iterkeys():
if category not in tcategories:
continue
cat = tb_cats[category]
# prepare the place where we will put the array of Tags
categories[category] = []
# icon_map is not None if get_categories is to store an icon and
# possibly a tooltip in the tag structure.
icon = None
label = tb_cats.key_to_label(category)
if icon_map:
if not tb_cats.is_custom_field(category):
if category in icon_map:
icon = icon_map[label]
else:
icon = icon_map['custom:']
icon_map[category] = icon
datatype = cat['datatype']
avgr = lambda x: 0.0 if x.rc == 0 else x.rt/x.rc
# Duplicate the build of items below to avoid using a lambda func
# in the main Tag loop. Saves a few %
if datatype == 'rating':
formatter = (lambda x:u'\u2605'*int(x/2))
avgr = lambda x: x.n
# eliminate the zero ratings line as well as count == 0
items = [v for v in tcategories[category].values() if v.c > 0 and v.n != 0]
elif category == 'authors':
# Clean up the authors strings to human-readable form
formatter = (lambda x: x.replace('|', ','))
items = [v for v in tcategories[category].values() if v.c > 0]
elif category == 'languages':
# Use a human readable language string
formatter = calibre_langcode_to_name
items = [v for v in tcategories[category].values() if v.c > 0]
else:
formatter = (lambda x:unicode(x))
items = [v for v in tcategories[category].values() if v.c > 0]
# sort the list
if sort == 'name':
kf = lambda x:sort_key(x.s)
reverse=False
elif sort == 'popularity':
kf = lambda x: x.c
reverse=True
else:
kf = avgr
reverse=True
items.sort(key=kf, reverse=reverse)
if tweaks['categories_use_field_for_author_name'] == 'author_sort' and\
(category == 'authors' or
(cat['display'].get('is_names', False) and
cat['is_custom'] and cat['is_multiple'] and
cat['datatype'] == 'text')):
use_sort_as_name = True
else:
use_sort_as_name = False
is_editable = (category not in ['news', 'rating', 'languages'] and
datatype != "composite")
categories[category] = [tag_class(formatter(r.n), count=r.c, id=r.id,
avg=avgr(r), sort=r.s, icon=icon,
category=category,
id_set=r.id_set, is_editable=is_editable,
use_sort_as_name=use_sort_as_name)
for r in items]
#print 'end phase "tags list":', time.clock() - last, 'seconds'
#last = time.clock()
# Needed for legacy databases that have multiple ratings that
# map to n stars
for r in categories['rating']:
r.id_set = None
for x in categories['rating']:
if r.name == x.name and r.id != x.id:
r.count = r.count + x.count
categories['rating'].remove(x)
break
# We delayed computing the standard formats category because it does not
# use a view, but is computed dynamically
categories['formats'] = []
icon = None
if icon_map and 'formats' in icon_map:
icon = icon_map['formats']
for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
fmt = fmt[0]
if ids is not None:
count = self.conn.get('''SELECT COUNT(id)
FROM data
WHERE format=? AND
books_list_filter(book)''', (fmt,),
all=False)
else:
count = self.conn.get('''SELECT COUNT(id)
FROM data
WHERE format=?''', (fmt,),
all=False)
if count > 0:
categories['formats'].append(Tag(fmt, count=count, icon=icon,
category='formats', is_editable=False))
if sort == 'popularity':
categories['formats'].sort(key=lambda x: x.count, reverse=True)
else: # no ratings exist to sort on
# No need for ICU here.
categories['formats'].sort(key=lambda x:x.name)
# Now do identifiers. This works like formats
categories['identifiers'] = []
icon = None
if icon_map and 'identifiers' in icon_map:
icon = icon_map['identifiers']
for ident in self.conn.get('SELECT DISTINCT type FROM identifiers'):
ident = ident[0]
if ids is not None:
count = self.conn.get('''SELECT COUNT(book)
FROM identifiers
WHERE type=? AND
books_list_filter(book)''', (ident,),
all=False)
else:
count = self.conn.get('''SELECT COUNT(id)
FROM identifiers
WHERE type=?''', (ident,),
all=False)
if count > 0:
categories['identifiers'].append(Tag(ident, count=count, icon=icon,
category='identifiers',
is_editable=False))
if sort == 'popularity':
categories['identifiers'].sort(key=lambda x: x.count, reverse=True)
else: # no ratings exist to sort on
# No need for ICU here.
categories['identifiers'].sort(key=lambda x:x.name)
#### Now do the user-defined categories. ####
user_categories = dict.copy(self.clean_user_categories())
# We want to use same node in the user category as in the source
# category. To do that, we need to find the original Tag node. There is
# a time/space tradeoff here. By converting the tags into a map, we can
# do the verification in the category loop much faster, at the cost of
# temporarily duplicating the categories lists.
taglist = {}
for c in categories.keys():
taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), categories[c]))
muc = self.prefs.get('grouped_search_make_user_categories', [])
gst = self.prefs.get('grouped_search_terms', {})
for c in gst:
if c not in muc:
continue
user_categories[c] = []
for sc in gst[c]:
if sc in categories.keys():
for t in categories[sc]:
user_categories[c].append([t.name, sc, 0])
gst_icon = icon_map['gst'] if icon_map else None
for user_cat in sorted(user_categories.keys(), key=sort_key):
items = []
names_seen = {}
for (name,label,ign) in user_categories[user_cat]:
n = icu_lower(name)
if label in taglist and n in taglist[label]:
if user_cat in gst:
# for gst items, make copy and consolidate the tags by name.
if n in names_seen:
t = names_seen[n]
t.id_set |= taglist[label][n].id_set
t.count += taglist[label][n].count
t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
else:
t = copy.copy(taglist[label][n])
t.icon = gst_icon
names_seen[t.name] = t
items.append(t)
else:
items.append(taglist[label][n])
# else: do nothing, to not include nodes with zero counts
cat_name = '@' + user_cat # add the '@' to avoid name collision
# Not a problem if we accumulate entries in the icon map
if icon_map is not None:
icon_map[cat_name] = icon_map['user:']
if sort == 'popularity':
categories[cat_name] = \
sorted(items, key=lambda x: x.count, reverse=True)
elif sort == 'name':
categories[cat_name] = \
sorted(items, key=lambda x: sort_key(x.sort))
else:
categories[cat_name] = \
sorted(items, key=lambda x:x.avg_rating, reverse=True)
#### Finally, the saved searches category ####
items = []
icon = None
if icon_map and 'search' in icon_map:
icon = icon_map['search']
for srch in saved_searches().names():
items.append(Tag(srch, tooltip=saved_searches().lookup(srch),
sort=srch, icon=icon, category='search',
is_editable=False))
if len(items):
if icon_map is not None:
icon_map['search'] = icon_map['search']
categories['search'] = items
#print 'last phase ran in:', time.clock() - last, 'seconds'
#print 'get_categories ran in:', time.clock() - start, 'seconds'
return categories
############# End get_categories
def tags_older_than(self, tag, delta, must_have_tag=None,
must_have_authors=None):
'''
Return the ids of all books having the tag ``tag`` that are older than
the specified time. Tag comparison is case insensitive.
:param delta: A timedelta object or None. If None, then all ids with
the tag are returned.
:param must_have_tag: If not None the list of matches will be
restricted to books that have this tag
:param must_have_authors: A list of authors. If not None the list of
matches will be restricted to books that have these authors (case
insensitive).
'''
tag = tag.lower().strip()
mht = must_have_tag.lower().strip() if must_have_tag else None
now = nowf()
tindex = self.FIELD_MAP['timestamp']
gindex = self.FIELD_MAP['tags']
iindex = self.FIELD_MAP['id']
aindex = self.FIELD_MAP['authors']
mah = must_have_authors
if mah is not None:
mah = [x.replace(',', '|').lower() for x in mah]
mah = ','.join(mah)
for r in self.data._data:
if r is not None:
if delta is None or (now - r[tindex]) > delta:
if mah:
authors = r[aindex] or ''
if authors.lower() != mah:
continue
tags = r[gindex]
if tags:
tags = [x.strip() for x in tags.lower().split(',')]
if tag in tags and (mht is None or mht in tags):
yield r[iindex]
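# Usage sketch: ids of books carrying a given tag downloaded more than a week
# ago (the tag value is illustrative; the method is a generator):
#
#   from datetime import timedelta
#   stale_ids = list(db.tags_older_than('daily news', timedelta(days=7)))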
def get_next_series_num_for(self, series):
series_id = None
if series:
series_id = self.conn.get('SELECT id from series WHERE name=?',
(series,), all=False)
if series_id is None:
if isinstance(tweaks['series_index_auto_increment'], (int, float)):
return float(tweaks['series_index_auto_increment'])
return 1.0
series_indices = self.conn.get(
('SELECT series_index FROM books WHERE id IN '
'(SELECT book FROM books_series_link where series=?) '
'ORDER BY series_index'),
(series_id,))
return self._get_next_series_num_for_list(series_indices)
def _get_next_series_num_for_list(self, series_indices):
return _get_next_series_num_for_list(series_indices)
def set(self, row, column, val, allow_case_change=False):
'''
Convenience method for setting the title, authors, publisher, tags or
rating
'''
id = self.data[row][0]
col = self.FIELD_MAP[column]
books_to_refresh = {id}
set_args = (row, col, val)
if column == 'authors':
val = string_to_authors(val)
books_to_refresh |= self.set_authors(id, val, notify=False,
allow_case_change=allow_case_change)
elif column == 'title':
self.set_title(id, val, notify=False)
elif column == 'publisher':
books_to_refresh |= self.set_publisher(id, val, notify=False,
allow_case_change=allow_case_change)
elif column == 'rating':
self.set_rating(id, val, notify=False)
elif column == 'tags':
books_to_refresh |= \
self.set_tags(id, [x.strip() for x in val.split(',') if x.strip()],
append=False, notify=False, allow_case_change=allow_case_change)
self.data.set(*set_args)
self.data.refresh_ids(self, [id])
self.set_path(id, True)
self.notify('metadata', [id])
return books_to_refresh
def set_metadata(self, id, mi, ignore_errors=False, set_title=True,
set_authors=True, commit=True, force_changes=False,
notify=True):
'''
Set metadata for the book `id` from the `Metadata` object `mi`
Setting force_changes=True will force set_metadata to update fields even
if mi contains empty values. In this case, 'None' is distinguished from
'empty'. If mi.XXX is None, the XXX is not replaced, otherwise it is.
The tags, identifiers, and cover attributes are special cases. Tags and
        identifiers cannot be set to None, so they will always be replaced if
force_changes is true. You must ensure that mi contains the values you
want the book to have. Covers are always changed if a new cover is
provided, but are never deleted. Also note that force_changes has no
effect on setting title or authors.
'''
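        # Illustrative usage sketch (not part of the original source); assumes
        # `db` is an open library database and `book_id` is a valid book id:
        #   mi = Metadata('Some Title', ['Some Author'])
        #   mi.tags = ['Fiction']
        #   db.set_metadata(book_id, mi)
        # With force_changes left False, only the fields of mi that are not
        # null are applied to the book.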
if callable(getattr(mi, 'to_book_metadata', None)):
# Handle code passing in a OPF object instead of a Metadata object
mi = mi.to_book_metadata()
def doit(func, *args, **kwargs):
try:
func(*args, **kwargs)
except:
if ignore_errors:
traceback.print_exc()
else:
raise
def should_replace_field(attr):
return (force_changes and (mi.get(attr, None) is not None)) or \
not mi.is_null(attr)
path_changed = False
if set_title and mi.title:
self._set_title(id, mi.title)
path_changed = True
if set_authors:
if not mi.authors:
mi.authors = [_('Unknown')]
authors = []
for a in mi.authors:
authors += string_to_authors(a)
self._set_authors(id, authors)
path_changed = True
if path_changed:
self.set_path(id, index_is_id=True)
if should_replace_field('title_sort'):
self.set_title_sort(id, mi.title_sort, notify=False, commit=False)
if should_replace_field('author_sort'):
doit(self.set_author_sort, id, mi.author_sort, notify=False,
commit=False)
if should_replace_field('publisher'):
doit(self.set_publisher, id, mi.publisher, notify=False,
commit=False)
# Setting rating to zero is acceptable.
if mi.rating is not None:
doit(self.set_rating, id, mi.rating, notify=False, commit=False)
if should_replace_field('series'):
doit(self.set_series, id, mi.series, notify=False, commit=False)
# force_changes has no effect on cover manipulation
if mi.cover_data[1] is not None:
doit(self.set_cover, id, mi.cover_data[1], commit=False)
elif isinstance(mi.cover, basestring) and mi.cover:
if os.access(mi.cover, os.R_OK):
with lopen(mi.cover, 'rb') as f:
raw = f.read()
if raw:
doit(self.set_cover, id, raw, commit=False)
# if force_changes is true, tags are always replaced because the
# attribute cannot be set to None.
if should_replace_field('tags'):
doit(self.set_tags, id, mi.tags, notify=False, commit=False)
if should_replace_field('comments'):
doit(self.set_comment, id, mi.comments, notify=False, commit=False)
if should_replace_field('languages'):
doit(self.set_languages, id, mi.languages, notify=False, commit=False)
# Setting series_index to zero is acceptable
if mi.series_index is not None:
doit(self.set_series_index, id, mi.series_index, notify=False,
commit=False)
if should_replace_field('pubdate'):
doit(self.set_pubdate, id, mi.pubdate, notify=False, commit=False)
if getattr(mi, 'timestamp', None) is not None:
doit(self.set_timestamp, id, mi.timestamp, notify=False,
commit=False)
# identifiers will always be replaced if force_changes is True
mi_idents = mi.get_identifiers()
if force_changes:
self.set_identifiers(id, mi_idents, notify=False, commit=False)
elif mi_idents:
identifiers = self.get_identifiers(id, index_is_id=True)
for key, val in mi_idents.iteritems():
if val and val.strip(): # Don't delete an existing identifier
identifiers[icu_lower(key)] = val
self.set_identifiers(id, identifiers, notify=False, commit=False)
user_mi = mi.get_all_user_metadata(make_copy=False)
for key in user_mi.iterkeys():
if key in self.field_metadata and \
user_mi[key]['datatype'] == self.field_metadata[key]['datatype'] and \
(user_mi[key]['datatype'] != 'text' or
user_mi[key]['is_multiple'] == self.field_metadata[key]['is_multiple']):
val = mi.get(key, None)
if force_changes or val is not None:
doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
label=user_mi[key]['label'], commit=False, notify=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def authors_sort_strings(self, id, index_is_id=False):
'''
Given a book, return the list of author sort strings
for the book's authors
'''
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT sort
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (sort,) in aut_strings:
result.append(sort)
return result
# Given a book, return the map of author sort strings for the book's authors
def authors_with_sort_strings(self, id, index_is_id=False):
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT authors.id, authors.name, authors.sort, authors.link
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (id_, author, sort, link) in aut_strings:
result.append((id_, author.replace('|', ','), sort, link))
return result
# Given a book, return the author_sort string for authors of the book
def author_sort_from_book(self, id, index_is_id=False):
auts = self.authors_sort_strings(id, index_is_id)
return ' & '.join(auts).replace('|', ',')
# Given an author, return a list of books with that author
def books_for_author(self, id_, index_is_id=False):
id_ = id_ if index_is_id else self.id(id_)
books = self.conn.get('''
SELECT bl.book
FROM books_authors_link as bl
WHERE bl.author=?''', (id_,))
return [b[0] for b in books]
# Given a list of authors, return the author_sort string for the authors,
# preferring the author sort associated with the author over the computed
# string
def author_sort_from_authors(self, authors):
result = []
for aut in authors:
r = self.conn.get('SELECT sort FROM authors WHERE name=?',
(aut.replace(',', '|'),), all=False)
if r is None:
result.append(author_to_author_sort(aut))
else:
result.append(r)
return ' & '.join(result).replace('|', ',')
def _update_author_in_cache(self, id_, ss, final_authors):
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (ss, id_))
self.data.set(id_, self.FIELD_MAP['authors'],
','.join([a.replace(',', '|') for a in final_authors]),
row_is_id=True)
self.data.set(id_, self.FIELD_MAP['author_sort'], ss, row_is_id=True)
aum = self.authors_with_sort_strings(id_, index_is_id=True)
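        # au_map is cached as one 'author:::sort:::link' entry per author,
        # with entries joined by ':#:'.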
self.data.set(id_, self.FIELD_MAP['au_map'],
':#:'.join([':::'.join((au.replace(',', '|'), aus, aul))
for (_, au, aus, aul) in aum]),
row_is_id=True)
def _set_authors(self, id, authors, allow_case_change=False):
if not authors:
authors = [_('Unknown')]
self.conn.execute('DELETE FROM books_authors_link WHERE book=?',(id,))
books_to_refresh = {id}
final_authors = []
for a in authors:
case_change = False
if not a:
continue
a = a.strip().replace(',', '|')
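            # Author names are stored with ',' replaced by '|'; the comma is
            # restored whenever the name is read back out of the database.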
if not isinstance(a, unicode):
a = a.decode(preferred_encoding, 'replace')
aus = self.conn.get('SELECT id, name, sort FROM authors WHERE name=?', (a,))
if aus:
aid, name, sort = aus[0]
# Handle change of case
if name != a:
if allow_case_change:
ns = author_to_author_sort(a.replace('|', ','))
if strcmp(sort, ns) == 0:
sort = ns
self.conn.execute('''UPDATE authors SET name=?, sort=?
WHERE id=?''', (a, sort, aid))
case_change = True
else:
a = name
else:
aid = self.conn.execute('''INSERT INTO authors(name)
VALUES (?)''', (a,)).lastrowid
final_authors.append(a.replace('|', ','))
try:
self.conn.execute('''INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (id, aid))
except IntegrityError: # Sometimes books specify the same author twice in their metadata
pass
if case_change:
bks = self.conn.get('''SELECT book FROM books_authors_link
WHERE author=?''', (aid,))
books_to_refresh |= set([bk[0] for bk in bks])
for bk in books_to_refresh:
ss = self.author_sort_from_book(id, index_is_id=True)
aus = self.author_sort(bk, index_is_id=True)
if strcmp(aus, ss) == 0:
self._update_author_in_cache(bk, ss, final_authors)
# This can repeat what was done above in rare cases. Let it.
ss = self.author_sort_from_book(id, index_is_id=True)
self._update_author_in_cache(id, ss, final_authors)
self.clean_standard_field('authors', commit=True)
return books_to_refresh
def windows_check_if_files_in_use(self, book_id):
'''
Raises an EACCES IOError if any of the files in the folder of book_id
are opened in another program on windows.
'''
if iswindows:
path = self.path(book_id, index_is_id=True)
if path:
spath = os.path.join(self.library_path, *path.split('/'))
wam = None
if os.path.exists(spath):
try:
wam = WindowsAtomicFolderMove(spath)
finally:
if wam is not None:
wam.close_handles()
def set_authors(self, id, authors, notify=True, commit=True,
allow_case_change=False):
'''
Note that even if commit is False, the db will still be committed to
because this causes the location of files to change
:param authors: A list of authors.
'''
self.windows_check_if_files_in_use(id)
books_to_refresh = self._set_authors(id, authors,
allow_case_change=allow_case_change)
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.set_path(id, index_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_title_sort(self, id, title_sort_, notify=True, commit=True):
if not title_sort_:
return False
if isbytestring(title_sort_):
title_sort_ = title_sort_.decode(preferred_encoding, 'replace')
self.conn.execute('UPDATE books SET sort=? WHERE id=?', (title_sort_, id))
self.data.set(id, self.FIELD_MAP['sort'], title_sort_, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
return True
def _set_title(self, id, title):
if not title:
return False
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
old_title = self.title(id, index_is_id=True)
# We cannot check if old_title == title as previous code might have
# already updated the cache
only_case_change = icu_lower(old_title) == icu_lower(title)
self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
if only_case_change:
# SQLite update trigger will not update sort on a case change
self.conn.execute('UPDATE books SET sort=? WHERE id=?',
(title_sort(title), id))
ts = self.conn.get('SELECT sort FROM books WHERE id=?', (id,),
all=False)
if ts:
self.data.set(id, self.FIELD_MAP['sort'], ts, row_is_id=True)
return True
def set_title(self, id, title, notify=True, commit=True):
'''
Note that even if commit is False, the db will still be committed to
because this causes the location of files to change
'''
self.windows_check_if_files_in_use(id)
if not self._set_title(id, title):
return
self.set_path(id, index_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_languages(self, book_id, languages, notify=True, commit=True):
self.conn.execute(
'DELETE FROM books_languages_link WHERE book=?', (book_id,))
self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
FROM books_languages_link WHERE
books_languages_link.lang_code=languages.id) < 1''')
books_to_refresh = set([book_id])
final_languages = []
for l in languages:
lc = canonicalize_lang(l)
if not lc or lc in final_languages or lc in ('und', 'zxx', 'mis',
'mul'):
continue
final_languages.append(lc)
lc_id = self.conn.get('SELECT id FROM languages WHERE lang_code=?',
(lc,), all=False)
if lc_id is None:
lc_id = self.conn.execute('''INSERT INTO languages(lang_code)
VALUES (?)''', (lc,)).lastrowid
self.conn.execute('''INSERT INTO books_languages_link(book, lang_code)
VALUES (?,?)''', (book_id, lc_id))
self.dirtied(books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.data.set(book_id, self.FIELD_MAP['languages'],
u','.join(final_languages), row_is_id=True)
if notify:
self.notify('metadata', [book_id])
return books_to_refresh
def set_timestamp(self, id, dt, notify=True, commit=True):
if dt:
if isinstance(dt, (unicode, bytes)):
dt = parse_date(dt, as_utc=True, assume_utc=False)
self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_pubdate(self, id, dt, notify=True, commit=True):
if not dt:
dt = UNDEFINED_DATE
if isinstance(dt, basestring):
dt = parse_only_date(dt)
self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def set_publisher(self, id, publisher, notify=True, commit=True,
allow_case_change=False):
self.conn.execute('DELETE FROM books_publishers_link WHERE book=?',(id,))
books_to_refresh = {id}
if publisher:
case_change = False
if not isinstance(publisher, unicode):
publisher = publisher.decode(preferred_encoding, 'replace')
pubx = self.conn.get('''SELECT id,name from publishers
WHERE name=?''', (publisher,))
if pubx:
aid, cur_name = pubx[0]
if publisher != cur_name:
if allow_case_change:
self.conn.execute('''UPDATE publishers SET name=?
WHERE id=?''', (publisher, aid))
case_change = True
else:
publisher = cur_name
books_to_refresh = set()
else:
aid = self.conn.execute('''INSERT INTO publishers(name)
VALUES (?)''', (publisher,)).lastrowid
self.conn.execute('''INSERT INTO books_publishers_link(book, publisher)
VALUES (?,?)''', (id, aid))
if case_change:
bks = self.conn.get('''SELECT book FROM books_publishers_link
WHERE publisher=?''', (aid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.conn.execute('''DELETE FROM publishers WHERE (SELECT COUNT(id)
FROM books_publishers_link
WHERE publisher=publishers.id) < 1''')
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_uuid(self, id, uuid, notify=True, commit=True):
if uuid:
self.conn.execute('UPDATE books SET uuid=? WHERE id=?', (uuid, id))
self.data.set(id, self.FIELD_MAP['uuid'], uuid, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def get_id_from_uuid(self, uuid):
if uuid:
return (self.data._uuid_map.get(uuid, None) or
self.conn.get('SELECT id FROM books WHERE uuid=?', (uuid,),
all=False))
# Convenience methods for tags_list_editor
# Note: we generally do not need to refresh_ids because library_view will
# refresh everything.
def get_ratings_with_ids(self):
result = self.conn.get('SELECT id,rating FROM ratings')
if not result:
return []
return result
def dirty_books_referencing(self, field, id, commit=True):
# Get the list of books to dirty -- all books that reference the item
table = self.field_metadata[field]['table']
link = self.field_metadata[field]['link_column']
bks = self.conn.get(
'SELECT book from books_{0}_link WHERE {1}=?'.format(table, link),
(id,))
books = []
for (book_id,) in bks:
books.append(book_id)
self.dirtied(books, commit=commit)
def get_tags_with_ids(self):
result = self.conn.get('SELECT id,name FROM tags')
if not result:
return []
return result
def get_languages_with_ids(self):
result = self.conn.get('SELECT id,lang_code FROM languages')
if not result:
return []
return result
def rename_tag(self, old_id, new_name):
# It is possible that new_name is in fact a set of names. Split it on
# comma to find out. If it is, then rename the first one and append the
# rest
new_names = [t.strip() for t in new_name.strip().split(',') if t.strip()]
new_name = new_names[0]
new_names = new_names[1:]
# get the list of books that reference the tag being changed
books = self.conn.get('''SELECT book from books_tags_link
WHERE tag=?''', (old_id,))
books = [b[0] for b in books]
new_id = self.conn.get(
'''SELECT id from tags
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
# easy cases. Simply rename the tag. Do it even if equal, in case
# there is a change of case
self.conn.execute('''UPDATE tags SET name=?
WHERE id=?''', (new_name, old_id))
new_id = old_id
else:
# It is possible that by renaming a tag, the tag will appear
# twice on a book. This will throw an integrity error, aborting
# all the changes. To get around this, we first delete any links
# to the new_id from books referencing the old_id, so that
# renaming old_id to new_id will be unique on the book
for book_id in books:
self.conn.execute('''DELETE FROM books_tags_link
WHERE book=? and tag=?''', (book_id, new_id))
# Change the link table to point at the new tag
self.conn.execute('''UPDATE books_tags_link SET tag=?
WHERE tag=?''',(new_id, old_id,))
            # Get rid of the no-longer used tag
self.conn.execute('DELETE FROM tags WHERE id=?', (old_id,))
if new_names:
# have some left-over names to process. Add them to the book.
for book_id in books:
self.set_tags(book_id, new_names, append=True, notify=False,
commit=False)
self.dirtied(books, commit=False)
self.clean_standard_field('tags', commit=False)
self.conn.commit()
def delete_tag_using_id(self, id):
self.dirty_books_referencing('tags', id, commit=False)
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
def get_series_with_ids(self):
result = self.conn.get('SELECT id,name FROM series')
if not result:
return []
return result
def rename_series(self, old_id, new_name, change_index=True):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from series
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
new_id = old_id
self.conn.execute('UPDATE series SET name=? WHERE id=?',
(new_name, old_id))
else:
# New series exists. Must update the link, then assign a
# new series index to each of the books.
if change_index:
# Get the list of books where we must update the series index
books = self.conn.get('''SELECT books.id
FROM books, books_series_link as lt
WHERE books.id = lt.book AND lt.series=?
ORDER BY books.series_index''', (old_id,))
# Now update the link table
self.conn.execute('''UPDATE books_series_link
SET series=?
WHERE series=?''',(new_id, old_id,))
if change_index and tweaks['series_index_auto_increment'] != 'no_change':
# Now set the indices
for (book_id,) in books:
# Get the next series index
index = self.get_next_series_num_for(new_name)
self.conn.execute('''UPDATE books
SET series_index=?
WHERE id=?''',(index, book_id,))
self.dirty_books_referencing('series', new_id, commit=False)
self.clean_standard_field('series', commit=False)
self.conn.commit()
def delete_series_using_id(self, id):
self.dirty_books_referencing('series', id, commit=False)
books = self.conn.get('SELECT book from books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM series WHERE id=?', (id,))
for (book_id,) in books:
self.conn.execute('UPDATE books SET series_index=1.0 WHERE id=?', (book_id,))
self.conn.commit()
def get_publishers_with_ids(self):
result = self.conn.get('SELECT id,name FROM publishers')
if not result:
return []
return result
def rename_publisher(self, old_id, new_name):
new_name = new_name.strip()
new_id = self.conn.get(
'''SELECT id from publishers
WHERE name=?''', (new_name,), all=False)
if new_id is None or old_id == new_id:
new_id = old_id
# New name doesn't exist. Simply change the old name
self.conn.execute('UPDATE publishers SET name=? WHERE id=?',
(new_name, old_id))
else:
# Change the link table to point at the new one
self.conn.execute('''UPDATE books_publishers_link
SET publisher=?
WHERE publisher=?''',(new_id, old_id,))
# Get rid of the no-longer used publisher
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.dirty_books_referencing('publisher', new_id, commit=False)
self.clean_standard_field('publisher', commit=False)
self.conn.commit()
def delete_publisher_using_id(self, old_id):
self.dirty_books_referencing('publisher', old_id, commit=False)
self.conn.execute('''DELETE FROM books_publishers_link
WHERE publisher=?''', (old_id,))
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.conn.commit()
def get_authors_with_ids(self):
result = self.conn.get('SELECT id,name,sort,link FROM authors')
if not result:
return []
return result
def get_author_id(self, author):
author = author.replace(',', '|')
result = self.conn.get('SELECT id FROM authors WHERE name=?',
(author,), all=False)
return result
def set_link_field_for_author(self, aid, link, commit=True, notify=False):
if not link:
link = ''
self.conn.execute('UPDATE authors SET link=? WHERE id=?', (link.strip(), aid))
if commit:
self.conn.commit()
def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
self.conn.execute('UPDATE authors SET sort=? WHERE id=?',
(new_sort.strip(), old_id))
if commit:
self.conn.commit()
# Now change all the author_sort fields in books by this author
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
for (book_id,) in bks:
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss, notify=notify, commit=commit)
def rename_author(self, old_id, new_name):
# Make sure that any commas in new_name are changed to '|'!
new_name = new_name.replace(',', '|').strip()
if not new_name:
new_name = _('Unknown')
# Get the list of books we must fix up, one way or the other
# Save the list so we can use it twice
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
books = []
for (book_id,) in bks:
books.append(book_id)
# check if the new author already exists
new_id = self.conn.get('SELECT id from authors WHERE name=?',
(new_name,), all=False)
if new_id is None or old_id == new_id:
# No name clash. Go ahead and update the author's name
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
else:
# First check for the degenerate case -- changing a value to itself.
# Update it in case there is a change of case, but do nothing else
if old_id == new_id:
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
self.conn.commit()
return new_id
# Author exists. To fix this, we must replace all the authors
# instead of replacing the one. Reason: db integrity checks can stop
# the rename process, which would leave everything half-done. We
# can't do it the same way as tags (delete and add) because author
# order is important.
for book_id in books:
# Get the existing list of authors
authors = self.conn.get('''
SELECT author from books_authors_link
WHERE book=?
ORDER BY id''',(book_id,))
# unpack the double-list structure, replacing the old author
# with the new one while we are at it
for i,aut in enumerate(authors):
authors[i] = aut[0] if aut[0] != old_id else new_id
# Delete the existing authors list
self.conn.execute('''DELETE FROM books_authors_link
WHERE book=?''',(book_id,))
# Change the authors to the new list
for aid in authors:
try:
self.conn.execute('''
INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (book_id, aid))
except IntegrityError:
# Sometimes books specify the same author twice in their
# metadata. Ignore it.
pass
# Now delete the old author from the DB
self.conn.execute('DELETE FROM authors WHERE id=?', (old_id,))
self.dirtied(books, commit=False)
self.conn.commit()
# the authors are now changed, either by changing the author's name
# or replacing the author in the list. Now must fix up the books.
for book_id in books:
# First, must refresh the cache to see the new authors
self.data.refresh_ids(self, [book_id])
# now fix the filesystem paths
self.set_path(book_id, index_is_id=True)
# Next fix the author sort. Reset it to the default
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss)
# the caller will do a general refresh, so we don't need to
# do one here
return new_id
# end convenience methods
def get_tags(self, id):
result = self.conn.get(
'SELECT name FROM tags WHERE id IN (SELECT tag FROM books_tags_link WHERE book=?)',
(id,), all=True)
if not result:
return set([])
return set([r[0] for r in result])
@classmethod
def cleanup_tags(cls, tags):
tags = [x.strip().replace(',', ';') for x in tags if x.strip()]
tags = [x.decode(preferred_encoding, 'replace')
if isbytestring(x) else x for x in tags]
tags = [u' '.join(x.split()) for x in tags]
ans, seen = [], set([])
for tag in tags:
if tag.lower() not in seen:
seen.add(tag.lower())
ans.append(tag)
return ans
def remove_all_tags(self, ids, notify=False, commit=True):
self.conn.executemany(
'DELETE FROM books_tags_link WHERE book=?', [(x,) for x in ids])
self.dirtied(ids, commit=False)
if commit:
self.conn.commit()
for x in ids:
self.data.set(x, self.FIELD_MAP['tags'], '', row_is_id=True)
if notify:
self.notify('metadata', ids)
def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
add = self.cleanup_tags(add)
remove = self.cleanup_tags(remove)
remove = set(remove) - set(add)
if not ids or (not add and not remove):
return
# Add tags that do not already exist into the tag table
all_tags = self.all_tags()
lt = [t.lower() for t in all_tags]
new_tags = [t for t in add if t.lower() not in lt]
if new_tags:
self.conn.executemany('INSERT INTO tags(name) VALUES (?)', [(x,) for x in
new_tags])
# Create the temporary tables to store the ids for books and tags
# to be operated on
tables = ('temp_bulk_tag_edit_books', 'temp_bulk_tag_edit_add',
'temp_bulk_tag_edit_remove')
drops = '\n'.join(['DROP TABLE IF EXISTS %s;'%t for t in tables])
creates = '\n'.join(['CREATE TEMP TABLE %s(id INTEGER PRIMARY KEY);'%t
for t in tables])
self.conn.executescript(drops + creates)
# Populate the books temp table
self.conn.executemany(
'INSERT INTO temp_bulk_tag_edit_books VALUES (?)',
[(x,) for x in ids])
# Populate the add/remove tags temp tables
for table, tags in enumerate([add, remove]):
if not tags:
continue
table = tables[table+1]
insert = ('INSERT INTO %s(id) SELECT tags.id FROM tags WHERE name=?'
' COLLATE PYNOCASE LIMIT 1')
self.conn.executemany(insert%table, [(x,) for x in tags])
if remove:
self.conn.execute(
'''DELETE FROM books_tags_link WHERE
book IN (SELECT id FROM %s) AND
tag IN (SELECT id FROM %s)'''
% (tables[0], tables[2]))
if add:
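            # Cross join the selected book ids with the tag ids to add;
            # INSERT OR REPLACE keeps the link table free of duplicate rows.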
self.conn.execute(
'''
INSERT OR REPLACE INTO books_tags_link(book, tag) SELECT {0}.id, {1}.id FROM
{0}, {1}
'''.format(tables[0], tables[1])
)
self.conn.executescript(drops)
self.dirtied(ids, commit=False)
self.conn.commit()
for x in ids:
tags = u','.join(self.get_tags(x))
self.data.set(x, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', ids)
def commit(self):
self.conn.commit()
def set_tags(self, id, tags, append=False, notify=True, commit=True,
allow_case_change=False):
'''
@param tags: list of strings
@param append: If True existing tags are not removed
'''
if not tags:
tags = []
if not append:
self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
otags = self.get_tags(id)
tags = self.cleanup_tags(tags)
books_to_refresh = {id}
for tag in (set(tags)-otags):
case_changed = False
tag = tag.strip()
if not tag:
continue
if not isinstance(tag, unicode):
tag = tag.decode(preferred_encoding, 'replace')
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
etag = existing_tags[idx]
tid = self.conn.get('SELECT id FROM tags WHERE name=?', (etag,), all=False)
if allow_case_change and etag != tag:
self.conn.execute('UPDATE tags SET name=? WHERE id=?', (tag, tid))
case_changed = True
else:
tid = self.conn.execute('INSERT INTO tags(name) VALUES(?)', (tag,)).lastrowid
if not self.conn.get('''SELECT book FROM books_tags_link
WHERE book=? AND tag=?''', (id, tid), all=False):
self.conn.execute('''INSERT INTO books_tags_link(book, tag)
VALUES (?,?)''', (id, tid))
if case_changed:
bks = self.conn.get('SELECT book FROM books_tags_link WHERE tag=?',
(tid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
FROM books_tags_link WHERE tag=tags.id) < 1''')
self.dirtied(set([id])|books_to_refresh, commit=False)
if commit:
self.conn.commit()
tags = u','.join(self.get_tags(id))
self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def unapply_tags(self, book_id, tags, notify=True):
for tag in tags:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,), all=False)
if id:
self.conn.execute('''DELETE FROM books_tags_link
WHERE tag=? AND book=?''', (id, book_id))
self.conn.commit()
self.data.refresh_ids(self, [book_id])
if notify:
self.notify('metadata', [id])
def is_tag_used(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
lt.index(tag.lower())
return True
except ValueError:
return False
def delete_tag(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (existing_tags[idx],), all=False)
if id:
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
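    # Matches a series value that carries an explicit index, e.g. 'My Series [3.5]'.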
def _get_series_values(self, val):
return _get_series_values(val)
def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
(series, idx) = self._get_series_values(series)
books_to_refresh = {id}
if series:
case_change = False
if not isinstance(series, unicode):
series = series.decode(preferred_encoding, 'replace')
series = series.strip()
series = u' '.join(series.split())
sx = self.conn.get('SELECT id,name from series WHERE name=?', (series,))
if sx:
aid, cur_name = sx[0]
if cur_name != series:
if allow_case_change:
self.conn.execute('UPDATE series SET name=? WHERE id=?', (series, aid))
case_change = True
else:
series = cur_name
books_to_refresh = set()
else:
aid = self.conn.execute('INSERT INTO series(name) VALUES (?)', (series,)).lastrowid
self.conn.execute('INSERT INTO books_series_link(book, series) VALUES (?,?)', (id, aid))
if idx:
self.set_series_index(id, idx, notify=notify, commit=commit)
if case_change:
bks = self.conn.get('SELECT book FROM books_series_link WHERE series=?',
(aid,))
books_to_refresh |= set([bk[0] for bk in bks])
self.conn.execute('''DELETE FROM series
WHERE (SELECT COUNT(id) FROM books_series_link
WHERE series=series.id) < 1''')
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
if notify:
self.notify('metadata', [id])
return books_to_refresh
def set_series_index(self, id, idx, notify=True, commit=True):
if idx is None:
idx = 1.0
try:
idx = float(idx)
except:
idx = 1.0
self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_rating(self, id, rating, notify=True, commit=True):
if not rating:
rating = 0
rating = int(rating)
self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
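        # Reuse the ratings row for this value if it already exists, otherwise
        # create one, then link the book to it.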
rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
rat = rat if rat is not None else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_comment(self, id, text, notify=True, commit=True):
self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
if text:
self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
else:
text = ''
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
self.dirtied([id], commit=False)
if notify:
self.notify('metadata', [id])
def set_author_sort(self, id, sort, notify=True, commit=True):
if not sort:
sort = ''
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
if notify:
self.notify('metadata', [id])
def isbn(self, idx, index_is_id=False):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
if x.startswith('isbn:'):
return x[5:].strip()
def get_identifiers(self, idx, index_is_id=False):
ans = {}
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
key, _, val = x.partition(':')
key, val = key.strip(), val.strip()
if key and val:
ans[key] = val
return ans
def get_all_identifier_types(self):
idents = self.conn.get('SELECT DISTINCT type FROM identifiers')
return [ident[0] for ident in idents]
def _clean_identifier(self, typ, val):
typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
val = val.strip().replace(',', '|').replace(':', '|')
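        # ':' and ',' are not allowed because identifiers are cached as a single
        # comma-separated string of type:val pairs.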
return typ, val
def set_identifier(self, id_, typ, val, notify=True, commit=True):
'If val is empty, deletes identifier of type typ'
typ, val = self._clean_identifier(typ, val)
identifiers = self.get_identifiers(id_, index_is_id=True)
if not typ:
return
changed = False
if not val and typ in identifiers:
identifiers.pop(typ)
changed = True
self.conn.execute(
'DELETE from identifiers WHERE book=? AND type=?',
(id_, typ))
if val and identifiers.get(typ, None) != val:
changed = True
identifiers[typ] = val
self.conn.execute(
'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)',
(id_, typ, val))
if changed:
raw = ','.join(['%s:%s'%(k, v) for k, v in
identifiers.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
cleaned = {}
if not identifiers:
identifiers = {}
for typ, val in identifiers.iteritems():
typ, val = self._clean_identifier(typ, val)
if val:
cleaned[typ] = val
self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
self.conn.executemany(
'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
[(id_, k, v) for k, v in cleaned.iteritems()])
raw = ','.join(['%s:%s'%(k, v) for k, v in
cleaned.iteritems()])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
def set_isbn(self, id_, isbn, notify=True, commit=True):
self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
def add_catalog(self, path, title):
from calibre.ebooks.metadata.meta import get_metadata
format = os.path.splitext(path)[1][1:].lower()
with lopen(path, 'rb') as stream:
matches = self.data.get_matches('title', '='+title)
if matches:
tag_matches = self.data.get_matches('tags', '='+_('Catalog'))
matches = matches.intersection(tag_matches)
db_id = None
if matches:
db_id = list(matches)[0]
if db_id is None:
obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
(title, 'calibre'))
db_id = obj.lastrowid
self.data.books_added([db_id], self)
self.set_path(db_id, index_is_id=True)
self.conn.commit()
try:
mi = get_metadata(stream, format)
except:
mi = Metadata(title, ['calibre'])
stream.seek(0)
mi.title, mi.authors = title, ['calibre']
mi.tags = [_('Catalog')]
mi.pubdate = mi.timestamp = utcnow()
if format == 'mobi':
mi.cover, mi.cover_data = None, (None, None)
self.set_metadata(db_id, mi)
self.add_format(db_id, format, stream, index_is_id=True)
self.conn.commit()
self.data.refresh_ids(self, [db_id]) # Needed to update format list and size
return db_id
def add_news(self, path, arg):
from calibre.ebooks.metadata.meta import get_metadata
format = os.path.splitext(path)[1][1:].lower()
stream = path if hasattr(path, 'read') else lopen(path, 'rb')
stream.seek(0)
mi = get_metadata(stream, format, use_libprs_metadata=False,
force_read_metadata=True)
# Force the author to calibre as the auto delete of old news checks for
# both the author==calibre and the tag News
mi.authors = ['calibre']
stream.seek(0)
if mi.series_index is None:
mi.series_index = self.get_next_series_num_for(mi.series)
mi.tags = [_('News')]
if arg['add_title_tag']:
mi.tags += [arg['title']]
if arg['custom_tags']:
mi.tags += arg['custom_tags']
obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
(mi.title, mi.authors[0]))
id = obj.lastrowid
self.data.books_added([id], self)
self.set_path(id, index_is_id=True)
self.conn.commit()
if mi.pubdate is None:
mi.pubdate = utcnow()
if mi.timestamp is None:
mi.timestamp = utcnow()
self.set_metadata(id, mi)
self.add_format(id, format, stream, index_is_id=True)
if not hasattr(path, 'read'):
stream.close()
self.conn.commit()
self.data.refresh_ids(self, [id]) # Needed to update format list and size
return id
def run_import_plugins(self, path_or_stream, format):
format = format.lower()
if hasattr(path_or_stream, 'seek'):
path_or_stream.seek(0)
pt = PersistentTemporaryFile('_import_plugin.'+format)
shutil.copyfileobj(path_or_stream, pt, 1024**2)
pt.close()
path = pt.name
else:
path = path_or_stream
return run_plugins_on_import(path, format)
def _add_newbook_tag(self, mi):
tags = prefs['new_book_tags']
if tags:
for tag in [t.strip() for t in tags]:
if tag:
if mi.tags is None:
mi.tags = [tag]
else:
mi.tags.append(tag)
def create_book_entry(self, mi, cover=None, add_duplicates=True,
force_id=None):
if mi.tags:
mi.tags = list(mi.tags)
self._add_newbook_tag(mi)
if not add_duplicates and self.has_book(mi):
return None
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isbytestring(aus):
aus = aus.decode(preferred_encoding, 'replace')
if isbytestring(title):
title = title.decode(preferred_encoding, 'replace')
if force_id is None:
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
else:
id = force_id
obj = self.conn.execute(
'INSERT INTO books(id, title, series_index, '
'author_sort) VALUES (?, ?, ?, ?)',
(id, title, series_index, aus))
self.data.books_added([id], self)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, ignore_errors=True, commit=True)
if cover is not None:
try:
self.set_cover(id, cover)
except:
traceback.print_exc()
return id
def add_books(self, paths, formats, metadata, add_duplicates=True,
return_ids=False):
'''
Add a book to the database. The result cache is not updated.
:param:`paths` List of paths to book files or file-like objects
'''
formats, metadata = iter(formats), iter(metadata)
duplicates = []
ids = []
postimport = []
for path in paths:
mi = metadata.next()
self._add_newbook_tag(mi)
format = formats.next()
if not add_duplicates and self.has_book(mi):
duplicates.append((path, format, mi))
continue
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isinstance(aus, str):
aus = aus.decode(preferred_encoding, 'replace')
if isinstance(title, str):
title = title.decode(preferred_encoding)
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
ids.append(id)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, commit=True, ignore_errors=True)
npath = self.run_import_plugins(path, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = lopen(npath, 'rb')
format = check_ebook_format(stream, format)
self.add_format(id, format, stream, index_is_id=True)
stream.close()
postimport.append((id, format))
self.conn.commit()
self.data.refresh_ids(self, ids) # Needed to update format list and size
for book_id, fmt in postimport:
run_plugins_on_postimport(self, book_id, fmt)
if duplicates:
paths = list(duplicate[0] for duplicate in duplicates)
formats = list(duplicate[1] for duplicate in duplicates)
metadata = list(duplicate[2] for duplicate in duplicates)
return (paths, formats, metadata), (ids if return_ids else
len(ids))
return None, (ids if return_ids else len(ids))
def import_book(self, mi, formats, notify=True, import_hooks=True,
apply_import_tags=True, preserve_uuid=False):
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
if apply_import_tags:
self._add_newbook_tag(mi)
if not mi.title:
mi.title = _('Unknown')
if not mi.authors:
mi.authors = [_('Unknown')]
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
if isinstance(aus, str):
aus = aus.decode(preferred_encoding, 'replace')
title = mi.title if isinstance(mi.title, unicode) else \
mi.title.decode(preferred_encoding, 'replace')
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, ignore_errors=True, commit=True)
if preserve_uuid and mi.uuid:
self.set_uuid(id, mi.uuid, commit=False)
for path in formats:
ext = os.path.splitext(path)[1][1:].lower()
if ext == 'opf':
continue
if import_hooks:
self.add_format_with_hooks(id, ext, path, index_is_id=True)
else:
with lopen(path, 'rb') as f:
self.add_format(id, ext, f, index_is_id=True)
        # Mark the book dirty. This has probably already been done by
        # set_metadata, but that is not guaranteed, so do it again to be safe.
self.dirtied([id], commit=False)
self.conn.commit()
self.data.refresh_ids(self, [id]) # Needed to update format list and size
if notify:
self.notify('add', [id])
return id
def get_top_level_move_items(self):
items = set(os.listdir(self.library_path))
paths = set([])
for x in self.data.universal_set():
path = self.path(x, index_is_id=True)
path = path.split(os.sep)[0]
paths.add(path)
paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
path_map = {}
for x in paths:
path_map[x] = x
if not self.is_case_sensitive:
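            # On case-insensitive filesystems, match the on-disk entries by
            # their lower-cased names.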
for x in items:
path_map[x.lower()] = x
items = set(path_map)
paths = set([x.lower() for x in paths])
items = items.intersection(paths)
return items, path_map
def move_library_to(self, newloc, progress=None):
if progress is None:
progress = lambda x:x
if not os.path.exists(newloc):
os.makedirs(newloc)
old_dirs = set([])
items, path_map = self.get_top_level_move_items()
for x in items:
src = os.path.join(self.library_path, x)
dest = os.path.join(newloc, path_map[x])
if os.path.isdir(src):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
old_dirs.add(src)
else:
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
x = path_map[x]
if not isinstance(x, unicode):
x = x.decode(filesystem_encoding, 'replace')
progress(x)
dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
opath = self.dbpath
self.conn.close()
self.library_path, self.dbpath = newloc, dbpath
self.connect()
try:
os.unlink(opath)
except:
pass
for dir in old_dirs:
try:
shutil.rmtree(dir)
except:
pass
def __iter__(self):
for record in self.data._data:
if record is not None:
yield record
def all_ids(self):
x = self.FIELD_MAP['id']
for i in iter(self):
yield i[x]
def migrate_old(self, db, progress):
from PyQt4.QtCore import QCoreApplication
header = _(u'<p>Migrating old database to ebook library in %s<br><center>')%self.library_path
progress.setValue(0)
progress.setLabelText(header)
QCoreApplication.processEvents()
db.conn.row_factory = lambda cursor, row: tuple(row)
db.conn.text_factory = lambda x: unicode(x, 'utf-8', 'replace')
books = db.conn.get('SELECT id, title, sort, timestamp, series_index, author_sort, isbn FROM books ORDER BY id ASC')
progress.setAutoReset(False)
progress.setRange(0, len(books))
for book in books:
            self.conn.execute('INSERT INTO books(id, title, sort, timestamp, series_index, author_sort, isbn) VALUES(?, ?, ?, ?, ?, ?, ?);', book)
tables = '''
authors ratings tags series books_tags_link
comments publishers
books_authors_link conversion_options
books_publishers_link
books_ratings_link
books_series_link feeds
'''.split()
for table in tables:
rows = db.conn.get('SELECT * FROM %s ORDER BY id ASC'%table)
for row in rows:
self.conn.execute('INSERT INTO %s VALUES(%s)'%(table, ','.join(repeat('?', len(row)))), row)
self.conn.commit()
self.refresh('timestamp', True)
for i, book in enumerate(books):
progress.setLabelText(header+_(u'Copying <b>%s</b>')%book[1])
id = book[0]
self.set_path(id, True)
formats = db.formats(id, index_is_id=True)
if not formats:
formats = []
else:
formats = formats.split(',')
for format in formats:
data = db.format(id, format, index_is_id=True)
if data:
self.add_format(id, format, cStringIO.StringIO(data), index_is_id=True)
cover = db.cover(id, index_is_id=True)
if cover:
self.set_cover(id, cover)
progress.setValue(i+1)
self.conn.commit()
progress.setLabelText(_('Compacting database'))
self.vacuum()
progress.reset()
return len(books)
def find_books_in_directory(self, dirpath, single_book_per_directory):
return find_books_in_directory(dirpath, single_book_per_directory)
def import_book_directory_multiple(self, dirpath, callback=None,
added_ids=None):
return import_book_directory_multiple(self, dirpath, callback=callback, added_ids=added_ids)
def import_book_directory(self, dirpath, callback=None, added_ids=None):
return import_book_directory(self, dirpath, callback=callback, added_ids=added_ids)
def recursive_import(self, root, single_book_per_directory=True,
callback=None, added_ids=None):
return recursive_import(self, root, single_book_per_directory=single_book_per_directory, callback=callback, added_ids=added_ids)
def add_custom_book_data(self, book_id, name, val):
x = self.conn.get('SELECT id FROM books WHERE ID=?', (book_id,), all=False)
if x is None:
raise ValueError('add_custom_book_data: no such book_id %d'%book_id)
# Do the json encode first, in case it throws an exception
s = json.dumps(val, default=to_json)
self.conn.execute('''INSERT OR REPLACE INTO books_plugin_data(book, name, val)
VALUES(?, ?, ?)''', (book_id, name, s))
self.commit()
def add_multiple_custom_book_data(self, name, vals, delete_first=False):
if delete_first:
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.conn.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in vals.iteritems()])
self.commit()
def get_custom_book_data(self, book_id, name, default=None):
try:
s = self.conn.get('''select val FROM books_plugin_data
WHERE book=? AND name=?''', (book_id, name), all=False)
if s is None:
return default
return json.loads(s, object_hook=from_json)
except:
pass
return default
def get_all_custom_book_data(self, name, default=None):
try:
s = self.conn.get('''select book, val FROM books_plugin_data
WHERE name=?''', (name,))
if s is None:
return default
res = {}
for r in s:
res[r[0]] = json.loads(r[1], object_hook=from_json)
return res
except:
pass
return default
def delete_custom_book_data(self, book_id, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
(book_id, name))
self.commit()
def delete_all_custom_book_data(self, name):
self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.commit()
def get_ids_for_custom_book_data(self, name):
s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
return [x[0] for x in s]
def get_usage_count_by_id(self, field):
fm = self.field_metadata[field]
if not fm.get('link_column', None):
            raise ValueError('%s is not an is_multiple field' % field)
return self.conn.get(
'SELECT {0}, count(*) FROM books_{1}_link GROUP BY {0}'.format(
fm['link_column'], fm['table']))
def all_author_names(self):
ai = self.FIELD_MAP['authors']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
for x in auts.split(','):
ans.add(x.replace('|', ','))
return ans
def all_tag_names(self):
ai = self.FIELD_MAP['tags']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
for x in auts.split(','):
ans.add(x)
return ans
def all_publisher_names(self):
ai = self.FIELD_MAP['publisher']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
ans.add(auts)
return ans
def all_series_names(self):
ai = self.FIELD_MAP['series']
ans = set()
for rec in self.data.iterall():
auts = rec[ai]
if auts:
ans.add(auts)
return ans
| gpl-3.0 | -4,160,373,880,737,587,000 | 41.966475 | 149 | 0.526724 | false |
robovm/robovm-studio | python/helpers/profiler/thriftpy3/transport/TSSLSocket.py | 44 | 8264 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import socket
import ssl
from thriftpy3.transport import TSocket
from thriftpy3.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None,
ciphers=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
@param ca_certs: Filename to the Certificate Authority pem file, possibly a
file downloaded from: http://curl.haxx.se/ca/cacert.pem This is passed to
the ssl_wrap function as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
@param ciphers: The cipher suites to allow. This is passed to
the ssl_wrap function as the 'ciphers' parameter.
@type ciphers: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
self.ciphers = ciphers
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
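      # Try each address returned by name resolution; an error is only raised
      # if the last candidate also fails to connect.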
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs,
ciphers=self.ciphers)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error as e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error as e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None,
ciphers=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
@param ciphers: The cipher suites to allow. This is passed to
the ssl_wrap function as the 'ciphers' parameter.
@type ciphers: str
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
self.ciphers = ciphers
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION,
ciphers=self.ciphers)
except ssl.SSLError as ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
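# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wrapping a TSSLSocket in a buffered Thrift
# transport. The host, port and CA bundle path are assumptions for illustration;
# validation requires a readable ca_certs file (see __init__ above).
def _example_tssl_client(host='example.org', port=9090,
                         ca_certs='/etc/ssl/certs/ca-bundle.pem'):
  from thrift.transport import TTransport
  # TSSLSocket checks the peer certificate against ca_certs and compares its
  # commonName with `host` (see _validate_cert above).
  sock = TSSLSocket(host=host, port=port, validate=True, ca_certs=ca_certs)
  transport = TTransport.TBufferedTransport(sock)
  transport.open()  # TCP connect + TLS handshake + certificate validation
  return transport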
| apache-2.0 | -6,509,770,429,480,545,000 | 35.405286 | 79 | 0.630203 | false |
Deepak345/al-go-rithms | cryptography/steganography/python/steganography.py | 3 | 4403 | import getopt
import math
import os
import struct
import sys
import wave
def hide(sound_path, file_path, output_path, num_lsb):
sound = wave.open(sound_path, "r")
params = sound.getparams()
num_channels = sound.getnchannels()
sample_width = sound.getsampwidth()
num_frames = sound.getnframes()
num_samples = num_frames * num_channels
max_bytes_to_hide = (num_samples * num_lsb) // 8
filesize = os.stat(file_path).st_size
if filesize > max_bytes_to_hide:
required_LSBs = math.ceil(filesize * 8 / num_samples)
raise ValueError("Input file too large to hide, "
"requires {} LSBs, using {}"
.format(required_LSBs, num_lsb))
print("Using {} B out of {} B".format(filesize, max_bytes_to_hide))
print(sample_width)
if sample_width == 1: # samples are unsigned 8-bit integers
fmt = "{}B".format(num_samples)
mask = (1 << 8) - (1 << num_lsb)
min_sample = -(1 << 8)
elif sample_width == 2: # samples are signed 16-bit integers
fmt = "{}h".format(num_samples)
mask = (1 << 15) - (1 << num_lsb)
min_sample = -(1 << 15)
else:
raise ValueError("File has an unsupported bit-depth")
raw_data = list(struct.unpack(fmt, sound.readframes(num_frames)))
sound.close()
input_data = memoryview(open(file_path, "rb").read())
data_index = 0
sound_index = 0
values = []
buffer = 0
buffer_length = 0
done = False
print(input_data[1])
while not done:
while buffer_length < num_lsb and data_index // 8 < len(input_data):
buffer += (input_data[data_index // 8] >> (data_index % 8)
) << buffer_length
bits_added = 8 - (data_index % 8)
buffer_length += bits_added
data_index += bits_added
current_data = buffer % (1 << num_lsb)
buffer >>= num_lsb
buffer_length -= num_lsb
while (sound_index < len(raw_data) and
raw_data[sound_index] == min_sample):
values.append(struct.pack(fmt[-1], raw_data[sound_index]))
sound_index += 1
if sound_index < len(raw_data):
current_sample = raw_data[sound_index]
sound_index += 1
sign = 1
if current_sample < 0:
current_sample = -current_sample
sign = -1
altered_sample = sign * ((current_sample & mask) | current_data)
values.append(struct.pack(fmt[-1], altered_sample))
if data_index // 8 >= len(input_data) and buffer_length <= 0:
done = True
while sound_index < len(raw_data):
values.append(struct.pack(fmt[-1], raw_data[sound_index]))
sound_index += 1
sound_steg = wave.open(output_path, "w")
sound_steg.setparams(params)
sound_steg.writeframes(b"".join(values))
sound_steg.close()
def recover(sound_path, output_path, num_lsb, bytes_to_recover):
sound = wave.open(sound_path, "r")
num_channels = sound.getnchannels()
sample_width = sound.getsampwidth()
num_frames = sound.getnframes()
num_samples = num_frames * num_channels
if (sample_width == 1): # samples 8 bits
fmt = "{}B".format(num_samples)
min_sample = -(1 << 8)
elif (sample_width == 2): # samples 16 bits
fmt = "{}h".format(num_samples)
min_sample = -(1 << 15)
else:
raise ValueError("File has an unsupported bit-depth")
raw_data = list(struct.unpack(fmt, sound.readframes(num_frames)))
mask = (1 << num_lsb) - 1
output_file = open(output_path, "wb+")
data = bytearray()
sound_index = 0
buffer = 0
buffer_length = 0
while (bytes_to_recover > 0):
next_sample = raw_data[sound_index]
if (next_sample != min_sample):
buffer += (abs(next_sample) & mask) << buffer_length
buffer_length += num_lsb
sound_index += 1
while (buffer_length >= 8 and bytes_to_recover > 0):
current_data = buffer % (1 << 8)
buffer >>= 8
buffer_length -= 8
data += struct.pack('1B', current_data)
bytes_to_recover -= 1
output_file.write(bytes(data))
output_file.close()
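# --- Illustrative usage (not part of the original script) ---
# A minimal, hypothetical round trip with the functions above. The file names
# and LSB count are assumptions; to recover the payload you must know how many
# bytes were hidden and how many LSBs were used when hiding.
if __name__ == "__main__":
    if os.path.exists("carrier.wav") and os.path.exists("secret.txt"):
        num_lsb = 2
        hide("carrier.wav", "secret.txt", "carrier_steg.wav", num_lsb)
        recover("carrier_steg.wav", "recovered.txt", num_lsb,
                os.stat("secret.txt").st_size)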
| mit | 6,823,887,267,313,183,000 | 30.676259 | 76 | 0.551442 | false |
tashaxe/Red-DiscordBot | lib/youtube_dl/extractor/brightcove.py | 2 | 30054 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_xml_parse_error,
compat_HTTPError,
)
from ..utils import (
determine_ext,
ExtractorError,
extract_attributes,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
js_to_json,
int_or_none,
parse_iso8601,
unescapeHTML,
unsmuggle_url,
update_url_query,
clean_html,
mimetype2ext,
)
class BrightcoveLegacyIE(InfoExtractor):
IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
'timestamp': 1368213670,
'upload_date': '20130510',
'uploader_id': '1589608506001',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
'timestamp': 1344975024,
'upload_date': '20120814',
'uploader_id': '1460825906',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
'timestamp': 1382041798,
'upload_date': '20131017',
'uploader_id': '1130468786001',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
'skip': 'Video gone',
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '3750436379001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'RBTV Old (do not use)',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'timestamp': 1409122195,
'upload_date': '20140827',
'uploader_id': '710858724001',
},
'skip': 'Video gone',
},
{
# playlist with 'videoList'
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
{
# playlist with 'playlistTab' (https://github.com/rg3/youtube-dl/issues/9965)
'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg',
'info_dict': {
'id': '1522758701001',
'title': 'Lesson 08',
},
'playlist_mincount': 10,
},
]
FLV_VCODECS = {
1: 'SORENSON',
2: 'ON2',
3: 'H264',
4: 'VP8',
}
@classmethod
def _build_brighcove_url(cls, object_str):
"""
Build a Brightcove url from a xml string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
data_url = object_doc.attrib.get('data', '')
data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return data_url_params.get(name)
params = {}
playerID = find_param('playerID') or find_param('playerId')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# These fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList')
if videoPlayer is not None:
if isinstance(videoPlayer, list):
videoPlayer = videoPlayer[0]
videoPlayer = videoPlayer.strip()
# UUID is also possible for videoPlayer (e.g.
# http://www.popcornflix.com/hoodies-vs-hooligans/7f2d2b87-bbf2-4623-acfb-ea942b4f01dd
# or http://www8.hp.com/cn/zh/home.html)
if not (re.match(
r'^(?:\d+|[\da-fA-F]{8}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{12})$',
videoPlayer) or videoPlayer.startswith('ref:')):
return None
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
return update_url_query(cls._FEDERATED_URL, params)
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'''(?x)
<meta\s+
(?:property|itemprop)=([\'"])(?:og:video|embedURL)\1[^>]+
content=([\'"])(?P<url>https?://(?:secure|c)\.brightcove.com/(?:(?!\2).)+)\2
''', webpage)
if url_m:
url = unescapeHTML(url_m.group('url'))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url or 'idVideo' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change the 'videoId' and others field to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|idVideo|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query, referer=None):
headers = {}
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
headers['Referer'] = referer
webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' in json_data:
playlist_info = json_data['videoList']
playlist_dto = playlist_info['mediaCollectionDTO']
elif 'playlistTabs' in json_data:
playlist_info = json_data['playlistTabs']
playlist_dto = playlist_info['lineupListDTO']['playlistDTOs'][0]
else:
raise ExtractorError('Empty playlist')
videos = [self._extract_video_info(video_info) for video_info in playlist_dto['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_dto['displayName'])
def _extract_video_info(self, video_info):
video_id = compat_str(video_info['id'])
publisher_id = video_info.get('publisherId')
info = {
'id': video_id,
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
'uploader_id': compat_str(publisher_id) if publisher_id else None,
'duration': float_or_none(video_info.get('length'), 1000),
'timestamp': int_or_none(video_info.get('creationDate'), 1000),
}
renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', [])
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(
url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
continue
elif 'akamaihd.net' in url_comp.netloc:
                        # These renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
tbr = int_or_none(rend.get('encodingRate'), 1000)
a_format = {
'format_id': 'http%s' % ('-%s' % tbr if tbr else ''),
'url': url,
'ext': ext,
'filesize': int_or_none(rend.get('size')) or None,
'tbr': tbr,
}
if rend.get('audioOnly'):
a_format.update({
'vcodec': 'none',
})
else:
a_format.update({
'height': int_or_none(rend.get('frameHeight')),
'width': int_or_none(rend.get('frameWidth')),
'vcodec': rend.get('videoCodec'),
})
# m3u8 manifests with remote == false are media playlists
# Not calling _extract_m3u8_formats here to save network traffic
if ext == 'm3u8':
a_format.update({
'format_id': 'hls%s' % ('-%s' % tbr if tbr else ''),
'ext': 'mp4',
'protocol': 'm3u8_native',
})
formats.append(a_format)
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
'vcodec': self.FLV_VCODECS.get(video_info.get('FLVFullCodec')),
'filesize': int_or_none(video_info.get('FLVFullSize')),
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % video_id)
return info
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
_VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+|ref:[^&]+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'info_dict': {
'id': '4463358922001',
'ext': 'mp4',
'title': 'Meet the man behind Popcorn Time',
'description': 'md5:eac376a4fe366edc70279bfb681aea16',
'duration': 165.768,
'timestamp': 1441391203,
'upload_date': '20150904',
'uploader_id': '929656772001',
'formats': 'mincount:22',
},
}, {
# with rtmp streams
'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
'info_dict': {
'id': '4279049078001',
'ext': 'mp4',
'title': 'Titansgrave: Chapter 0',
'description': 'Titansgrave: Chapter 0',
'duration': 1242.058,
'timestamp': 1433556729,
'upload_date': '20150606',
'uploader_id': '4036320279001',
'formats': 'mincount:41',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# ref: prefixed video id
'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
'only_matching': True,
}, {
# non numeric ref: prefixed video id
'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356',
'only_matching': True,
}, {
# unavailable video without message but with error_code
'url': 'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001',
'only_matching': True,
}]
@staticmethod
    def _extract_url(ie, webpage):
        urls = BrightcoveNewIE._extract_urls(ie, webpage)
return urls[0] if urls else None
@staticmethod
def _extract_urls(ie, webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
# 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#tag
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 4. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/in-page-embed-player-implementation.html
# 5. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
# Look for iframe embeds [1]
for _, url in re.findall(
r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
entries.append(url if url.startswith('http') else 'http:' + url)
# Look for <video> tags [2] and embed_in_page embeds [3]
# [2] looks like:
for video, script_tag, account_id, player_id, embed in re.findall(
r'''(?isx)
(<video\s+[^>]+>)
(?:.*?
(<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
(\d+)/([^/]+)_([^/]+)/index(?:\.min)?\.js
)
)?
''', webpage):
attrs = extract_attributes(video)
# According to examples from [4] it's unclear whether video id
# may be optional and what to do when it is
video_id = attrs.get('data-video-id')
if not video_id:
continue
account_id = account_id or attrs.get('data-account')
if not account_id:
continue
player_id = player_id or attrs.get('data-player') or 'default'
embed = embed or attrs.get('data-embed') or 'default'
bc_url = 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' % (
account_id, player_id, embed, video_id)
# Some brightcove videos may be embedded with video tag only and
# without script tag or any mentioning of brightcove at all. Such
# embeds are considered ambiguous since they are matched based only
# on data-video-id and data-account attributes and in the wild may
# not be brightcove embeds at all. Let's check reconstructed
# brightcove URLs in case of such embeds and only process valid
# ones. By this we ensure there is indeed a brightcove embed.
if not script_tag and not ie._is_valid_url(
bc_url, video_id, 'possible brightcove video'):
continue
entries.append(bc_url)
return entries
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(
'http://players.brightcove.net/%s/%s_%s/index.min.js'
% (account_id, player_id, embed), video_id)
policy_key = None
catalog = self._search_regex(
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
if catalog:
catalog = self._parse_json(
js_to_json(catalog), video_id, fatal=False)
if catalog:
policy_key = catalog.get('policyKey')
if not policy_key:
policy_key = self._search_regex(
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
webpage, 'policy key', group='pk')
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
try:
json_data = self._download_json(api_url, video_id, headers={
'Accept': 'application/json;pk=%s' % policy_key
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
message = json_data.get('message') or json_data['error_code']
if json_data.get('error_subcode') == 'CLIENT_GEO':
self.raise_geo_restricted(msg=message)
raise ExtractorError(message, expected=True)
raise
title = json_data['name'].strip()
formats = []
for source in json_data.get('sources', []):
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
if ext == 'ism' or container == 'WVM':
continue
elif ext == 'm3u8' or container == 'M2TS':
if not src:
continue
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
if not src:
continue
formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
if not src and not streaming_src and (not stream_name or not app_name):
continue
tbr = float_or_none(source.get('avg_bitrate'), 1000)
height = int_or_none(source.get('height'))
width = int_or_none(source.get('width'))
f = {
'tbr': tbr,
'filesize': int_or_none(source.get('size')),
'container': container,
'ext': ext or container.lower(),
}
if width == 0 and height == 0:
f.update({
'vcodec': 'none',
})
else:
f.update({
'width': width,
'height': height,
'vcodec': source.get('codec'),
})
def build_format_id(kind):
format_id = kind
if tbr:
format_id += '-%dk' % int(tbr)
if height:
format_id += '-%dp' % height
return format_id
if src or streaming_src:
f.update({
'url': src or streaming_src,
'format_id': build_format_id('http' if src else 'http-streaming'),
'source_preference': 0 if src else -1,
})
else:
f.update({
'url': app_name,
'play_path': stream_name,
'format_id': build_format_id('rtmp'),
})
formats.append(f)
errors = json_data.get('errors')
if not formats and errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
self._sort_formats(formats)
subtitles = {}
for text_track in json_data.get('text_tracks', []):
if text_track.get('src'):
subtitles.setdefault(text_track.get('srclang'), []).append({
'url': text_track['src'],
})
is_live = False
duration = float_or_none(json_data.get('duration'), 1000)
if duration and duration < 0:
is_live = True
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': clean_html(json_data.get('description')),
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
'duration': duration,
'timestamp': parse_iso8601(json_data.get('published_at')),
'uploader_id': account_id,
'formats': formats,
'subtitles': subtitles,
'tags': json_data.get('tags', []),
'is_live': is_live,
}
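# --- Illustrative usage sketch (not part of the original extractor) ---
# These extractors are normally driven by youtube-dl's extraction framework; the
# hypothetical helper below shows one way to exercise the new-style extractor
# directly. The URL is taken from the test fixtures above; the options dict is
# an assumption for illustration only.
def _example_extract_brightcove(
        url='http://players.brightcove.net/929656772001/'
            'e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html'
            '?videoId=4463358922001'):
    import youtube_dl
    with youtube_dl.YoutubeDL({'skip_download': True, 'quiet': True}) as ydl:
        # Embeds in arbitrary pages are found via
        # BrightcoveLegacyIE._extract_brightcove_urls(webpage) and
        # BrightcoveNewIE._extract_urls(ie, webpage); direct player URLs like
        # the one above resolve straight to these extractors.
        return ydl.extract_info(url, download=False)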
| gpl-3.0 | 2,646,579,848,236,509,700 | 42.674419 | 484 | 0.52739 | false |
horazont/aioxmpp | aioxmpp/ibr/service.py | 1 | 5326 | ########################################################################
# File name: service.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import asyncio
import aioxmpp
import logging
from aioxmpp.service import Service
from . import xso
logger = logging.getLogger(__name__)
async def get_registration_fields(xmlstream, timeout=60):
"""
A query is sent to the server to obtain the fields that need to be
filled to register with the server.
:param xmlstream: Specifies the stream connected to the server where
the account will be created.
:type xmlstream: :class:`aioxmpp.protocol.XMLStream`
:param timeout: Maximum time in seconds to wait for an IQ response, or
:data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
:return: :attr:`list`
"""
iq = aioxmpp.IQ(
to=aioxmpp.JID.fromstr(xmlstream._to),
type_=aioxmpp.IQType.GET,
payload=xso.Query()
)
iq.autoset_id()
reply = await aioxmpp.protocol.send_and_wait_for(
xmlstream,
[iq],
[aioxmpp.IQ],
timeout=timeout
)
return reply.payload
async def register(xmlstream, query_xso, timeout=60):
"""
Create a new account on the server.
:param query_xso: XSO with the information needed for the registration.
:type query_xso: :class:`~aioxmpp.ibr.Query`
:param xmlstream: Specifies the stream connected to the server where
the account will be created.
:type xmlstream: :class:`aioxmpp.protocol.XMLStream`
:param timeout: Maximum time in seconds to wait for an IQ response, or
:data:`None` to disable the timeout.
:type timeout: :class:`~numbers.Real` or :data:`None`
"""
iq = aioxmpp.IQ(
to=aioxmpp.JID.fromstr(xmlstream._to),
type_=aioxmpp.IQType.SET,
payload=query_xso
)
iq.autoset_id()
await aioxmpp.protocol.send_and_wait_for(
xmlstream,
[iq],
[aioxmpp.IQ],
timeout=timeout
)
def get_used_fields(payload):
"""
Get a list containing the names of the fields that are used in the
xso.Query.
    :param payload: Query object to be inspected.
:type payload: :class:`~aioxmpp.ibr.Query`
:return: :attr:`list`
"""
return [
tag
for tag, descriptor in payload.CHILD_MAP.items()
if descriptor.__get__(payload, type(payload)) is not None
]
class RegistrationService(Service):
"""
Service implementing the XMPP In-Band Registration(:xep:`0077`)
use cases for registered entities.
This service allows an already registered and authenticated entity
to request information about the registration, cancel an existing
registration, or change a password.
.. automethod:: get_client_info
.. automethod:: change_pass
.. automethod:: cancel_registration
"""
async def get_client_info(self):
"""
A query is sent to the server to obtain the client's data stored at the
server.
:return: :class:`~aioxmpp.ibr.Query`
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.GET,
payload=xso.Query()
)
reply = await self.client.send(iq)
return reply
async def change_pass(self, new_pass):
"""
        Change the client's password to 'new_pass'.
        :param new_pass: New password of the client.
        :type new_pass: :class:`str`
        """
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.SET,
payload=xso.Query(self.client.local_jid.localpart, new_pass)
)
await self.client.send(iq)
async def cancel_registration(self):
"""
        Cancel the current client's account with the server.
        Even if the cancellation is successful, this method will raise an
        exception because the account no longer exists on the server, so the
        client will fail.
To continue with the execution, this method should be surrounded by a
try/except statement.
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.SET,
payload=xso.Query()
)
iq.payload.remove = True
await self.client.send(iq)
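# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical in-band registration flow built on the module level
# helpers above. The username and password are assumptions; a real client must
# first establish a non-authenticated XML stream to the target server.
async def _example_register(xmlstream):
    # Ask the server which fields it requires (returns an xso.Query payload).
    fields = await get_registration_fields(xmlstream)
    logger.debug("server asks for: %s", get_used_fields(fields))
    # Fill a Query with the common username/password pair and submit it.
    await register(xmlstream, xso.Query("alice", "correct horse battery staple"))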
| lgpl-3.0 | -6,274,695,310,275,401,000 | 28.588889 | 79 | 0.619414 | false |
nakagami/reportlab | tests/test_pdfgen_links.py | 1 | 6833 | #Copyright ReportLab Europe Ltd. 2000-2012
#this test and associates functionality kinds donated by Ian Sparks.
#see license.txt for license details
"""
Tests for internal links and destinations
"""
__version__='''$Id: test_pdfgen_links.py 3959 2012-09-27 14:39:39Z robin $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
#
# Fit tests
#
# Modification History
# ====================
#
# 11-Mar-2003 Ian Sparks
# * Initial version.
#
#
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
import unittest
def markPage(c,height=letter[1],width=letter[0]):
height = height / inch
width = width / inch
for y in range(int(height)):
for x in range(int(width)):
c.drawString(x*inch,y*inch,"x=%d y=%d" % (x,y) )
c.line(x*inch,0,x*inch,height*inch)
c.line(0,y*inch,width*inch,y*inch)
fn = outputfile("test_pdfgen_links.pdf")
class LinkTestCase(unittest.TestCase):
"Test classes."
def test1(self):
c = canvas.Canvas(fn,pagesize=letter)
#Page 1
c.setFont("Courier", 10)
markPage(c)
c.bookmarkPage("P1")
c.addOutlineEntry("Page 1","P1")
#Note : XYZ Left is ignored because at this zoom the whole page fits the screen
c.bookmarkPage("P1_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0.5)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=1)
c.bookmarkPage("P1_XYZ2",fit="XYZ",top=7*inch,left=3*inch,zoom=5)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=1)
c.bookmarkPage("P1_FIT",fit="Fit")
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=1)
c.bookmarkPage("P1_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 1 FitH (top = 2 inch)","P1_FITH",level=1)
c.bookmarkPage("P1_FITV",fit="FitV",left=3*inch)
c.addOutlineEntry("Page 1 FitV (left = 3 inch)","P1_FITV",level=1)
c.bookmarkPage("P1_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=1)
c.bookmarkPage("P1_FORWARD")
c.addOutlineEntry("Forward References","P1_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
#Create link to FitR on page 3
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.blue)
c.drawString(inch+20,inch+20,"Click to jump to the meaning of life")
c.linkAbsolute("","MOL",(inch+10,inch+10,6*inch,2*inch))
c.restoreState()
#Create linkAbsolute to page 2
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(4*inch,4*inch,"Jump to 2.5 inch position on page 2")
c.linkAbsolute("","HYPER_1",(3.75*inch,3.75*inch,8.25*inch,4.25*inch))
c.restoreState()
c.showPage()
#Page 2
c.setFont("Helvetica", 10)
markPage(c)
c.bookmarkPage("P2")
c.addOutlineEntry("Page 2","P2")
        #Note : This time left will be at 3*inch because the zoom makes the page too big to fit
c.bookmarkPage("P2_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=2)
c.addOutlineEntry("Page 2 XYZ (top=7,left=3,zoom=2.0)","P2_XYZ",level=1)
c.bookmarkPage("P2_FIT",fit="Fit")
c.addOutlineEntry("Page 2 Fit","P2_FIT",level=1)
c.bookmarkPage("P2_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 2 FitH (top = 2 inch)","P2_FITH",level=1)
c.bookmarkPage("P2_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=1)
c.bookmarkPage("P2_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 2 FitR (left=1,bottom=2,right=5,top=6)","P2_FITR",level=1)
c.bookmarkPage("P2_FORWARD")
c.addOutlineEntry("Forward References","P2_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
c.bookmarkPage("P2_BACKWARD")
c.addOutlineEntry("Backward References","P2_BACKWARD",level=2)
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=3)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=3)
#Horizontal absolute test from page 1. Note that because of the page size used on page 3 all this will do
#is put the view centered on the bookmark. If you want to see it "up close and personal" change page3 to be
#the same page size as the other pages.
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(2.5*inch,2.5*inch,"This line is hyperlinked from page 1")
# c.bookmarkHorizontalAbsolute("HYPER_1",3*inch) #slightly higher than the text otherwise text is of screen above.
c.bookmarkPage("HYPER_1",fit="XYZ",top=2.5*inch,bottom=2*inch)
c.restoreState()
#
c.showPage()
#Page 3
c.setFont("Times-Roman", 10)
        #Turn the page on its side and make it 2* the normal "width" in order to have something to test FitV against.
c.setPageSize((2*letter[1],letter[0]))
markPage(c,height=letter[0],width=2*letter[1])
c.bookmarkPage("P3")
c.addOutlineEntry("Page 3 (Double-wide landscape page)","P3")
#Note : XYZ with no zoom (set it to something first
c.bookmarkPage("P3_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=1)
        #FitV works here because the page is so wide it can't all fit on the page
c.bookmarkPage("P3_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 3 FitV (left = 10 inch)","P3_FITV",level=1)
c.bookmarkPage("P3_BACKWARD")
c.addOutlineEntry("Backward References","P3_BACKWARD",level=2)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=3)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=3)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=3)
#Add link from page 1
c.saveState()
c.setFont("Courier", 40)
c.setFillColor(colors.green)
c.drawString(5*inch,6*inch,"42")
c.bookmarkPage("MOL",fit="FitR",left=4*inch,top=7*inch,bottom=4*inch,right=6*inch)
c.showOutline()
c.save()
def makeSuite():
return makeSuiteForClasses(LinkTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
print("wrote", fn)
printLocation()
| bsd-3-clause | 5,419,041,058,837,564,000 | 35.345745 | 121 | 0.629153 | false |
anarchivist/pyflag | src/plugins/PreCanned/Basic.py | 2 | 5709 | """ These are PreCanned Reports.
PreCanned Reports are the PyFlag equivalent of the google 'Im Feeling
Lucky' feature - we basically just dump out some simple queries which
are used to get you started.
"""
import pyflag.Reports as Reports
import pyflag.conf
config=pyflag.conf.ConfObject()
import pyflag.Registry as Registry
class ImFeelingLucky(Reports.report):
"""
'Im Feeling Lucky' is a report which does basic analysis to get
you started on the case. Select which kind of analysis you want to
do.
"""
name = "Im Feeling Lucky"
family = "Disk Forensics"
def get_names(self, cls):
if type(cls.name)==str:
names = [cls.name,]
else:
names = cls.name
return names
def display(self, query, result):
query.clear('filter')
def left_pane_cb(path):
## We expect a directory here:
if not path.endswith('/'): path=path+'/'
seen = []
result = []
for cls in Registry.PRECANNED.classes:
if not cls.name: continue
for name in self.get_names(cls):
if name.startswith(path):
branches = name[len(path):].split('/')
branch = branches[0]
if branch not in seen:
seen.append(branch)
if len(branches)>1:
result.append((branch, branch, "branch"))
else:
result.append((branch, branch, "leaf"))
return result
def right_pane_cb(path, result):
for cls in Registry.PRECANNED.classes:
for name in self.get_names(cls):
if name == path:
query.set("open_tree",path)
cls().display(query, result)
return
result.heading("Precanned Analysis")
        result.para("Select the type of automated analysis required. You can use this to get started, and then drive the analysis further.")
result.tree(tree_cb = left_pane_cb, pane_cb = right_pane_cb)
class Images(Reports.PreCannedCaseTableReports):
""" Display a preview of images """
args = {'filter':' "Thumbnail" has_magic image and "Size" > 20000 ',
'order': 1, 'direction':0}
family = "Disk Forensics"
description = "View all images bigger than 20kb "
name = "/Disk Forensics/Multimedia/Graphics"
default_table = "TypeCaseTable"
columns = ['Thumbnail', 'InodeTable.Size','FileTable.Filename']
class Videos(Reports.PreCannedCaseTableReports):
""" Display a preview of Videos """
args = {'filter':' "Thumbnail" has_magic video',
'order': 1, 'direction':0}
family = "Disk Forensics"
description = "View all Videos "
name = "/Disk Forensics/Multimedia/Videos"
default_table = "TypeCaseTable"
columns = ['Thumbnail', 'InodeTable.Size','FileTable.Filename']
class OfficeFiles(Reports.PreCannedCaseTableReports):
""" Display a preview of Office files """
args = {'filter':' "Thumbnail" has_magic office ',
'order': 1, 'direction':0}
family = "Disk Forensics"
description = "View all Office files "
name = "/Disk Forensics/Multimedia/Office"
default_table = "TypeCaseTable"
columns = ['Thumbnail', 'InodeTable.Size','FileTable.Filename']
class HTMLPages(Registry.PreCanned):
args = {'filter':' "Thumbnail" has_magic HTML ',
'order': 4, 'direction':1}
family = "Disk Forensics"
report = "Browse Types"
description = "View all HTML Pages "
name = "/Disk Forensics/Multimedia/HTML Pages"
class HTMLURLs(Reports.PreCannedCaseTableReports):
args = {'filter': '"Content Type" contains html and Status = 200 ',
'_hidden': [ 4, 5, 6] }
report='Browse HTTP Requests'
family='Network Forensics'
description = 'View all HTML URLs'
name = [ "/Network Forensics/Web Applications/HTML URLs" ]
default_table = 'HTTPCaseTable'
columns = ['Timestamp', 'Inode', 'Method', 'URL', 'Content Type', 'InodeTable.Size', 'Status']
class ImageURLs(Reports.PreCannedCaseTableReports):
description = "Show larger images transferred over HTTP"
name = [ "/Network Forensics/Communications/Web/Images"]
family = 'Network Forensics'
args = {'filter':'Thumbnail has_magic image and Size > 20000',
'order': 0, 'direction': 1 }
default_table = 'HTTPCaseTable'
columns = ['Timestamp','TypeCaseTable.Thumbnail','InodeTable.Size', 'URL']
class VideoURLs(Reports.PreCannedCaseTableReports):
description = "Show videos downloaded over HTTP"
name = [ "/Network Forensics/Communications/Web/Videos"]
family = 'Network Forensics'
args = {'filter':'Thumbnail has_magic video',
'order': 0, 'direction': 1 }
default_table = 'HTTPCaseTable'
columns = ['Timestamp','TypeCaseTable.Thumbnail','InodeTable.Size', 'URL']
class GoogleSearches(Reports.PreCannedCaseTableReports):
description = "Shows possible Google searches."
name = [ "/Network Forensics/Web Applications/Google Searches" ]
family = 'Network Forensics'
args = {'filter': 'Parameter = q and "Content Type" contains html', '_hidden': 5}
default_table = 'HTTPCaseTable'
columns = ['HTTPCaseTable.Timestamp',
'HTTPCaseTable.Inode',
'HTTPParameterCaseTable.Parameter',
'HTTPParameterCaseTable.Value',
'HTTPCaseTable.URL',
'HTTPCaseTable.Content Type']
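# --- Illustrative sketch (not part of the original plugin) ---
# New precanned reports follow the same pattern as the classes above: pick a
# case table, the columns to display and a filter expression. The class below
# is a hypothetical example only; its filter and column names mirror those
# already used in this file.
class OfficeURLs(Reports.PreCannedCaseTableReports):
    description = "Show Office documents transferred over HTTP"
    name = ["/Network Forensics/Communications/Web/Office Documents"]
    family = 'Network Forensics'
    args = {'filter': 'Thumbnail has_magic office and Size > 20000',
            'order': 0, 'direction': 1}
    default_table = 'HTTPCaseTable'
    columns = ['Timestamp', 'TypeCaseTable.Thumbnail', 'InodeTable.Size', 'URL']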
| gpl-2.0 | -4,786,494,579,017,481,000 | 38.372414 | 148 | 0.606236 | false |
mozilla-services/data-pipeline | reports/stability-summary/utils.py | 4 | 1274 | import boto3
from gzip import GzipFile
from cStringIO import StringIO
import sys
import csv
class S3CompressedWriter(object):
def __init__(self, bucket, path, mimetype='text/plain'):
self.bucket = bucket
self.path = path
self.mimetype = mimetype
self._buffer = None
def __enter__(self):
        self._buffer = StringIO()
self._writer = GzipFile(mode="wb", fileobj=self._buffer)
return self._writer
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
self._writer.close()
self._buffer.seek(0)
s3 = boto3.resource('s3')
s3.Object(self.bucket, self.path).put(Body=self._buffer, ContentEncoding='gzip', ContentType=self.mimetype)
self._buffer = None
def __del__(self):
assert self._buffer is None
def S3CompressedReader(bucket, path):
s3 = boto3.resource('s3')
r = s3.Object(bucket, path).get()
body = StringIO(r['Body'].read())
return GzipFile(mode="rb", fileobj=body)
def HeaderCSVReader(fd, *args, **kwargs):
"""
Read CSV data from `fd`, separating the header list from the data.
"""
reader = csv.reader(fd, *args, **kwargs)
header = reader.next()
return header, reader
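# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical round trip with the helpers above; the bucket and key
# names are assumptions. The writer gzips whatever is written inside the `with`
# block and uploads it on a clean exit; the reader reverses the process.
def _example_roundtrip(bucket="telemetry-example", key="stability/summary.csv"):
    with S3CompressedWriter(bucket, key, mimetype="text/csv") as fd:
        fd.write("channel,crashes\nrelease,42\n")
    header, rows = HeaderCSVReader(S3CompressedReader(bucket, key))
    return header, list(rows)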
| mpl-2.0 | 427,980,651,294,834,200 | 29.333333 | 119 | 0.617739 | false |
bringsvor/account-financial-tools | __unported__/async_move_line_importer/model/move_line_importer.py | 36 | 14197 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import traceback
import logging
import base64
import threading
import csv
import tempfile
import psycopg2
import openerp.pooler as pooler
from openerp.osv import orm, fields
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class move_line_importer(orm.Model):
    """Asynchronous move / move line importer.
    It will parse the saved CSV file using orm.BaseModel.load
    in a thread. If you set bypass_orm to True then the load function
    will use a totally overridden create function that is a lot faster
    but that totally bypasses the ORM
"""
_name = "move.line.importer"
_inherit = ['mail.thread']
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default.update(state='draft', report=False)
return super(move_line_importer, self).copy(cr, uid, id,
default=default,
context=context)
    def track_success(self, cr, uid, obj, context=None):
"""Used by mail subtype"""
return obj['state'] == 'done'
    def track_error(self, cr, uid, obj, context=None):
"""Used by mail subtype"""
return obj['state'] == 'error'
_track = {
'state': {
'async_move_line_importer.mvl_imported': track_success,
'async_move_line_importer.mvl_error': track_error,
},
}
_columns = {
'name': fields.datetime(
'Name',
required=True,
readonly=True
),
'state': fields.selection(
[('draft', 'New'),
('running', 'Running'),
('done', 'Success'),
('error', 'Error')],
readonly=True,
string='Status'
),
'report': fields.text(
'Report',
readonly=True
),
'file': fields.binary(
'File',
required=True
),
'delimiter': fields.selection(
[(',', ','), (';', ';'), ('|', '|')],
string="CSV delimiter",
required=True
),
'company_id': fields.many2one(
'res.company',
'Company'
),
'bypass_orm': fields.boolean(
'Fast import (use with caution)',
            help="When enabled import will be faster but"
                 " it will not use orm and may"
                 " not support all CSV canvas. \n"
                 "Entry posted option will be skipped. \n"
                 "AA lines will only be created when"
                 " moves are posted. \n"
                 "Tax lines computation will be skipped. \n"
                 "This option should be used with caution"
                 " and in conjunction with provided canvas."
),
}
def _get_current_company(self, cr, uid, context=None,
model="move.line.importer"):
return self.pool.get('res.company')._company_default_get(
cr, uid,
model,
context=context
)
_defaults = {'state': 'draft',
'name': fields.datetime.now(),
'company_id': _get_current_company,
'delimiter': ',',
'bypass_orm': False}
def _parse_csv(self, cr, uid, imp_id):
"""Parse stored CSV file in order to be usable by BaseModel.load method.
Manage base 64 decoding.
:param imp_id: current importer id
:returns: (head [list of first row], data [list of list])
"""
# We use tempfile in order to avoid memory error with large files
with tempfile.TemporaryFile() as src:
imp = self.read(cr, uid, imp_id, ['file', 'delimiter'])
content = imp['file']
delimiter = imp['delimiter']
src.write(content)
with tempfile.TemporaryFile() as decoded:
src.seek(0)
base64.decode(src, decoded)
decoded.seek(0)
return self._prepare_csv_data(decoded, delimiter)
def _prepare_csv_data(self, csv_file, delimiter=","):
"""Parse a decoded CSV file and return head list and data list
:param csv_file: decoded CSV file
:param delimiter: CSV file delimiter char
:returns: (head [list of first row], data [list of list])
"""
try:
data = csv.reader(csv_file, delimiter=str(delimiter))
except csv.Error as error:
raise orm.except_orm(
                _('CSV file is malformed'),
                _("Maybe you have not chosen the correct separator \n"
                  "the error detail is : \n %s") % repr(error)
)
head = data.next()
head = [x.replace(' ', '') for x in head]
# Generator does not work with orm.BaseModel.load
values = [tuple(x) for x in data if x]
return (head, values)
def format_messages(self, messages):
"""Format error messages generated by the BaseModel.load method
:param messages: return of BaseModel.load messages key
:returns: formatted string
"""
res = []
for msg in messages:
rows = msg.get('rows', {})
res.append(_("%s. -- Field: %s -- rows %s to %s") % (
msg.get('message', 'N/A'),
msg.get('field', 'N/A'),
rows.get('from', 'N/A'),
rows.get('to', 'N/A'))
)
return "\n \n".join(res)
def _manage_load_results(self, cr, uid, imp_id, result, _do_commit=True,
context=None):
"""Manage the BaseModel.load function output and store exception.
Will generate success/failure report and store it into report field.
Manage commit and rollback even if load method uses PostgreSQL
Savepoints.
:param imp_id: current importer id
:param result: BaseModel.load returns
{ids: list(int)|False, messages: [Message]}
:param _do_commit: toggle commit management only used
for testing purpose only
:returns: current importer id
"""
# Import sucessful
state = msg = None
if not result['messages']:
msg = _("%s lines imported" % len(result['ids'] or []))
state = 'done'
else:
if _do_commit:
cr.rollback()
msg = self.format_messages(result['messages'])
state = 'error'
return (imp_id, state, msg)
def _write_report(self, cr, uid, imp_id, state, msg, _do_commit=True,
                      max_tries=5, context=None):
        """Commit the report in a separate transaction.
        This avoids concurrent update errors due to mail.message.
        If we hit transaction trouble we retry up to 5 times to write the report.
:param imp_id: current importer id
:param state: import state
:param msg: report summary
:returns: current importer id
"""
if _do_commit:
db_name = cr.dbname
local_cr = pooler.get_db(db_name).cursor()
try:
self.write(local_cr, uid, [imp_id],
{'state': state, 'report': msg},
context=context)
local_cr.commit()
# We handle concurrent error troubles
except psycopg2.OperationalError as pg_exc:
_logger.error(
"Can not write report. "
"System will retry %s time(s)" % max_tries
)
if (pg_exc.pg_code in orm.PG_CONCURRENCY_ERRORS_TO_RETRY and
max_tries >= 0):
local_cr.rollback()
local_cr.close()
remaining_try = max_tries - 1
                    self._write_report(cr, uid, imp_id, state, msg,
                                       _do_commit=_do_commit,
                                       max_tries=remaining_try,
                                       context=context)
else:
_logger.exception(
'Can not log report - Operational update error'
)
raise
except Exception:
_logger.exception('Can not log report')
local_cr.rollback()
raise
finally:
if not local_cr.closed:
local_cr.close()
else:
self.write(cr, uid, [imp_id],
{'state': state, 'report': msg},
context=context)
return imp_id
def _load_data(self, cr, uid, imp_id, head, data, _do_commit=True,
context=None):
"""Function that does the load of parsed CSV file.
        It will log exceptions and successes into the report fields.
:param imp_id: current importer id
:param head: CSV file head (list of header)
:param data: CSV file content (list of data list)
:param _do_commit: toggle commit management
only used for testing purpose only
:returns: current importer id
"""
state = msg = None
try:
res = self.pool['account.move'].load(cr, uid, head, data,
context=context)
r_id, state, msg = self._manage_load_results(cr, uid, imp_id, res,
_do_commit=_do_commit,
context=context)
except Exception as exc:
if _do_commit:
cr.rollback()
ex_type, sys_exc, tb = sys.exc_info()
tb_msg = ''.join(traceback.format_tb(tb, 30))
_logger.error(tb_msg)
_logger.error(repr(exc))
msg = _("Unexpected exception.\n %s \n %s" % (repr(exc), tb_msg))
state = 'error'
finally:
self._write_report(cr, uid, imp_id, state, msg,
_do_commit=_do_commit, context=context)
if _do_commit:
try:
cr.commit()
except psycopg2.Error:
_logger.exception('Can not do final commit')
cr.close()
return imp_id
    def _allows_thread(self, imp_id):
        """Check if there is an async import of this file running
        :param imp_id: current importer id
        :returns: void
        :raise: orm.except in case of failure
"""
for th in threading.enumerate():
if th.getName() == 'async_move_line_import_%s' % imp_id:
raise orm.except_orm(
_('An import of this file is already running'),
_('Please try latter')
)
def _check_permissions(self, cr, uid, context=None):
"""Ensure that user is allowed to create move / move line"""
move_obj = self.pool['account.move']
move_line_obj = self.pool['account.move.line']
move_obj.check_access_rule(cr, uid, [], 'create')
move_obj.check_access_rights(cr, uid, 'create', raise_exception=True)
move_line_obj.check_access_rule(cr, uid, [], 'create')
move_line_obj.check_access_rights(cr, uid, 'create',
raise_exception=True)
def import_file(self, cr, uid, imp_id, context=None):
""" Will do an asynchronous load of a CSV file.
        Will generate a success/failure report and post some
        mail thread messages. It uses BaseModel.load to load the CSV.
        If you set bypass_orm to True then the load function
        will use a totally overridden create function that is a lot faster
        but that totally bypasses the ORM
"""
if isinstance(imp_id, list):
imp_id = imp_id[0]
if context is None:
context = {}
current = self.read(cr, uid, imp_id, ['bypass_orm', 'company_id'],
load='_classic_write')
context['company_id'] = current['company_id']
bypass_orm = current['bypass_orm']
if bypass_orm:
# Tells create funtion to bypass orm
# As we bypass orm we ensure that
# user is allowed to creat move / move line
self._check_permissions(cr, uid, context=context)
context['async_bypass_create'] = True
head, data = self._parse_csv(cr, uid, imp_id)
self.write(cr, uid, [imp_id], {'state': 'running',
'report': _('Import is running')})
self._allows_thread(imp_id)
db_name = cr.dbname
local_cr = pooler.get_db(db_name).cursor()
thread = threading.Thread(target=self._load_data,
name='async_move_line_import_%s' % imp_id,
args=(local_cr, uid, imp_id, head, data),
kwargs={'context': context.copy()})
thread.start()
return {}
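# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical way to drive the importer from server side code; the CSV
# payload and the (pool, cr, uid) environment are assumptions supplied by the
# caller, as in any OpenERP 7 server action.
def _example_import(pool, cr, uid, csv_content, context=None):
    importer_obj = pool['move.line.importer']
    imp_id = importer_obj.create(cr, uid, {
        'file': base64.encodestring(csv_content),
        'delimiter': ',',
    }, context=context)
    # import_file spawns the worker thread and returns immediately; progress
    # ends up in the record's `state` and `report` fields.
    importer_obj.import_file(cr, uid, [imp_id], context=context)
    return imp_id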
| agpl-3.0 | 5,626,602,854,330,952,000 | 36.360526 | 80 | 0.513348 | false |
BrighterCommand/Brightside | brightside/dispatch.py | 1 | 14030 | """
File : dispatch.py
Author : ian
Created : 04-21-2017
Last Modified By : ian
Last Modified On : 04-21-2017
***********************************************************************
The MIT License (MIT)
Copyright © 2017 Ian Cooper <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
***********************************************************************
"""
import logging
import time
from enum import Enum
from multiprocessing import Event, Process
from threading import Thread
from typing import Callable, Dict
from brightside.channels import Channel
from brightside.command_processor import CommandProcessor, Request
from brightside.connection import Connection
from brightside.exceptions import ConfigurationException, MessagingException
from brightside.message_factory import create_quit_message
from brightside.message_pump import MessagePump
from brightside.messaging import BrightsideConsumerConfiguration, BrightsideConsumer, BrightsideMessage
class Performer:
def __init__(self,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request],
logger: logging.Logger=None
) -> None:
"""
Each Performer abstracts a process running a message pump.
That process is forked from the parent, as we cannot guarantee a message pump is only I/O bound and thus will
not scale because of the GIL.
The Performer is how the supervisor (the dispatcher) tracks the workers it has created
The Performer needs:
:param channel_name: The name of the channel we want to create a sub-process for
:param connection: The connection to the broker
        :param consumer_factory: We need a user supplied callback to provide us an instance of the consumer for
the broker we are using. Arame? Something else?
:param command_processor_factory: We need a user supplied callback to create a commandprocessor with
subscribers, policies, outgoing tasks queues etc.
:param mapper_func: We need a user supplied callback to map on the wire messages to requests
"""
        # TODO: The parameter needs to be a connection, not an AramaConnection as we can't decide to create an Arame Consumer
# here. Where do we make that choice?
self._channel_name = channel_name
self._connection = connection
self._consumer_configuration = consumer_configuration
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
self._logger = logger or logging.getLogger(__name__)
def stop(self) -> None:
self._consumer_configuration.pipeline.put(create_quit_message())
def run(self, started_event: Event) -> Process:
p = Process(target=_sub_process_main, args=(
started_event,
self._channel_name,
self._connection,
self._consumer_configuration,
self._consumer_factory,
self._command_processor_factory,
self._mapper_func))
self._logger.debug("Starting worker process for channel: %s on exchange %s on server %s",
self._channel_name, self._connection.exchange, self._connection.amqp_uri)
p.start()
started_event.wait(timeout=1)
return p
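# Illustrative sketch (not part of the original module): a Performer is
# normally driven by the Dispatcher below, roughly like this. The channel
# name and the factory/mapper names are hypothetical placeholders.
#
#   started = Event()
#   performer = Performer("orders", connection, consumer_config,
#                         consumer_factory, command_processor_factory, mapper)
#   process = performer.run(started)   # forks the message pump sub-process
#   ...
#   performer.stop()                   # posts a quit message to the pipeline
#   process.join()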
def _sub_process_main(started_event: Event,
channel_name: str,
connection: Connection,
consumer_configuration: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
This is the main method for the sub=process, everything we need to create the message pump and
channel it needs to be passed in as parameters that can be pickled as when we run they will be serialized
into this process. The data should be value types, not reference types as we will receive a copy of the original.
Inter-process communication is signalled by the event - to indicate startup - and the pipeline to facilitate a
sentinel or stop message
:param started_event: Used by the sub-process to signal that it is ready
:param channel_name: The name we want to give the channel to the broker for identification
:param connection: The 'broker' connection
:param consumer_configuration: How to configure our consumer of messages from the channel
:param consumer_factory: Callback to create the consumer. User code as we don't know what consumer library they
want to use. Arame? Something else?
:param command_processor_factory: Callback to register subscribers, policies, and task queues then build command
processor. User code that provides us with their requests and handlers
:param mapper_func: We need to map between messages on the wire and our handlers
:return:
"""
logger = logging.getLogger(__name__)
consumer = consumer_factory(connection, consumer_configuration, logger)
channel = Channel(name=channel_name, consumer=consumer, pipeline=consumer_configuration.pipeline)
# TODO: Fix defaults that need passed in config values
command_processor = command_processor_factory(channel_name)
message_pump = MessagePump(command_processor=command_processor, channel=channel, mapper_func=mapper_func,
timeout=500, unacceptable_message_limit=None, requeue_count=None)
logger.debug("Starting the message pump for %s", channel_name)
message_pump.run(started_event)
class ConsumerConfiguration:
def __init__(self,
connection: Connection,
consumer: BrightsideConsumerConfiguration,
consumer_factory: Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer],
command_processor_factory: Callable[[str], CommandProcessor],
mapper_func: Callable[[BrightsideMessage], Request]) -> None:
"""
The configuration parameters for one consumer - can create one or more performers from this, each of which is
a message pump reading from a queue
:param connection: The connection to the broker
:param consumer: The consumer we want to create (routing key, queue etc)
        :param consumer_factory: A factory to create a consumer to read from a broker, for a given implementation i.e. arame
        :param command_processor_factory: A factory that creates a command processor configured for a pipeline
        :param mapper_func: Maps between messages on the queue and requests (commands/events)
"""
self._connection = connection
self._consumer = consumer
self._consumer_factory = consumer_factory
self._command_processor_factory = command_processor_factory
self._mapper_func = mapper_func
@property
def connection(self) -> Connection:
return self._connection
@property
def brightside_configuration(self) -> BrightsideConsumerConfiguration:
return self._consumer
@property
def consumer_factory(self) -> Callable[[Connection, BrightsideConsumerConfiguration, logging.Logger], BrightsideConsumer]:
return self._consumer_factory
@property
def command_processor_factory(self):
return self._command_processor_factory
@property
def mapper_func(self) -> Callable[[BrightsideMessage], Request]:
return self._mapper_func
class DispatcherState(Enum):
ds_awaiting = 0,
ds_notready = 1,
ds_running = 2,
ds_stopped = 3,
ds_stopping = 4
class Dispatcher:
"""
The dispatcher orchestrates the creation of consumers, where a consumer is the sub-process that runs a message pump
    to consume messages from a given channel and dispatch them to handlers. The dispatcher can start more than one performer
for a given channel.
The dispatcher also orchestrates the shutdown of consumers. It does this by posting a stop message into each running
    consumer's queue, thus allowing the current handler to run to completion but killing the consumer before it can
consume another work item from the queue.
As such the dispatcher tracks consumer instances.
In addition, as we must pass a factory method to the sub-process that creates the command processor for that channel
    i.e. handler and policy registration, outgoing queues, the Dispatcher also acts as a registry of those factory methods
for individual channels.
    The dispatcher uses a thread to 'stay running' until end is called. This means that receive is non-blocking. The
supervisor thread yields regularly to avoid spinning the CPU. This means there can be a delay between signalling to
end and the shutdown beginning.
    Shutdown will finish work in progress, as it inserts a quit message in the queue that gets consumed 'next'
"""
def __init__(self, consumers: Dict[str, ConsumerConfiguration]) -> None:
self._state = DispatcherState.ds_notready
self._consumers = consumers
self._performers = {k: Performer(
k,
v.connection,
v.brightside_configuration,
v.consumer_factory,
v.command_processor_factory,
v.mapper_func)
for k, v in self._consumers.items()}
self._running_performers = {}
self._supervisor = None
self._state = DispatcherState.ds_awaiting
@property
def state(self):
return self._state
def receive(self):
def _receive(dispatcher: Dispatcher, initialized: Event) -> None:
for k, v in self._performers.items():
event = Event()
dispatcher._running_performers[k] = v.run(event)
event.wait(3) # TODO: Do we want to configure this polling interval?
initialized.set()
while self._state == DispatcherState.ds_running:
time.sleep(5) # yield to avoid spinning, between checking for changes to state
if self._state == DispatcherState.ds_awaiting:
initialized = Event()
self._supervisor = Thread(target=_receive, args=(self, initialized))
            self._state = DispatcherState.ds_running
            self._supervisor.start()
            # Wait for the performers to signal start-up; waiting before start() would always time out.
            initialized.wait(5)  # TODO: Should this scale with the number of performers and be configurable?
def end(self):
if self._state == DispatcherState.ds_running:
for channel, process in list(self._running_performers.items()):
self._performers[channel].stop()
process.join(10) # TODO: We really want to make this configurable
self._state = DispatcherState.ds_stopping
self._supervisor.join(5)
self._running_performers.clear()
self._supervisor = None
self._state = DispatcherState.ds_stopped
            # Do we want to determine if any processes have failed to complete within the time frame?
def open(self, consumer_name: str) -> None:
# TODO: Build then refactor with receive
# Find the consumer
if consumer_name not in self._consumers:
raise ConfigurationException("The consumer {} could not be found, did you register it?".format(consumer_name))
consumer = self._consumers[consumer_name]
performer = Performer(consumer_name,
consumer.connection,
consumer.brightside_configuration,
consumer.consumer_factory,
consumer.command_processor_factory,
consumer.mapper_func)
self._performers[consumer_name] = performer
# if we have a supervisor thread
if self._state == DispatcherState.ds_running:
# start and add to items monitored by supervisor (running performers)
pass
# else
elif self._state == DispatcherState.ds_stopped:
# start the supervisor with the single consumer
self._state = DispatcherState.ds_awaiting
self.receive()
else:
raise MessagingException("Dispatcher in a un-recognised state to open new connection; state was {}", self._state)
| mit | 5,254,343,536,259,278,000 | 45.736667 | 132 | 0.669353 | false |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/direct/directscripts/extract_docs.py | 8 | 10595 | """ This script generates a pandadoc.hpp file representing the Python
wrappers that can be parsed by doxygen to generate the Python documentation.
You need to run this before invoking Doxyfile.python.
It requires a valid makepanda installation with interrogatedb .in
files in the lib/pandac/input directory. """
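# Typical invocation (illustrative, not from the original file):
#
#   python extract_docs.py
#
# The script locates the interrogatedb files relative to the installed pandac
# package and writes pandadoc.hpp into the current directory, which
# Doxyfile.python then consumes.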
__all__ = []
import os
import panda3d, pandac
from panda3d.dtoolconfig import *
LICENSE = """PANDA 3D SOFTWARE
Copyright (c) Carnegie Mellon University. All rights reserved.
All use of this software is subject to the terms of the revised BSD
license. You should have received a copy of this license along
with this source code in a file named \"LICENSE.\"""".split("\n")
def comment(code):
if not code:
return ""
comment = ''
empty_line = False
for line in code.splitlines(False):
line = line.strip('\t\n /')
if line:
if empty_line:
# New paragraph.
comment += '\n\n'
empty_line = False
elif comment:
comment += '\n'
comment += '/// ' + line
else:
empty_line = True
if comment:
return comment
else:
return ''
def block_comment(code):
if not code:
return ""
lines = code.split("\n")
newlines = []
indent = 0
reading_desc = False
for line in lines:
if line.startswith("////"):
continue
line = line.rstrip()
strline = line.lstrip('/ \t')
if ':' in strline:
pre, post = strline.split(':', 1)
pre = pre.rstrip()
if pre == "Description":
strline = post.lstrip()
elif pre in ("Class", "Access", "Function", "Created by", "Enum"):
continue
if strline or len(newlines) > 0:
newlines.append('/// ' + strline)
#if reading_desc:
# newlines.append('/// ' + line[min(indent, len(line) - len(strline)):])
#else:
# # A "Description:" text starts the description.
# if strline.startswith("Description"):
# strline = strline[11:].lstrip(': \t')
# indent = len(line) - len(strline)
# reading_desc = True
# newlines.append('/// ' + strline)
# else:
# print line
newcode = '\n'.join(newlines)
if len(newcode) > 0:
return newcode
else:
return ""
def translateFunctionName(name):
if name.startswith("__"):
return name
new = ""
for i in name.split("_"):
if new == "":
new += i
elif i == "":
pass
elif len(i) == 1:
new += i[0].upper()
else:
new += i[0].upper() + i[1:]
return new
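# For illustration (hypothetical inputs): translateFunctionName("set_pos")
# returns "setPos", while dunder names such as "__init__" are left unchanged.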
def translateTypeName(name, mangle=True):
# Equivalent to C++ classNameFromCppName
class_name = ""
bad_chars = "!@#$%^&*()<>,.-=+~{}? "
next_cap = False
first_char = mangle
for chr in name:
if (chr == '_' or chr == ' ') and mangle:
next_cap = True
elif chr in bad_chars:
if not mangle:
class_name += '_'
elif next_cap or first_char:
class_name += chr.upper()
next_cap = False
first_char = False
else:
class_name += chr
return class_name
def translated_type_name(type, scoped=True):
while interrogate_type_is_wrapped(type):
if interrogate_type_is_const(type):
return 'const ' + translated_type_name(interrogate_type_wrapped_type(type))
else:
type = interrogate_type_wrapped_type(type)
typename = interrogate_type_name(type)
if typename in ("PyObject", "_object"):
return "object"
elif typename == "PN_stdfloat":
return "float"
if interrogate_type_is_atomic(type):
token = interrogate_type_atomic_token(type)
if token == 7:
return 'str'
else:
return typename
if not typename.endswith('_t'):
# Hack: don't mangle size_t etc.
typename = translateTypeName(typename)
if scoped and interrogate_type_is_nested(type):
return translated_type_name(interrogate_type_outer_class(type)) + '::' + typename
else:
return typename
def processElement(handle, element):
if interrogate_element_has_comment(element):
print >>handle, comment(interrogate_element_comment(element))
print >>handle, translated_type_name(interrogate_element_type(element)),
print >>handle, interrogate_element_name(element) + ';'
def processFunction(handle, function, isConstructor = False):
for i_wrapper in xrange(interrogate_function_number_of_python_wrappers(function)):
wrapper = interrogate_function_python_wrapper(function, i_wrapper)
if interrogate_wrapper_has_comment(wrapper):
print >>handle, block_comment(interrogate_wrapper_comment(wrapper))
if not isConstructor:
if interrogate_function_is_method(function):
if not interrogate_wrapper_number_of_parameters(wrapper) > 0 or not interrogate_wrapper_parameter_is_this(wrapper, 0):
print >>handle, "static",
if interrogate_wrapper_has_return_value(wrapper):
print >>handle, translated_type_name(interrogate_wrapper_return_type(wrapper)),
else:
pass#print >>handle, "void",
print >>handle, translateFunctionName(interrogate_function_name(function)) + "(",
else:
print >>handle, "__init__(",
first = True
for i_param in range(interrogate_wrapper_number_of_parameters(wrapper)):
if not interrogate_wrapper_parameter_is_this(wrapper, i_param):
if not first:
print >>handle, ",",
print >>handle, translated_type_name(interrogate_wrapper_parameter_type(wrapper, i_param)),
if interrogate_wrapper_parameter_has_name(wrapper, i_param):
print >>handle, interrogate_wrapper_parameter_name(wrapper, i_param),
first = False
print >>handle, ");"
def processType(handle, type):
typename = translated_type_name(type, scoped=False)
derivations = [ translated_type_name(interrogate_type_get_derivation(type, n)) for n in range(interrogate_type_number_of_derivations(type)) ]
if interrogate_type_has_comment(type):
print >>handle, block_comment(interrogate_type_comment(type))
if interrogate_type_is_enum(type):
print >>handle, "enum %s {" % typename
for i_value in range(interrogate_type_number_of_enum_values(type)):
docstring = comment(interrogate_type_enum_value_comment(type, i_value))
if docstring:
print >>handle, docstring
print >>handle, interrogate_type_enum_value_name(type, i_value), "=", interrogate_type_enum_value(type, i_value), ","
elif interrogate_type_is_typedef(type):
wrapped_type = translated_type_name(interrogate_type_wrapped_type(type))
print >>handle, "typedef %s %s;" % (wrapped_type, typename)
return
else:
if interrogate_type_is_struct(type):
classtype = "struct"
elif interrogate_type_is_class(type):
classtype = "class"
elif interrogate_type_is_union(type):
classtype = "union"
else:
print "I don't know what type %s is" % interrogate_type_true_name(type)
return
if len(derivations) > 0:
print >>handle, "%s %s : public %s {" % (classtype, typename, ", public ".join(derivations))
else:
print >>handle, "%s %s {" % (classtype, typename)
print >>handle, "public:"
for i_ntype in xrange(interrogate_type_number_of_nested_types(type)):
processType(handle, interrogate_type_get_nested_type(type, i_ntype))
for i_method in xrange(interrogate_type_number_of_constructors(type)):
processFunction(handle, interrogate_type_get_constructor(type, i_method), True)
for i_method in xrange(interrogate_type_number_of_methods(type)):
processFunction(handle, interrogate_type_get_method(type, i_method))
for i_method in xrange(interrogate_type_number_of_make_seqs(type)):
print >>handle, "list", translateFunctionName(interrogate_make_seq_seq_name(interrogate_type_get_make_seq(type, i_method))), "();"
for i_element in xrange(interrogate_type_number_of_elements(type)):
processElement(handle, interrogate_type_get_element(type, i_element))
print >>handle, "};"
def processModule(handle, package):
print >>handle, "namespace %s {" % package
if package != "core":
print >>handle, "using namespace core;"
for i_type in xrange(interrogate_number_of_global_types()):
type = interrogate_get_global_type(i_type)
if interrogate_type_has_module_name(type):
module_name = interrogate_type_module_name(type)
if "panda3d." + package == module_name:
processType(handle, type)
else:
print "Type %s has no module name" % typename
for i_func in xrange(interrogate_number_of_global_functions()):
func = interrogate_get_global_function(i_func)
if interrogate_function_has_module_name(func):
module_name = interrogate_function_module_name(func)
if "panda3d." + package == module_name:
processFunction(handle, func)
else:
print "Type %s has no module name" % typename
print >>handle, "}"
if __name__ == "__main__":
handle = open("pandadoc.hpp", "w")
print >>handle, comment("Panda3D modules that are implemented in C++.")
print >>handle, "namespace panda3d {"
# Determine the path to the interrogatedb files
interrogate_add_search_directory(os.path.join(os.path.dirname(pandac.__file__), "..", "..", "etc"))
interrogate_add_search_directory(os.path.join(os.path.dirname(pandac.__file__), "input"))
import panda3d.core
processModule(handle, "core")
for lib in os.listdir(os.path.dirname(panda3d.__file__)):
if lib.endswith(('.pyd', '.so')) and not lib.startswith('core.'):
module_name = os.path.splitext(lib)[0]
__import__("panda3d." + module_name)
processModule(handle, module_name)
print >>handle, "}"
handle.close()
| apache-2.0 | -3,074,245,126,152,500,000 | 34.434783 | 145 | 0.5916 | false |
wkoathp/glance | glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py | 17 | 3573 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
from oslo_db import exception as db_exception
from sqlalchemy import and_, func, orm
from sqlalchemy import MetaData, Table
from sqlalchemy.exc import OperationalError, ProgrammingError
NEW_KEYNAME = 'image_members_image_id_member_deleted_at_key'
ORIGINAL_KEYNAME_RE = re.compile('image_members_image_id.*_key')
def upgrade(migrate_engine):
image_members = _get_image_members_table(migrate_engine)
if migrate_engine.name in ('mysql', 'postgresql'):
try:
UniqueConstraint('image_id',
name=_get_original_keyname(migrate_engine.name),
table=image_members).drop()
except (OperationalError, ProgrammingError, db_exception.DBError):
UniqueConstraint('image_id',
name=_infer_original_keyname(image_members),
table=image_members).drop()
UniqueConstraint('image_id',
'member',
'deleted_at',
name=NEW_KEYNAME,
table=image_members).create()
def downgrade(migrate_engine):
image_members = _get_image_members_table(migrate_engine)
if migrate_engine.name in ('mysql', 'postgresql'):
_sanitize(migrate_engine, image_members)
UniqueConstraint('image_id',
name=NEW_KEYNAME,
table=image_members).drop()
UniqueConstraint('image_id',
'member',
name=_get_original_keyname(migrate_engine.name),
table=image_members).create()
def _get_image_members_table(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
return Table('image_members', meta, autoload=True)
def _get_original_keyname(db):
return {'mysql': 'image_id',
'postgresql': 'image_members_image_id_member_key'}[db]
def _infer_original_keyname(table):
for i in table.indexes:
if ORIGINAL_KEYNAME_RE.match(i.name):
return i.name
def _sanitize(migrate_engine, table):
"""
Avoid possible integrity error by removing deleted rows
    to accommodate a less restrictive uniqueness constraint
"""
session = orm.sessionmaker(bind=migrate_engine)()
# find the image_member rows containing duplicate combinations
# of image_id and member
qry = (session.query(table.c.image_id, table.c.member)
.group_by(table.c.image_id, table.c.member)
.having(func.count() > 1))
for image_id, member in qry:
# only remove duplicate rows already marked deleted
d = table.delete().where(and_(table.c.deleted == True,
table.c.image_id == image_id,
table.c.member == member))
d.execute()
session.close()
| apache-2.0 | -5,340,895,149,612,231,000 | 36.21875 | 78 | 0.616289 | false |
bepitulaz/huntingdimana | env/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
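# Example (illustrative; the path is a placeholder):
#
#   with open('some_file.bin', 'rb') as f:
#       print(description_of(f, f.name))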
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| gpl-3.0 | 1,460,198,814,240,796,200 | 30.3 | 79 | 0.610224 | false |
citrix-openstack/build-ryu | ryu/tests/integrated/test_add_flow_v12_actions.py | 5 | 15350 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.tests.integrated import tester
LOG = logging.getLogger(__name__)
class RunTest(tester.TestFlowBase):
""" Test case for add flows of Actions
"""
OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(RunTest, self).__init__(*args, **kwargs)
self._verify = []
def add_apply_actions(self, dp, actions, match=None):
inst = [dp.ofproto_parser.OFPInstructionActions(
dp.ofproto.OFPIT_APPLY_ACTIONS, actions)]
if match is None:
match = dp.ofproto_parser.OFPMatch()
m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, 0,
dp.ofproto.OFPFC_ADD,
0, 0, 0xff, 0xffffffff,
dp.ofproto.OFPP_ANY,
dp.ofproto.OFPG_ANY,
0, match, inst)
dp.send_msg(m)
def add_set_field_action(self, dp, field, value, match=None):
self._verify = [dp.ofproto.OFPAT_SET_FIELD,
'field', field, value]
f = dp.ofproto_parser.OFPMatchField.make(field, value)
actions = [dp.ofproto_parser.OFPActionSetField(f), ]
self.add_apply_actions(dp, actions, match=match)
def verify_default(self, dp, stats):
verify = self._verify
self._verify = []
type_ = name = field = value = None
if len(verify) == 1:
(type_, ) = verify
elif len(verify) == 3:
(type_, name, value) = verify
elif len(verify) == 4:
(type_, name, field, value) = verify
else:
return "self._verify is invalid."
try:
action = stats[0].instructions[0].actions[0]
if action.cls_action_type != type_:
return "Action type error. send:%s, val:%s" \
% (type_, action.cls_action_type)
except IndexError:
return "Action is not setting."
s_val = None
if name:
try:
s_val = getattr(action, name)
except AttributeError:
pass
if name == 'field':
if s_val.header != field:
return "Field error. send:%s val:%s" \
% (field, s_val.field)
s_val = s_val.value
if name and s_val != value:
return "Value error. send:%s=%s val:%s" \
% (name, value, s_val)
return True
def verify_action_drop(self, dp, stats):
for s in stats:
for i in s.instructions:
if len(i.actions):
return "has actions. %s" % (i.actions)
return True
# Test of General Actions
def test_action_output(self, dp):
out_port = 255
self._verify = [dp.ofproto.OFPAT_OUTPUT,
'port', out_port]
actions = [dp.ofproto_parser.OFPActionOutput(out_port, 0), ]
self.add_apply_actions(dp, actions)
def test_action_drop(self, dp):
self.add_apply_actions(dp, [])
# Test of Push-Tag/Pop-Tag Actions
def test_action_push_vlan(self, dp):
ethertype = ether.ETH_TYPE_8021Q
self._verify = [dp.ofproto.OFPAT_PUSH_VLAN,
'ethertype', ethertype]
actions = [dp.ofproto_parser.OFPActionPushVlan(ethertype)]
self.add_apply_actions(dp, actions)
def test_action_pop_vlan(self, dp):
self._verify = [dp.ofproto.OFPAT_POP_VLAN, ]
actions = [dp.ofproto_parser.OFPActionPopVlan(), ]
self.add_apply_actions(dp, actions)
def test_action_push_mpls(self, dp):
ethertype = ether.ETH_TYPE_MPLS
self._verify = [dp.ofproto.OFPAT_PUSH_MPLS,
'ethertype', ethertype]
actions = [dp.ofproto_parser.OFPActionPushMpls(ethertype), ]
self.add_apply_actions(dp, actions)
def test_action_pop_mpls(self, dp):
ethertype = ether.ETH_TYPE_8021Q
self._verify = [dp.ofproto.OFPAT_POP_MPLS,
'ethertype', ethertype]
actions = [dp.ofproto_parser.OFPActionPopMpls(ethertype), ]
self.add_apply_actions(dp, actions)
# Test of Set-Filed Actions
def test_action_set_field_dl_dst(self, dp):
field = dp.ofproto.OXM_OF_ETH_DST
dl_dst = 'e2:7a:09:79:0b:0f'
value = self.haddr_to_bin(dl_dst)
self.add_set_field_action(dp, field, value)
def test_action_set_field_dl_src(self, dp):
field = dp.ofproto.OXM_OF_ETH_SRC
dl_src = '08:82:63:b6:62:05'
value = self.haddr_to_bin(dl_src)
self.add_set_field_action(dp, field, value)
def test_action_set_field_dl_type(self, dp):
field = dp.ofproto.OXM_OF_ETH_TYPE
value = ether.ETH_TYPE_IPV6
self.add_set_field_action(dp, field, value)
def test_action_set_field_vlan_vid(self, dp):
field = dp.ofproto.OXM_OF_VLAN_VID
value = 0x1e4
self.add_set_field_action(dp, field, value)
def test_action_set_field_vlan_pcp(self, dp):
field = dp.ofproto.OXM_OF_VLAN_PCP
value = 3
match = dp.ofproto_parser.OFPMatch()
match.set_vlan_vid(1)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_nw_dscp(self, dp):
field = dp.ofproto.OXM_OF_IP_DSCP
value = 32
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_nw_ecn(self, dp):
field = dp.ofproto.OXM_OF_IP_ECN
value = 1
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ip_proto(self, dp):
field = dp.ofproto.OXM_OF_IP_PROTO
value = inet.IPPROTO_TCP
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ipv4_src(self, dp):
field = dp.ofproto.OXM_OF_IPV4_SRC
ipv4_src = '192.168.3.92'
value = self.ipv4_to_int(ipv4_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ipv4_dst(self, dp):
field = dp.ofproto.OXM_OF_IPV4_DST
ipv4_dst = '192.168.74.122'
value = self.ipv4_to_int(ipv4_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_tcp_src(self, dp):
field = dp.ofproto.OXM_OF_TCP_SRC
value = 105
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_TCP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_tcp_dst(self, dp):
field = dp.ofproto.OXM_OF_TCP_DST
value = 75
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_TCP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_udp_src(self, dp):
field = dp.ofproto.OXM_OF_UDP_SRC
value = 197
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_UDP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_udp_dst(self, dp):
field = dp.ofproto.OXM_OF_UDP_DST
value = 17
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_IP)
match.set_ip_proto(inet.IPPROTO_UDP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_icmpv4_type(self, dp):
field = dp.ofproto.OXM_OF_ICMPV4_TYPE
value = 8
match = dp.ofproto_parser.OFPMatch()
match.set_ip_proto(inet.IPPROTO_ICMP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_icmpv4_code(self, dp):
field = dp.ofproto.OXM_OF_ICMPV4_CODE
value = 2
match = dp.ofproto_parser.OFPMatch()
match.set_ip_proto(inet.IPPROTO_ICMP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_op(self, dp):
field = dp.ofproto.OXM_OF_ARP_OP
value = 2
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_spa(self, dp):
field = dp.ofproto.OXM_OF_ARP_SPA
nw_src = '192.168.132.179'
value = self.ipv4_to_int(nw_src)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_tpa(self, dp):
field = dp.ofproto.OXM_OF_ARP_TPA
nw_dst = '192.168.118.85'
value = self.ipv4_to_int(nw_dst)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_sha(self, dp):
field = dp.ofproto.OXM_OF_ARP_SHA
arp_sha = '50:29:e7:7f:6c:7f'
value = self.haddr_to_bin(arp_sha)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_arp_tha(self, dp):
field = dp.ofproto.OXM_OF_ARP_THA
arp_tha = '71:c8:72:2f:47:fd'
value = self.haddr_to_bin(arp_tha)
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_ARP)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_ipv6_src(self, dp):
field = dp.ofproto.OXM_OF_IPV6_SRC
ipv6_src = '7527:c798:c772:4a18:117a:14ff:c1b6:e4ef'
value = self.ipv6_to_int(ipv6_src)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_dst(self, dp):
field = dp.ofproto.OXM_OF_IPV6_DST
ipv6_dst = '8893:65b3:6b49:3bdb:3d2:9401:866c:c96'
value = self.ipv6_to_int(ipv6_dst)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_flabel(self, dp):
field = dp.ofproto.OXM_OF_IPV6_FLABEL
value = 0x2c12
self.add_set_field_action(dp, field, value)
def test_action_set_field_icmpv6_type(self, dp):
field = dp.ofproto.OXM_OF_ICMPV6_TYPE
value = 129
self.add_set_field_action(dp, field, value)
def test_action_set_field_icmpv6_code(self, dp):
field = dp.ofproto.OXM_OF_ICMPV6_CODE
value = 2
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_nd_target(self, dp):
field = dp.ofproto.OXM_OF_IPV6_ND_TARGET
target = "5420:db3f:921b:3e33:2791:98f:dd7f:2e19"
value = self.ipv6_to_int(target)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_nd_sll(self, dp):
field = dp.ofproto.OXM_OF_IPV6_ND_SLL
sll = "54:db:3f:3e:27:19"
value = self.haddr_to_bin(sll)
self.add_set_field_action(dp, field, value)
def test_action_set_field_ipv6_nd_tll(self, dp):
field = dp.ofproto.OXM_OF_IPV6_ND_TLL
tll = "83:13:48:1e:d0:b0"
value = self.haddr_to_bin(tll)
self.add_set_field_action(dp, field, value)
def test_action_set_field_mpls_label(self, dp):
field = dp.ofproto.OXM_OF_MPLS_LABEL
value = 0x4c
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_MPLS)
self.add_set_field_action(dp, field, value, match)
def test_action_set_field_mpls_tc(self, dp):
field = dp.ofproto.OXM_OF_MPLS_TC
value = 0b101
match = dp.ofproto_parser.OFPMatch()
match.set_dl_type(ether.ETH_TYPE_MPLS)
self.add_set_field_action(dp, field, value, match)
# Test of Change-TTL Actions
def test_action_set_mpls_ttl(self, dp):
mpls_ttl = 8
self._verify = [dp.ofproto.OFPAT_SET_MPLS_TTL,
'mpls_ttl', mpls_ttl]
actions = [dp.ofproto_parser.OFPActionSetMplsTtl(mpls_ttl), ]
self.add_apply_actions(dp, actions)
def test_action_dec_mpls_ttl(self, dp):
self._verify = [dp.ofproto.OFPAT_DEC_MPLS_TTL]
actions = [dp.ofproto_parser.OFPActionDecMplsTtl(), ]
self.add_apply_actions(dp, actions)
def test_action_set_nw_ttl(self, dp):
nw_ttl = 64
self._verify = [dp.ofproto.OFPAT_SET_NW_TTL,
'nw_ttl', nw_ttl]
actions = [dp.ofproto_parser.OFPActionSetNwTtl(nw_ttl), ]
self.add_apply_actions(dp, actions)
def test_action_dec_nw_ttl(self, dp):
self._verify = [dp.ofproto.OFPAT_DEC_NW_TTL]
actions = [dp.ofproto_parser.OFPActionDecNwTtl(), ]
self.add_apply_actions(dp, actions)
def test_action_copy_ttl_out(self, dp):
self._verify = [dp.ofproto.OFPAT_COPY_TTL_OUT]
actions = [dp.ofproto_parser.OFPActionCopyTtlOut(), ]
self.add_apply_actions(dp, actions)
def test_action_copy_ttl_in(self, dp):
self._verify = [dp.ofproto.OFPAT_COPY_TTL_IN]
actions = [dp.ofproto_parser.OFPActionCopyTtlIn(), ]
self.add_apply_actions(dp, actions)
def is_supported(self, t):
# Open vSwitch 1.10 does not support MPLS yet.
unsupported = [
'test_action_set_field_ip_proto',
'test_action_set_field_dl_type',
'test_action_set_field_arp',
'test_action_set_field_ipv6',
'test_action_set_field_icmp',
'test_action_set_nw_ttl',
'test_action_copy_ttl_in',
'test_action_copy_ttl_out',
'test_action_dec_mpls_ttl',
'test_action_pop_mpls',
'test_action_push_mpls',
'test_action_set_field_mpls_label',
'test_action_set_field_mpls_tc',
'test_action_set_mpls_ttl'
]
for u in unsupported:
if t.find(u) != -1:
return False
return True
| apache-2.0 | 7,543,494,267,174,587,000 | 32.29718 | 69 | 0.589967 | false |
snnn/tensorflow | tensorflow/python/debug/cli/tensor_format.py | 44 | 20447 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Format tensors (ndarrays) for screen display and navigation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.lib import debug_data
_NUMPY_OMISSION = "...,"
_NUMPY_DEFAULT_EDGE_ITEMS = 3
_NUMBER_REGEX = re.compile(r"[-+]?([0-9][-+0-9eE\.]+|nan|inf)(\s|,|\])")
BEGIN_INDICES_KEY = "i0"
OMITTED_INDICES_KEY = "omitted"
DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR = "bold"
class HighlightOptions(object):
"""Options for highlighting elements of a tensor."""
def __init__(self,
criterion,
description=None,
font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):
"""Constructor of HighlightOptions.
Args:
criterion: (callable) A callable of the following signature:
def to_highlight(X):
# Args:
# X: The tensor to highlight elements in.
#
# Returns:
# (boolean ndarray) A boolean ndarray of the same shape as X
# indicating which elements are to be highlighted (iff True).
This callable will be used as the argument of np.argwhere() to
determine which elements of the tensor are to be highlighted.
description: (str) Description of the highlight criterion embodied by
criterion.
font_attr: (str) Font attribute to be applied to the
highlighted elements.
"""
self.criterion = criterion
self.description = description
self.font_attr = font_attr
def format_tensor(tensor,
tensor_label,
include_metadata=False,
auxiliary_message=None,
include_numeric_summary=False,
np_printoptions=None,
highlight_options=None):
"""Generate a RichTextLines object showing a tensor in formatted style.
Args:
tensor: The tensor to be displayed, as a numpy ndarray or other
appropriate format (e.g., None representing uninitialized tensors).
tensor_label: A label for the tensor, as a string. If set to None, will
suppress the tensor name line in the return value.
include_metadata: Whether metadata such as dtype and shape are to be
included in the formatted text.
auxiliary_message: An auxiliary message to display under the tensor label,
dtype and shape information lines.
include_numeric_summary: Whether a text summary of the numeric values (if
applicable) will be included.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions() to set the text format for display numpy
ndarrays.
highlight_options: (HighlightOptions) options for highlighting elements
of the tensor.
Returns:
A RichTextLines object. Its annotation field has line-by-line markups to
indicate which indices in the array the first element of each line
corresponds to.
"""
lines = []
font_attr_segs = {}
if tensor_label is not None:
lines.append("Tensor \"%s\":" % tensor_label)
suffix = tensor_label.split(":")[-1]
if suffix.isdigit():
# Suffix is a number. Assume it is the output slot index.
font_attr_segs[0] = [(8, 8 + len(tensor_label), "bold")]
else:
# Suffix is not a number. It is auxiliary information such as the debug
# op type. In this case, highlight the suffix with a different color.
debug_op_len = len(suffix)
proper_len = len(tensor_label) - debug_op_len - 1
font_attr_segs[0] = [
(8, 8 + proper_len, "bold"),
(8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, "yellow")
]
if isinstance(tensor, debug_data.InconvertibleTensorProto):
if lines:
lines.append("")
lines.extend(str(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
elif not isinstance(tensor, np.ndarray):
# If tensor is not a np.ndarray, return simple text-line representation of
# the object without annotations.
if lines:
lines.append("")
lines.extend(repr(tensor).split("\n"))
return debugger_cli_common.RichTextLines(lines)
if include_metadata:
lines.append(" dtype: %s" % str(tensor.dtype))
lines.append(" shape: %s" % str(tensor.shape).replace("L", ""))
if lines:
lines.append("")
formatted = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs)
if auxiliary_message:
formatted.extend(auxiliary_message)
if include_numeric_summary:
formatted.append("Numeric summary:")
formatted.extend(numeric_summary(tensor))
formatted.append("")
# Apply custom string formatting options for numpy ndarray.
if np_printoptions is not None:
np.set_printoptions(**np_printoptions)
array_lines = repr(tensor).split("\n")
if tensor.dtype.type is not np.string_:
# Parse array lines to get beginning indices for each line.
# TODO(cais): Currently, we do not annotate string-type tensors due to
# difficulty in escaping sequences. Address this issue.
annotations = _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=np_printoptions)
else:
annotations = None
formatted_array = debugger_cli_common.RichTextLines(
array_lines, annotations=annotations)
formatted.extend(formatted_array)
# Perform optional highlighting.
if highlight_options is not None:
indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
total_elements = np.size(tensor)
highlight_summary = "Highlighted%s: %d of %d element(s) (%.2f%%)" % (
"(%s)" % highlight_options.description if highlight_options.description
else "", len(indices_list), total_elements,
len(indices_list) / float(total_elements) * 100.0)
formatted.lines[0] += " " + highlight_summary
if indices_list:
indices_list = [list(indices) for indices in indices_list]
are_omitted, rows, start_cols, end_cols = locate_tensor_element(
formatted, indices_list)
for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
start_cols, end_cols):
if is_omitted or start_col is None or end_col is None:
continue
if row in formatted.font_attr_segs:
formatted.font_attr_segs[row].append(
(start_col, end_col, highlight_options.font_attr))
else:
formatted.font_attr_segs[row] = [(start_col, end_col,
highlight_options.font_attr)]
return formatted
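# Illustrative example (not part of the original module): formatting a small
# ndarray and highlighting its negative entries. The tensor and the label
# "node_a:0" are made up for demonstration.
#
#   tensor = np.array([[1.0, -2.0], [3.0, -4.0]])
#   opts = HighlightOptions(lambda x: x < 0.0, description="negative")
#   out = format_tensor(tensor, "node_a:0", include_metadata=True,
#                       highlight_options=opts)
#   # out.lines is a list of str ready for display in the curses-based CLI.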
def _annotate_ndarray_lines(
array_lines, tensor, np_printoptions=None, offset=0):
"""Generate annotations for line-by-line begin indices of tensor text.
Parse the numpy-generated text representation of a numpy ndarray to
determine the indices of the first element of each text line (if any
element is present in the line).
For example, given the following multi-line ndarray text representation:
["array([[ 0. , 0.0625, 0.125 , 0.1875],",
" [ 0.25 , 0.3125, 0.375 , 0.4375],",
" [ 0.5 , 0.5625, 0.625 , 0.6875],",
" [ 0.75 , 0.8125, 0.875 , 0.9375]])"]
the generate annotation will be:
{0: {BEGIN_INDICES_KEY: [0, 0]},
1: {BEGIN_INDICES_KEY: [1, 0]},
2: {BEGIN_INDICES_KEY: [2, 0]},
3: {BEGIN_INDICES_KEY: [3, 0]}}
Args:
array_lines: Text lines representing the tensor, as a list of str.
tensor: The tensor being formatted as string.
np_printoptions: A dictionary of keyword arguments that are passed to a
call of np.set_printoptions().
offset: Line number offset applied to the line indices in the returned
annotation.
Returns:
An annotation as a dict.
"""
if np_printoptions and "edgeitems" in np_printoptions:
edge_items = np_printoptions["edgeitems"]
else:
edge_items = _NUMPY_DEFAULT_EDGE_ITEMS
annotations = {}
# Put metadata about the tensor in the annotations["tensor_metadata"].
annotations["tensor_metadata"] = {
"dtype": tensor.dtype, "shape": tensor.shape}
dims = np.shape(tensor)
ndims = len(dims)
if ndims == 0:
# No indices for a 0D tensor.
return annotations
curr_indices = [0] * len(dims)
curr_dim = 0
for i in xrange(len(array_lines)):
line = array_lines[i].strip()
if not line:
# Skip empty lines, which can appear for >= 3D arrays.
continue
if line == _NUMPY_OMISSION:
annotations[offset + i] = {OMITTED_INDICES_KEY: copy.copy(curr_indices)}
curr_indices[curr_dim - 1] = dims[curr_dim - 1] - edge_items
else:
num_lbrackets = line.count("[") # TODO(cais): String array escaping.
num_rbrackets = line.count("]")
curr_dim += num_lbrackets - num_rbrackets
annotations[offset + i] = {BEGIN_INDICES_KEY: copy.copy(curr_indices)}
if num_rbrackets == 0:
line_content = line[line.rfind("[") + 1:]
num_elements = line_content.count(",")
curr_indices[curr_dim - 1] += num_elements
else:
if curr_dim > 0:
curr_indices[curr_dim - 1] += 1
for k in xrange(curr_dim, ndims):
curr_indices[k] = 0
return annotations
def locate_tensor_element(formatted, indices):
"""Locate a tensor element in formatted text lines, given element indices.
Given a RichTextLines object representing a tensor and indices of the sought
element, return the row number at which the element is located (if exists).
Args:
formatted: A RichTextLines object containing formatted text lines
representing the tensor.
indices: Indices of the sought element, as a list of int or a list of list
of int. The former case is for a single set of indices to look up,
whereas the latter case is for looking up a batch of indices sets at once.
In the latter case, the indices must be in ascending order, or a
ValueError will be raised.
Returns:
1) A boolean indicating whether the element falls into an omitted line.
2) Row index.
3) Column start index, i.e., the first column in which the representation
of the specified tensor starts, if it can be determined. If it cannot
be determined (e.g., due to ellipsis), None.
4) Column end index, i.e., the column right after the last column that
represents the specified tensor. Iff it cannot be determined, None.
  The return values described above are based on a single set of indices to
look up. In the case of batch mode (multiple sets of indices), the return
values will be lists of the types described above.
Raises:
AttributeError: If:
Input argument "formatted" does not have the required annotations.
ValueError: If:
1) Indices do not match the dimensions of the tensor, or
2) Indices exceed sizes of the tensor, or
3) Indices contain negative value(s).
4) If in batch mode, and if not all sets of indices are in ascending
order.
"""
if isinstance(indices[0], list):
indices_list = indices
input_batch = True
else:
indices_list = [indices]
input_batch = False
# Check that tensor_metadata is available.
if "tensor_metadata" not in formatted.annotations:
raise AttributeError("tensor_metadata is not available in annotations.")
# Sanity check on input argument.
_validate_indices_list(indices_list, formatted)
dims = formatted.annotations["tensor_metadata"]["shape"]
batch_size = len(indices_list)
lines = formatted.lines
annot = formatted.annotations
prev_r = 0
prev_line = ""
prev_indices = [0] * len(dims)
# Initialize return values
are_omitted = [None] * batch_size
row_indices = [None] * batch_size
start_columns = [None] * batch_size
end_columns = [None] * batch_size
batch_pos = 0 # Current position in the batch.
for r in xrange(len(lines)):
if r not in annot:
continue
if BEGIN_INDICES_KEY in annot[r]:
indices_key = BEGIN_INDICES_KEY
elif OMITTED_INDICES_KEY in annot[r]:
indices_key = OMITTED_INDICES_KEY
matching_indices_list = [
ind for ind in indices_list[batch_pos:]
if prev_indices <= ind < annot[r][indices_key]
]
if matching_indices_list:
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
batch_pos += num_matches
if batch_pos >= batch_size:
break
prev_r = r
prev_line = lines[r]
prev_indices = annot[r][indices_key]
if batch_pos < batch_size:
matching_indices_list = indices_list[batch_pos:]
num_matches = len(matching_indices_list)
match_start_columns, match_end_columns = _locate_elements_in_line(
prev_line, matching_indices_list, prev_indices)
start_columns[batch_pos:batch_pos + num_matches] = match_start_columns
end_columns[batch_pos:batch_pos + num_matches] = match_end_columns
are_omitted[batch_pos:batch_pos + num_matches] = [
OMITTED_INDICES_KEY in annot[prev_r]
] * num_matches
row_indices[batch_pos:batch_pos + num_matches] = [prev_r] * num_matches
if input_batch:
return are_omitted, row_indices, start_columns, end_columns
else:
return are_omitted[0], row_indices[0], start_columns[0], end_columns[0]
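# Illustrative example (made-up call): for the 4x4 array shown in the
# _annotate_ndarray_lines docstring above, locate_tensor_element(formatted, [1, 2])
# would return (False, row, start_col, end_col) spanning the text "0.375" on
# the line whose begin indices are [1, 0].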
def _validate_indices_list(indices_list, formatted):
prev_ind = None
for ind in indices_list:
# Check indices match tensor dimensions.
dims = formatted.annotations["tensor_metadata"]["shape"]
if len(ind) != len(dims):
raise ValueError("Dimensions mismatch: requested: %d; actual: %d" %
(len(ind), len(dims)))
# Check indices is within size limits.
for req_idx, siz in zip(ind, dims):
if req_idx >= siz:
raise ValueError("Indices exceed tensor dimensions.")
if req_idx < 0:
raise ValueError("Indices contain negative value(s).")
# Check indices are in ascending order.
if prev_ind and ind < prev_ind:
raise ValueError("Input indices sets are not in ascending order.")
prev_ind = ind
def _locate_elements_in_line(line, indices_list, ref_indices):
"""Determine the start and end indices of an element in a line.
Args:
line: (str) the line in which the element is to be sought.
indices_list: (list of list of int) list of indices of the element to
search for. Assumes that the indices in the batch are unique and sorted
in ascending order.
ref_indices: (list of int) reference indices, i.e., the indices of the
first element represented in the line.
Returns:
start_columns: (list of int) start column indices, if found. If not found,
None.
end_columns: (list of int) end column indices, if found. If not found,
None.
If found, the element is represented in the left-closed-right-open interval
[start_column, end_column].
"""
batch_size = len(indices_list)
offsets = [indices[-1] - ref_indices[-1] for indices in indices_list]
start_columns = [None] * batch_size
end_columns = [None] * batch_size
if _NUMPY_OMISSION in line:
ellipsis_index = line.find(_NUMPY_OMISSION)
else:
ellipsis_index = len(line)
matches_iter = re.finditer(_NUMBER_REGEX, line)
batch_pos = 0
offset_counter = 0
for match in matches_iter:
if match.start() > ellipsis_index:
# Do not attempt to search beyond ellipsis.
break
if offset_counter == offsets[batch_pos]:
start_columns[batch_pos] = match.start()
# Remove the final comma, right bracket, or whitespace.
end_columns[batch_pos] = match.end() - 1
batch_pos += 1
if batch_pos >= batch_size:
break
offset_counter += 1
return start_columns, end_columns
def _pad_string_to_length(string, length):
return " " * (length - len(string)) + string
def numeric_summary(tensor):
"""Get a text summary of a numeric tensor.
This summary is only available for numeric (int*, float*, complex*) and
Boolean tensors.
Args:
tensor: (`numpy.ndarray`) the tensor value object to be summarized.
Returns:
The summary text as a `RichTextLines` object. If the type of `tensor` is not
numeric or Boolean, a single-line `RichTextLines` object containing a
warning message will reflect that.
"""
def _counts_summary(counts, skip_zeros=True, total_count=None):
"""Format values as a two-row table."""
if skip_zeros:
counts = [(count_key, count_val) for count_key, count_val in counts
if count_val]
max_common_len = 0
for count_key, count_val in counts:
count_val_str = str(count_val)
common_len = max(len(count_key) + 1, len(count_val_str) + 1)
max_common_len = max(common_len, max_common_len)
key_line = debugger_cli_common.RichLine("|")
val_line = debugger_cli_common.RichLine("|")
for count_key, count_val in counts:
count_val_str = str(count_val)
key_line += _pad_string_to_length(count_key, max_common_len)
val_line += _pad_string_to_length(count_val_str, max_common_len)
key_line += " |"
val_line += " |"
if total_count is not None:
total_key_str = "total"
total_val_str = str(total_count)
max_common_len = max(len(total_key_str) + 1, len(total_val_str))
total_key_str = _pad_string_to_length(total_key_str, max_common_len)
total_val_str = _pad_string_to_length(total_val_str, max_common_len)
key_line += total_key_str + " |"
val_line += total_val_str + " |"
return debugger_cli_common.rich_text_lines_from_rich_line_list(
[key_line, val_line])
if not isinstance(tensor, np.ndarray) or not np.size(tensor):
return debugger_cli_common.RichTextLines([
"No numeric summary available due to empty tensor."])
elif (np.issubdtype(tensor.dtype, np.floating) or
np.issubdtype(tensor.dtype, np.complex) or
np.issubdtype(tensor.dtype, np.integer)):
counts = [
("nan", np.sum(np.isnan(tensor))),
("-inf", np.sum(np.isneginf(tensor))),
("-", np.sum(np.logical_and(
tensor < 0.0, np.logical_not(np.isneginf(tensor))))),
("0", np.sum(tensor == 0.0)),
("+", np.sum(np.logical_and(
tensor > 0.0, np.logical_not(np.isposinf(tensor))))),
("+inf", np.sum(np.isposinf(tensor)))]
output = _counts_summary(counts, total_count=np.size(tensor))
valid_array = tensor[
np.logical_not(np.logical_or(np.isinf(tensor), np.isnan(tensor)))]
if np.size(valid_array):
stats = [
("min", np.min(valid_array)),
("max", np.max(valid_array)),
("mean", np.mean(valid_array)),
("std", np.std(valid_array))]
output.extend(_counts_summary(stats, skip_zeros=False))
return output
elif tensor.dtype == np.bool:
counts = [
("False", np.sum(tensor == 0)),
("True", np.sum(tensor > 0)),]
return _counts_summary(counts, total_count=np.size(tensor))
else:
return debugger_cli_common.RichTextLines([
"No numeric summary available due to tensor dtype: %s." % tensor.dtype])
| apache-2.0 | -7,937,003,642,828,927,000 | 34.934974 | 80 | 0.652516 | false |
postlund/home-assistant | homeassistant/components/google_travel_time/sensor.py | 5 | 10356 | """Support for Google travel time sensors."""
from datetime import datetime, timedelta
import logging
import googlemaps
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.helpers import location
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Google"
CONF_DESTINATION = "destination"
CONF_OPTIONS = "options"
CONF_ORIGIN = "origin"
CONF_TRAVEL_MODE = "travel_mode"
DEFAULT_NAME = "Google Travel Time"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
ALL_LANGUAGES = [
"ar",
"bg",
"bn",
"ca",
"cs",
"da",
"de",
"el",
"en",
"es",
"eu",
"fa",
"fi",
"fr",
"gl",
"gu",
"hi",
"hr",
"hu",
"id",
"it",
"iw",
"ja",
"kn",
"ko",
"lt",
"lv",
"ml",
"mr",
"nl",
"no",
"pl",
"pt",
"pt-BR",
"pt-PT",
"ro",
"ru",
"sk",
"sl",
"sr",
"sv",
"ta",
"te",
"th",
"tl",
"tr",
"uk",
"vi",
"zh-CN",
"zh-TW",
]
AVOID = ["tolls", "highways", "ferries", "indoor"]
TRANSIT_PREFS = ["less_walking", "fewer_transfers"]
TRANSPORT_TYPE = ["bus", "subway", "train", "tram", "rail"]
TRAVEL_MODE = ["driving", "walking", "bicycling", "transit"]
TRAVEL_MODEL = ["best_guess", "pessimistic", "optimistic"]
UNITS = ["metric", "imperial"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: "driving"}): vol.All(
dict,
vol.Schema(
{
vol.Optional(CONF_MODE, default="driving"): vol.In(TRAVEL_MODE),
vol.Optional("language"): vol.In(ALL_LANGUAGES),
vol.Optional("avoid"): vol.In(AVOID),
vol.Optional("units"): vol.In(UNITS),
vol.Exclusive("arrival_time", "time"): cv.string,
vol.Exclusive("departure_time", "time"): cv.string,
vol.Optional("traffic_model"): vol.In(TRAVEL_MODEL),
vol.Optional("transit_mode"): vol.In(TRANSPORT_TYPE),
vol.Optional("transit_routing_preference"): vol.In(TRANSIT_PREFS),
}
),
),
}
)
TRACKABLE_DOMAINS = ["device_tracker", "sensor", "zone", "person"]
DATA_KEY = "google_travel_time"
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr)
)
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
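# For illustration (editor note, not part of the original module):
# convert_time_to_utc("08:00:00") returns the Unix timestamp of the next
# occurrence of 08:00 local time, suitable for the departure_time/arrival_time
# options handled in GoogleTravelTimeSensor.update() below.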
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Google travel time platform."""
def run_setup(event):
"""
Delay the setup until Home Assistant is fully initialized.
        This ensures that any entities referenced in the configuration have already been created.
"""
hass.data.setdefault(DATA_KEY, [])
options = config.get(CONF_OPTIONS)
if options.get("units") is None:
options["units"] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = (
"Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!"
)
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = "{} - {}".format(DEFAULT_NAME, titled_mode)
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(
hass, name, api_key, origin, destination, options
)
hass.data[DATA_KEY].append(sensor)
if sensor.valid_api_connection:
add_entities_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(Entity):
"""Representation of a Google travel time sensor."""
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = "min"
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
_LOGGER.error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
return round(_data["duration_in_traffic"]["value"] / 60)
if "duration" in _data:
return round(_data["duration"]["value"] / 60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res["rows"]
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
res["duration_in_traffic"] = _data["duration_in_traffic"]["text"]
if "duration" in _data:
res["duration"] = _data["duration"]["text"]
if "distance" in _data:
res["distance"] = _data["distance"]["text"]
res["origin"] = self._origin
res["destination"] = self._destination
res[ATTR_ATTRIBUTION] = ATTRIBUTION
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get("departure_time")
atime = options_copy.get("arrival_time")
if dtime is not None and ":" in dtime:
options_copy["departure_time"] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy["departure_time"] = dtime
elif atime is None:
options_copy["departure_time"] = "now"
if atime is not None and ":" in atime:
options_copy["arrival_time"] = convert_time_to_utc(atime)
elif atime is not None:
options_copy["arrival_time"] = atime
# Convert device_trackers to google friendly location
if hasattr(self, "_origin_entity_id"):
self._origin = self._get_location_from_entity(self._origin_entity_id)
if hasattr(self, "_destination_entity_id"):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(
self._origin, self._destination, **options_copy
)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location", entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
def _get_location_from_attributes(entity):
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return "%s,%s" % (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == "zone" and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
| apache-2.0 | 6,775,675,279,985,126,000 | 30.099099 | 86 | 0.58314 | false |
40223123/raven | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/constants.py | 603 | 15297 | #!/usr/bin/env python
'''Constants defined by SDL, and needed in pygame.
Note that many of the flags for SDL are not needed in pygame, and are not
included here. These constants are generally accessed from the
`pygame.locals` module. This module is automatically placed in the pygame
namespace, but you will usually want to place them directly into your module's
namespace with the following command::
from pygame.locals import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
#import SDL.constants
# SDL constants taken from https://wiki.libsdl.org/SDLKeycodeLookup
'''
YV12_OVERLAY = SDL.constants.SDL_YV12_OVERLAY
IYUV_OVERLAY = SDL.constants.SDL_IYUV_OVERLAY
YUY2_OVERLAY = SDL.constants.SDL_YUY2_OVERLAY
UYVY_OVERLAY = SDL.constants.SDL_UYVY_OVERLAY
YVYU_OVERLAY = SDL.constants.SDL_YVYU_OVERLAY
SWSURFACE = SDL.constants.SDL_SWSURFACE
HWSURFACE = SDL.constants.SDL_HWSURFACE
RESIZABLE = SDL.constants.SDL_RESIZABLE
ASYNCBLIT = SDL.constants.SDL_ASYNCBLIT
OPENGL = SDL.constants.SDL_OPENGL
OPENGLBLIT = SDL.constants.SDL_OPENGLBLIT
ANYFORMAT = SDL.constants.SDL_ANYFORMAT
HWPALETTE = SDL.constants.SDL_HWPALETTE
DOUBLEBUF = SDL.constants.SDL_DOUBLEBUF
#FULLSCREEN = SDL.constants.SDL_FULLSCREEN
'''
FULLSCREEN = 0
'''
HWACCEL = SDL.constants.SDL_HWACCEL
SRCCOLORKEY = SDL.constants.SDL_SRCCOLORKEY
'''
RLEACCELOK = 254
RLEACCEL = 255
'''
SRCALPHA = SDL.constants.SDL_SRCALPHA
PREALLOC = SDL.constants.SDL_PREALLOC
NOFRAME = SDL.constants.SDL_NOFRAME
GL_RED_SIZE = SDL.constants.SDL_GL_RED_SIZE
GL_GREEN_SIZE = SDL.constants.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = SDL.constants.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = SDL.constants.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = SDL.constants.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = SDL.constants.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = SDL.constants.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = SDL.constants.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = SDL.constants.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = SDL.constants.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = SDL.constants.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = SDL.constants.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = SDL.constants.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = SDL.constants.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = SDL.constants.SDL_GL_MULTISAMPLESAMPLES
TIMER_RESOLUTION = SDL.constants.TIMER_RESOLUTION
AUDIO_U8 = SDL.constants.AUDIO_U8
AUDIO_S8 = SDL.constants.AUDIO_S8
AUDIO_U16LSB = SDL.constants.AUDIO_U16LSB
AUDIO_S16LSB = SDL.constants.AUDIO_S16LSB
AUDIO_U16MSB = SDL.constants.AUDIO_U16MSB
AUDIO_S16MSB = SDL.constants.AUDIO_S16MSB
AUDIO_U16 = SDL.constants.AUDIO_U16
AUDIO_S16 = SDL.constants.AUDIO_S16
AUDIO_U16SYS = SDL.constants.AUDIO_U16SYS
AUDIO_S16SYS = SDL.constants.AUDIO_S16SYS
'''
def _t(a, b, c, d):
return (ord(a) << 24) | (ord(b) << 16) | (ord(c) << 8) | ord(d)
SCRAP_TEXT = _t('T', 'E', 'X', 'T')
SCRAP_BMP = _t('B', 'M', 'P', ' ')
BLEND_ADD = 0x01
BLEND_SUB = 0x02
BLEND_MULT = 0x03
BLEND_MIN = 0x04
BLEND_MAX = 0x05
"""
NOEVENT = SDL.constants.SDL_NOEVENT
ACTIVEEVENT = SDL.constants.SDL_ACTIVEEVENT
KEYDOWN = SDL.constants.SDL_KEYDOWN
KEYUP = SDL.constants.SDL_KEYUP
MOUSEMOTION = SDL.constants.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = SDL.constants.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = SDL.constants.SDL_MOUSEBUTTONUP
JOYAXISMOTION = SDL.constants.SDL_JOYAXISMOTION
JOYBALLMOTION = SDL.constants.SDL_JOYBALLMOTION
JOYHATMOTION = SDL.constants.SDL_JOYHATMOTION
JOYBUTTONDOWN = SDL.constants.SDL_JOYBUTTONDOWN
JOYBUTTONUP = SDL.constants.SDL_JOYBUTTONUP
VIDEORESIZE = SDL.constants.SDL_VIDEORESIZE
VIDEOEXPOSE = SDL.constants.SDL_VIDEOEXPOSE
QUIT = SDL.constants.SDL_QUIT
SYSWMEVENT = SDL.constants.SDL_SYSWMEVENT
USEREVENT = SDL.constants.SDL_USEREVENT
NUMEVENTS = SDL.constants.SDL_NUMEVENTS
HAT_CENTERED = SDL.constants.SDL_HAT_CENTERED
HAT_UP = SDL.constants.SDL_HAT_UP
HAT_RIGHTUP = SDL.constants.SDL_HAT_RIGHTUP
HAT_RIGHT = SDL.constants.SDL_HAT_RIGHT
HAT_RIGHTDOWN = SDL.constants.SDL_HAT_RIGHTDOWN
HAT_DOWN = SDL.constants.SDL_HAT_DOWN
HAT_LEFTDOWN = SDL.constants.SDL_HAT_LEFTDOWN
HAT_LEFT = SDL.constants.SDL_HAT_LEFT
HAT_LEFTUP = SDL.constants.SDL_HAT_LEFTUP
"""
#BEGIN GENERATED CONSTANTS; see support/make_pygame_keyconstants.py
K_0 = 48
K_1 = 49
K_2 = 50
K_3 = 51
K_4 = 52
K_5 = 53
K_6 = 54
K_7 = 55
K_8 = 56
K_9 = 57
K_AMPERSAND = 38
K_ASTERISK = 42
K_AT = 64
K_BACKQUOTE = 96
K_BACKSLASH = 92
K_BACKSPACE = 8
#K_BREAK = SDL.constants.SDLK_BREAK
K_CAPSLOCK = 1073741881
K_CARET = 94
K_CLEAR = 1073742040
K_COLON = 58
K_COMMA = 44
#K_COMPOSE = SDL.constants.SDLK_COMPOSE
K_DELETE = 127
K_DOLLAR = 36
K_DOWN = 1073741905
K_END = 1073741901
K_EQUALS = 1073741927
K_ESCAPE = 27
#K_EURO = SDL.constants.SDLK_EURO
K_EXCLAIM = 33
K_F1 = 1073741882
K_F10 = 1073741891
K_F11 = 1073741892
K_F12 = 1073741893
K_F13 = 1073741928
K_F14 = 1073741929
K_F15 = 1073741930
K_F2 = 1073741883
K_F3 = 1073741884
K_F4 = 1073741885
K_F5 = 1073741886
K_F6 = 1073741887
K_F7 = 1073741888
K_F8 = 1073741889
K_F9 = 1073741890
#K_FIRST = SDL.constants.SDLK_FIRST
K_GREATER = 1073742022
K_HASH = 1073742028
K_HELP = 1073741941
K_HOME = 1073741898
K_INSERT = 1073741897
K_KP0 = 1073741922
K_KP1 = 1073741913
K_KP2 = 1073741914
K_KP3 = 1073741915
K_KP4 = 1073741916
K_KP5 = 1073741917
K_KP6 = 1073741918
K_KP7 = 1073741919
K_KP8 = 1073741920
K_KP9 = 1073741921
K_KP_DIVIDE = 1073741908
K_KP_ENTER = 1073741912
K_KP_EQUALS = 1073741927
K_KP_MINUS = 1073741910
K_KP_MULTIPLY = 1073741909
K_KP_PERIOD = 1073741923
K_KP_PLUS = 1073741911
K_LALT = 1073742050
#K_LAST = SDL.constants.SDLK_LAST
K_LCTRL = 1073742048
K_LEFT = 1073741904
#K_LEFTBRACKET = SDL.constants.SDLK_LEFTBRACKET
K_LEFTPAREN = 1073742006
#K_LESS = SDL.constants.SDLK_LESS
#K_LMETA = SDL.constants.SDLK_LMETA
K_LSHIFT = 1073742049
#K_LSUPER = SDL.constants.SDLK_LSUPER
K_MENU = 1073741942
K_MINUS = 45
K_MODE = 1073742081
#K_NUMLOCK = SDL.constants.SDLK_NUMLOCK
K_PAGEDOWN = 1073741902
K_PAGEUP = 1073741899
K_PAUSE = 1073741896
#K_PERIOD = SDL.constants.SDLK_PERIOD
K_PLUS = 43
#K_POWER = SDL.constants.SDLK_POWER
#K_PRINT = SDL.constants.SDLK_PRINT
K_QUESTION = 63
K_QUOTE = 39
K_QUOTEDBL = 34
K_RALT = 1073742054
K_RCTRL = 1073742052
K_RETURN = 13
K_RIGHT = 1073741903
#K_RIGHTBRACKET = SDL.constants.SDLK_RIGHTBRACKET
K_RIGHTPAREN = 41
#K_RMETA = SDL.constants.SDLK_RMETA
K_RSHIFT = 1073742053
#K_RSUPER = SDL.constants.SDLK_RSUPER
K_SCROLLOCK = 1073741895
K_SEMICOLON = 59
K_SLASH = 47
K_SPACE = 1073742029
K_SYSREQ = 1073741978
K_TAB = 9
K_UNDERSCORE = 95
K_UNDO = 1073741946
K_UNKNOWN = 0
K_UP = 1073741906
"""
K_WORLD_0 = SDL.constants.SDLK_WORLD_0
K_WORLD_1 = SDL.constants.SDLK_WORLD_1
K_WORLD_10 = SDL.constants.SDLK_WORLD_10
K_WORLD_11 = SDL.constants.SDLK_WORLD_11
K_WORLD_12 = SDL.constants.SDLK_WORLD_12
K_WORLD_13 = SDL.constants.SDLK_WORLD_13
K_WORLD_14 = SDL.constants.SDLK_WORLD_14
K_WORLD_15 = SDL.constants.SDLK_WORLD_15
K_WORLD_16 = SDL.constants.SDLK_WORLD_16
K_WORLD_17 = SDL.constants.SDLK_WORLD_17
K_WORLD_18 = SDL.constants.SDLK_WORLD_18
K_WORLD_19 = SDL.constants.SDLK_WORLD_19
K_WORLD_2 = SDL.constants.SDLK_WORLD_2
K_WORLD_20 = SDL.constants.SDLK_WORLD_20
K_WORLD_21 = SDL.constants.SDLK_WORLD_21
K_WORLD_22 = SDL.constants.SDLK_WORLD_22
K_WORLD_23 = SDL.constants.SDLK_WORLD_23
K_WORLD_24 = SDL.constants.SDLK_WORLD_24
K_WORLD_25 = SDL.constants.SDLK_WORLD_25
K_WORLD_26 = SDL.constants.SDLK_WORLD_26
K_WORLD_27 = SDL.constants.SDLK_WORLD_27
K_WORLD_28 = SDL.constants.SDLK_WORLD_28
K_WORLD_29 = SDL.constants.SDLK_WORLD_29
K_WORLD_3 = SDL.constants.SDLK_WORLD_3
K_WORLD_30 = SDL.constants.SDLK_WORLD_30
K_WORLD_31 = SDL.constants.SDLK_WORLD_31
K_WORLD_32 = SDL.constants.SDLK_WORLD_32
K_WORLD_33 = SDL.constants.SDLK_WORLD_33
K_WORLD_34 = SDL.constants.SDLK_WORLD_34
K_WORLD_35 = SDL.constants.SDLK_WORLD_35
K_WORLD_36 = SDL.constants.SDLK_WORLD_36
K_WORLD_37 = SDL.constants.SDLK_WORLD_37
K_WORLD_38 = SDL.constants.SDLK_WORLD_38
K_WORLD_39 = SDL.constants.SDLK_WORLD_39
K_WORLD_4 = SDL.constants.SDLK_WORLD_4
K_WORLD_40 = SDL.constants.SDLK_WORLD_40
K_WORLD_41 = SDL.constants.SDLK_WORLD_41
K_WORLD_42 = SDL.constants.SDLK_WORLD_42
K_WORLD_43 = SDL.constants.SDLK_WORLD_43
K_WORLD_44 = SDL.constants.SDLK_WORLD_44
K_WORLD_45 = SDL.constants.SDLK_WORLD_45
K_WORLD_46 = SDL.constants.SDLK_WORLD_46
K_WORLD_47 = SDL.constants.SDLK_WORLD_47
K_WORLD_48 = SDL.constants.SDLK_WORLD_48
K_WORLD_49 = SDL.constants.SDLK_WORLD_49
K_WORLD_5 = SDL.constants.SDLK_WORLD_5
K_WORLD_50 = SDL.constants.SDLK_WORLD_50
K_WORLD_51 = SDL.constants.SDLK_WORLD_51
K_WORLD_52 = SDL.constants.SDLK_WORLD_52
K_WORLD_53 = SDL.constants.SDLK_WORLD_53
K_WORLD_54 = SDL.constants.SDLK_WORLD_54
K_WORLD_55 = SDL.constants.SDLK_WORLD_55
K_WORLD_56 = SDL.constants.SDLK_WORLD_56
K_WORLD_57 = SDL.constants.SDLK_WORLD_57
K_WORLD_58 = SDL.constants.SDLK_WORLD_58
K_WORLD_59 = SDL.constants.SDLK_WORLD_59
K_WORLD_6 = SDL.constants.SDLK_WORLD_6
K_WORLD_60 = SDL.constants.SDLK_WORLD_60
K_WORLD_61 = SDL.constants.SDLK_WORLD_61
K_WORLD_62 = SDL.constants.SDLK_WORLD_62
K_WORLD_63 = SDL.constants.SDLK_WORLD_63
K_WORLD_64 = SDL.constants.SDLK_WORLD_64
K_WORLD_65 = SDL.constants.SDLK_WORLD_65
K_WORLD_66 = SDL.constants.SDLK_WORLD_66
K_WORLD_67 = SDL.constants.SDLK_WORLD_67
K_WORLD_68 = SDL.constants.SDLK_WORLD_68
K_WORLD_69 = SDL.constants.SDLK_WORLD_69
K_WORLD_7 = SDL.constants.SDLK_WORLD_7
K_WORLD_70 = SDL.constants.SDLK_WORLD_70
K_WORLD_71 = SDL.constants.SDLK_WORLD_71
K_WORLD_72 = SDL.constants.SDLK_WORLD_72
K_WORLD_73 = SDL.constants.SDLK_WORLD_73
K_WORLD_74 = SDL.constants.SDLK_WORLD_74
K_WORLD_75 = SDL.constants.SDLK_WORLD_75
K_WORLD_76 = SDL.constants.SDLK_WORLD_76
K_WORLD_77 = SDL.constants.SDLK_WORLD_77
K_WORLD_78 = SDL.constants.SDLK_WORLD_78
K_WORLD_79 = SDL.constants.SDLK_WORLD_79
K_WORLD_8 = SDL.constants.SDLK_WORLD_8
K_WORLD_80 = SDL.constants.SDLK_WORLD_80
K_WORLD_81 = SDL.constants.SDLK_WORLD_81
K_WORLD_82 = SDL.constants.SDLK_WORLD_82
K_WORLD_83 = SDL.constants.SDLK_WORLD_83
K_WORLD_84 = SDL.constants.SDLK_WORLD_84
K_WORLD_85 = SDL.constants.SDLK_WORLD_85
K_WORLD_86 = SDL.constants.SDLK_WORLD_86
K_WORLD_87 = SDL.constants.SDLK_WORLD_87
K_WORLD_88 = SDL.constants.SDLK_WORLD_88
K_WORLD_89 = SDL.constants.SDLK_WORLD_89
K_WORLD_9 = SDL.constants.SDLK_WORLD_9
K_WORLD_90 = SDL.constants.SDLK_WORLD_90
K_WORLD_91 = SDL.constants.SDLK_WORLD_91
K_WORLD_92 = SDL.constants.SDLK_WORLD_92
K_WORLD_93 = SDL.constants.SDLK_WORLD_93
K_WORLD_94 = SDL.constants.SDLK_WORLD_94
K_WORLD_95 = SDL.constants.SDLK_WORLD_95
"""
K_a = 97
K_b = 98
K_c = 99
K_d = 100
K_e = 101
K_f = 102
K_g = 103
K_h = 104
K_i = 105
K_j = 106
K_k = 107
K_l = 108
K_m = 109
K_n = 110
K_o = 111
K_p = 112
K_q = 113
K_r = 114
K_s = 115
K_t = 116
K_u = 117
K_v = 118
K_w = 119
K_x = 120
K_y = 121
K_z = 122
#END GENERATED CONSTANTS
| gpl-3.0 | -950,879,821,729,361,800 | 40.909589 | 78 | 0.525463 | false |
wrouesnel/ansible | lib/ansible/modules/network/cloudengine/ce_bfd_session.py | 16 | 20563 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_bfd_session
version_added: "2.4"
short_description: Manages BFD session configuration on HUAWEI CloudEngine devices.
description:
- Manages BFD session configuration, creates a BFD session or deletes a specified BFD session
on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
session_name:
description:
- Specifies the name of a BFD session.
The value is a string of 1 to 15 case-sensitive characters without spaces.
required: true
default: null
create_type:
description:
            - BFD session creation mode. Currently, only static mode and
              static auto-negotiation mode are supported.
required: false
default: null
choices: ['static', 'auto']
addr_type:
description:
- Specifies the peer IP address type.
required: false
default: null
choices: ['ipv4']
out_if_name:
description:
- Specifies the type and number of the interface bound to the BFD session.
required: false
default: null
dest_addr:
description:
- Specifies the peer IP address bound to the BFD session.
required: false
default: null
src_addr:
description:
- Indicates the source IP address carried in BFD packets.
required: false
default: null
vrf_name:
description:
- Specifies the name of a Virtual Private Network (VPN) instance that is bound to a BFD session.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value _public_ is reserved and cannot be used as the VPN instance name.
required: false
default: null
use_default_ip:
description:
- Indicates the default multicast IP address that is bound to a BFD session.
By default, BFD uses the multicast IP address 224.0.0.184.
You can set the multicast IP address by running the default-ip-address command.
The value is a bool type.
required: false
default: false
state:
description:
- Determines whether the config should be present or not on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: bfd session module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring Single-hop BFD for Detecting Faults on a Layer 2 Link
ce_bfd_session:
session_name: bfd_l2link
use_default_ip: true
out_if_name: 10GE1/0/1
provider: '{{ cli }}'
- name: Configuring Single-Hop BFD on a VLANIF Interface
ce_bfd_session:
session_name: bfd_vlanif
dest_addr: 10.1.1.6
out_if_name: Vlanif100
provider: '{{ cli }}'
- name: Configuring Multi-Hop BFD
ce_bfd_session:
session_name: bfd_multi_hop
dest_addr: 10.1.1.1
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"addr_type": null,
"create_type": null,
"dest_addr": null,
"out_if_name": "10GE1/0/1",
"session_name": "bfd_l2link",
"src_addr": null,
"state": "present",
"use_default_ip": true,
"vrf_name": null
}
existing:
description: k/v pairs of existing configuration
returned: always
type: dict
sample: {
"session": {}
}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {
"session": {
"addrType": "IPV4",
"createType": "SESS_STATIC",
"destAddr": null,
"outIfName": "10GE1/0/1",
"sessName": "bfd_l2link",
"srcAddr": null,
"useDefaultIp": "true",
"vrfName": null
}
}
updates:
description: commands sent to the device
returned: always
type: list
sample: [
"bfd bfd_l2link bind peer-ip default-ip interface 10ge1/0/1"
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import sys
import socket
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
CE_NC_GET_BFD = """
<filter type="subtree">
<bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</bfd>
</filter>
"""
CE_NC_GET_BFD_GLB = """
<bfdSchGlobal>
<bfdEnable></bfdEnable>
<defaultIp></defaultIp>
</bfdSchGlobal>
"""
CE_NC_GET_BFD_SESSION = """
<bfdCfgSessions>
<bfdCfgSession>
<sessName>%s</sessName>
<createType></createType>
<addrType></addrType>
<outIfName></outIfName>
<destAddr></destAddr>
<srcAddr></srcAddr>
<vrfName></vrfName>
<useDefaultIp></useDefaultIp>
</bfdCfgSession>
</bfdCfgSessions>
"""
def is_valid_ip_vpn(vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
return False
if len(vpname) < 1 or len(vpname) > 31:
return False
return True
def check_default_ip(ipaddr):
"""check the default multicast IP address"""
# The value ranges from 224.0.0.107 to 224.0.0.250
if not check_ip_addr(ipaddr):
return False
if ipaddr.count(".") != 3:
return False
ips = ipaddr.split(".")
if ips[0] != "224" or ips[1] != "0" or ips[2] != "0":
return False
if not ips[3].isdigit() or int(ips[3]) < 107 or int(ips[3]) > 250:
return False
return True
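# Illustrative behaviour of check_default_ip (editor note, not original code):
#   check_default_ip("224.0.0.184") -> True   (within 224.0.0.107-224.0.0.250)
#   check_default_ip("224.0.0.50")  -> False  (outside the allowed range)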
def get_interface_type(interface):
"""get the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
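# For example (editor note, not original code): get_interface_type('10GE1/0/1')
# returns '10ge', get_interface_type('Vlanif100') returns 'vlanif', and an
# unrecognised name such as 'Foo1' returns None.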
class BfdSession(object):
"""Manages BFD Session"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# module input info
self.session_name = self.module.params['session_name']
self.create_type = self.module.params['create_type']
self.addr_type = self.module.params['addr_type']
self.out_if_name = self.module.params['out_if_name']
self.dest_addr = self.module.params['dest_addr']
self.src_addr = self.module.params['src_addr']
self.vrf_name = self.module.params['vrf_name']
self.use_default_ip = self.module.params['use_default_ip']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.bfd_dict = dict()
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""init module"""
mutually_exclusive = [('use_default_ip', 'dest_addr')]
self.module = AnsibleModule(argument_spec=self.spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
def get_bfd_dict(self):
"""bfd config dict"""
bfd_dict = dict()
bfd_dict["global"] = dict()
bfd_dict["session"] = dict()
conf_str = CE_NC_GET_BFD % (CE_NC_GET_BFD_GLB + (CE_NC_GET_BFD_SESSION % self.session_name))
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return bfd_dict
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
# get bfd global info
glb = root.find("data/bfd/bfdSchGlobal")
if glb:
for attr in glb:
bfd_dict["global"][attr.tag] = attr.text
# get bfd session info
sess = root.find("data/bfd/bfdCfgSessions/bfdCfgSession")
if sess:
for attr in sess:
bfd_dict["session"][attr.tag] = attr.text
return bfd_dict
def is_session_match(self):
"""is bfd session match"""
if not self.bfd_dict["session"] or not self.session_name:
return False
session = self.bfd_dict["session"]
if self.session_name != session.get("sessName", ""):
return False
if self.create_type and self.create_type.upper() not in session.get("createType", "").upper():
return False
if self.addr_type and self.addr_type != session.get("addrType").lower():
return False
if self.dest_addr and self.dest_addr != session.get("destAddr"):
return False
if self.src_addr and self.src_addr != session.get("srcAddr"):
return False
if self.out_if_name:
if not session.get("outIfName"):
return False
if self.out_if_name.replace(" ", "").lower() != session.get("outIfName").replace(" ", "").lower():
return False
if self.vrf_name and self.vrf_name != session.get("vrfName"):
return False
if str(self.use_default_ip).lower() != session.get("useDefaultIp"):
return False
return True
def config_session(self):
"""configures bfd session"""
xml_str = ""
cmd_list = list()
if not self.session_name:
return xml_str
if self.bfd_dict["global"].get("bfdEnable", "false") != "true":
self.module.fail_json(msg="Error: Please enable BFD globally first.")
xml_str = "<sessName>%s</sessName>" % self.session_name
cmd_session = "bfd %s" % self.session_name
if self.state == "present":
if not self.bfd_dict["session"]:
# Parameter check
if not self.dest_addr and not self.use_default_ip:
self.module.fail_json(
msg="Error: dest_addr or use_default_ip must be set when bfd session is creating.")
# Creates a BFD session
if self.create_type:
xml_str += "<createType>SESS_%s</createType>" % self.create_type.upper()
else:
xml_str += "<createType>SESS_STATIC</createType>"
xml_str += "<linkType>IP</linkType>"
cmd_session += " bind"
if self.addr_type:
xml_str += "<addrType>%s</addrType>" % self.addr_type.upper()
else:
xml_str += "<addrType>IPV4</addrType>"
if self.dest_addr:
xml_str += "<destAddr>%s</destAddr>" % self.dest_addr
cmd_session += " peer-%s %s" % ("ipv6" if self.addr_type == "ipv6" else "ip", self.dest_addr)
if self.use_default_ip:
xml_str += "<useDefaultIp>%s</useDefaultIp>" % str(self.use_default_ip).lower()
cmd_session += " peer-ip default-ip"
if self.vrf_name:
xml_str += "<vrfName>%s</vrfName>" % self.vrf_name
cmd_session += " vpn-instance %s" % self.vrf_name
if self.out_if_name:
xml_str += "<outIfName>%s</outIfName>" % self.out_if_name
cmd_session += " interface %s" % self.out_if_name.lower()
if self.src_addr:
xml_str += "<srcAddr>%s</srcAddr>" % self.src_addr
cmd_session += " source-%s %s" % ("ipv6" if self.addr_type == "ipv6" else "ip", self.src_addr)
if self.create_type == "auto":
cmd_session += " auto"
elif not self.is_session_match():
# Bfd session is not match
self.module.fail_json(msg="Error: The specified BFD configuration view has been created.")
else:
pass
else: # absent
if not self.bfd_dict["session"]:
self.module.fail_json(msg="Error: BFD session is not exist.")
if not self.is_session_match():
self.module.fail_json(msg="Error: BFD session parameter is invalid.")
if self.state == "present":
if xml_str.endswith("</sessName>"):
# no config update
return ""
else:
cmd_list.insert(0, cmd_session)
self.updates_cmd.extend(cmd_list)
return '<bfdCfgSessions><bfdCfgSession operation="merge">' + xml_str\
+ '</bfdCfgSession></bfdCfgSessions>'
else: # absent
cmd_list.append("undo " + cmd_session)
self.updates_cmd.extend(cmd_list)
return '<bfdCfgSessions><bfdCfgSession operation="delete">' + xml_str\
+ '</bfdCfgSession></bfdCfgSessions>'
def netconf_load_config(self, xml_str):
"""load bfd config by netconf"""
if not xml_str:
return
xml_cfg = """
<config>
<bfd xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</bfd>
</config>""" % xml_str
set_nc_config(self.module, xml_cfg)
self.changed = True
def check_params(self):
"""Check all input params"""
# check session_name
if not self.session_name:
self.module.fail_json(msg="Error: Missing required arguments: session_name.")
if self.session_name:
if len(self.session_name) < 1 or len(self.session_name) > 15:
self.module.fail_json(msg="Error: Session name is invalid.")
# check out_if_name
if self.out_if_name:
if not get_interface_type(self.out_if_name):
self.module.fail_json(msg="Error: Session out_if_name is invalid.")
# check dest_addr
if self.dest_addr:
if not check_ip_addr(self.dest_addr):
self.module.fail_json(msg="Error: Session dest_addr is invalid.")
# check src_addr
if self.src_addr:
if not check_ip_addr(self.src_addr):
self.module.fail_json(msg="Error: Session src_addr is invalid.")
# check vrf_name
if self.vrf_name:
if not is_valid_ip_vpn(self.vrf_name):
self.module.fail_json(msg="Error: Session vrf_name is invalid.")
if not self.dest_addr:
self.module.fail_json(msg="Error: vrf_name and dest_addr must set at the same time.")
# check use_default_ip
if self.use_default_ip and not self.out_if_name:
self.module.fail_json(msg="Error: use_default_ip and out_if_name must set at the same time.")
def get_proposed(self):
"""get proposed info"""
# base config
self.proposed["session_name"] = self.session_name
self.proposed["create_type"] = self.create_type
self.proposed["addr_type"] = self.addr_type
self.proposed["out_if_name"] = self.out_if_name
self.proposed["dest_addr"] = self.dest_addr
self.proposed["src_addr"] = self.src_addr
self.proposed["vrf_name"] = self.vrf_name
self.proposed["use_default_ip"] = self.use_default_ip
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.bfd_dict:
return
self.existing["session"] = self.bfd_dict.get("session")
def get_end_state(self):
"""get end state info"""
bfd_dict = self.get_bfd_dict()
if not bfd_dict:
return
self.end_state["session"] = bfd_dict.get("session")
def work(self):
"""worker"""
self.check_params()
self.bfd_dict = self.get_bfd_dict()
self.get_existing()
self.get_proposed()
# deal present or absent
xml_str = ''
if self.session_name:
xml_str += self.config_session()
# update to device
if xml_str:
self.netconf_load_config(xml_str)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
session_name=dict(required=True, type='str'),
create_type=dict(required=False, type='str', choices=['static', 'auto']),
addr_type=dict(required=False, type='str', choices=['ipv4']),
out_if_name=dict(required=False, type='str'),
dest_addr=dict(required=False, type='str'),
src_addr=dict(required=False, type='str'),
vrf_name=dict(required=False, type='str'),
use_default_ip=dict(required=False, type='bool', default=False),
state=dict(required=False, default='present', choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = BfdSession(argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | -7,757,325,635,414,255,000 | 32.112721 | 117 | 0.569615 | false |
chrisseto/osf.io | osf/models/metaschema.py | 10 | 2428 | # -*- coding: utf-8 -*-
from django.db import models
import jsonschema
from website.util import api_v2_url
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.exceptions import ValidationValueError
from website.project.metadata.utils import create_jsonschema_from_metaschema
class MetaSchema(ObjectIDMixin, BaseModel):
name = models.CharField(max_length=255)
schema = DateTimeAwareJSONField(default=dict)
category = models.CharField(max_length=255, null=True, blank=True)
active = models.BooleanField(default=True)
# Version of the schema to use (e.g. if questions, responses change)
schema_version = models.IntegerField()
class Meta:
unique_together = ('name', 'schema_version')
def __unicode__(self):
return '(name={}, schema_version={}, id={})'.format(self.name, self.schema_version, self.id)
@property
def _config(self):
return self.schema.get('config', {})
@property
def requires_approval(self):
return self._config.get('requiresApproval', False)
@property
def fulfills(self):
return self._config.get('fulfills', [])
@property
def messages(self):
return self._config.get('messages', {})
@property
def requires_consent(self):
return self._config.get('requiresConsent', False)
@property
def has_files(self):
return self._config.get('hasFiles', False)
@property
def absolute_api_v2_url(self):
path = '/metaschemas/{}/'.format(self._id)
return api_v2_url(path)
@classmethod
def get_prereg_schema(cls):
return cls.objects.get(
name='Prereg Challenge',
schema_version=2
)
def validate_metadata(self, metadata, reviewer=False, required_fields=False):
"""
Validates registration_metadata field.
"""
schema = create_jsonschema_from_metaschema(self.schema,
required_fields=required_fields,
is_reviewer=reviewer)
try:
jsonschema.validate(metadata, schema)
except jsonschema.ValidationError as e:
raise ValidationValueError(e.message)
except jsonschema.SchemaError as e:
raise ValidationValueError(e.message)
return
| apache-2.0 | 5,339,462,156,627,602,000 | 30.128205 | 100 | 0.637974 | false |
AdamWill/python-fedora | doc/conf.py | 5 | 5294 | # -*- coding: utf-8 -*-
#
# Python Fedora Module documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 08:12:44 2008.
#
# This file is execfile()d with the current directory set to its containing
# dir.
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import fedora.release
# If your extensions are in another directory, add it here.
#sys.path.append(os.path.dirname(__file__))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = fedora.release.NAME
copyright = fedora.release.COPYRIGHT
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = fedora.release.VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = fedora.release.DOWNLOAD_URL + 'doc/'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
(
'index',
'Python Fedora Module.tex',
'Python Fedora Module Documentation',
'Toshio Kuratomi',
'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
automodule_skip_lines = 4
| gpl-2.0 | 3,380,028,354,636,738,000 | 29.425287 | 79 | 0.714583 | false |
liukaijv/XlsxWriter | xlsxwriter/test/comparison/test_chart_font05.py | 8 | 1869 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_font05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [49407488, 53740288]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({'name': 'Title'})
chart.set_x_axis({
'name': 'XXX',
'num_font': {'name': 'Arial', 'pitch_family': 34, 'charset': 0}
})
chart.set_y_axis({
'name': 'YYY',
'num_font': {'bold': 1, 'italic': 1, 'underline': 1}
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -4,101,676,926,928,358,400 | 25.323944 | 79 | 0.523274 | false |
namecoin/namecoin-core | test/get_previous_releases.py | 25 | 9262 | #!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
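# Example invocations (illustrative, derived from the argparse options below):
#   test/get_previous_releases.py -b v0.19.1 v0.18.1   # download release binaries
#   test/get_previous_releases.py -d v0.19.1            # build from source using depends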
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "bitcoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "bitcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "bitcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "bitcoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "bitcoin-0.15.2-x86_64-linux-gnu.tar.gz",
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "bitcoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "bitcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "bitcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "bitcoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "bitcoin-0.16.3-x86_64-linux-gnu.tar.gz",
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "bitcoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "bitcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "bitcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "bitcoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "bitcoin-0.17.2-x86_64-linux-gnu.tar.gz",
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "bitcoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "bitcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "bitcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "bitcoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "bitcoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "bitcoin-0.18.1-x86_64-linux-gnu.tar.gz",
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "bitcoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "bitcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "bitcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "bitcoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "bitcoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "bitcoin-0.19.1-x86_64-linux-gnu.tar.gz"
}
@contextlib.contextmanager
def pushd(new_dir) -> None:
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
def download_binary(tag, args) -> int:
if Path(tag).is_dir():
if not args.remove_dir:
print('Using cached {}'.format(tag))
return 0
shutil.rmtree(tag)
Path(tag).mkdir()
bin_path = 'bin/bitcoin-core-{}'.format(tag[1:])
match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
if match:
bin_path = 'bin/bitcoin-core-{}/test.{}'.format(
match.group(1), match.group(2))
tarball = 'bitcoin-{tag}-{platform}.tar.gz'.format(
tag=tag[1:], platform=args.platform)
tarballUrl = 'https://bitcoincore.org/{bin_path}/{tarball}'.format(
bin_path=bin_path, tarball=tarball)
print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))
header, status = subprocess.Popen(
['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
if re.search("404 Not Found", header.decode("utf-8")):
print("Binary tag was not found")
return 1
curlCmds = [
['curl', '--remote-name', tarballUrl]
]
for cmd in curlCmds:
ret = subprocess.run(cmd).returncode
if ret:
return ret
hasher = hashlib.sha256()
with open(tarball, "rb") as afile:
hasher.update(afile.read())
tarballHash = hasher.hexdigest()
if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
print("Checksum did not match")
return 1
print("Checksum matched")
# Extract tarball
ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
'--strip-components=1',
'bitcoin-{tag}'.format(tag=tag[1:])]).returncode
if ret:
return ret
Path(tarball).unlink()
return 0
def build_release(tag, args) -> int:
githubUrl = "https://github.com/bitcoin/bitcoin"
if args.remove_dir:
if Path(tag).is_dir():
shutil.rmtree(tag)
if not Path(tag).is_dir():
# fetch new tags
subprocess.run(
["git", "fetch", githubUrl, "--tags"])
output = subprocess.check_output(['git', 'tag', '-l', tag])
if not output:
print('Tag {} not found'.format(tag))
return 1
ret = subprocess.run([
'git', 'clone', githubUrl, tag
]).returncode
if ret:
return ret
with pushd(tag):
ret = subprocess.run(['git', 'checkout', tag]).returncode
if ret:
return ret
host = args.host
if args.depends:
with pushd('depends'):
ret = subprocess.run(['make', 'NO_QT=1']).returncode
if ret:
return ret
host = os.environ.get(
'HOST', subprocess.check_output(['./config.guess']))
config_flags = '--prefix={pwd}/depends/{host} '.format(
pwd=os.getcwd(),
host=host) + args.config_flags
cmds = [
'./autogen.sh',
'./configure {}'.format(config_flags),
'make',
]
for cmd in cmds:
ret = subprocess.run(cmd.split()).returncode
if ret:
return ret
# Move binaries, so they're in the same place as in the
# release download
Path('bin').mkdir(exist_ok=True)
files = ['bitcoind', 'bitcoin-cli', 'bitcoin-tx']
for f in files:
Path('src/'+f).rename('bin/'+f)
return 0
def check_host(args) -> int:
args.host = os.environ.get('HOST', subprocess.check_output(
'./depends/config.guess').decode())
if args.download_binary:
platforms = {
'aarch64-*-linux*': 'aarch64-linux-gnu',
'x86_64-*-linux*': 'x86_64-linux-gnu',
'x86_64-apple-darwin*': 'osx64',
}
args.platform = ''
for pattern, target in platforms.items():
if fnmatch(args.host, pattern):
args.platform = target
if not args.platform:
print('Not sure which binary to download for {}'.format(args.host))
return 1
return 0
def main(args) -> int:
Path(args.target_dir).mkdir(exist_ok=True, parents=True)
print("Releases directory: {}".format(args.target_dir))
ret = check_host(args)
if ret:
return ret
if args.download_binary:
with pushd(args.target_dir):
for tag in args.tags:
ret = download_binary(tag, args)
if ret:
return ret
return 0
args.config_flags = os.environ.get('CONFIG_FLAGS', '')
args.config_flags += ' --without-gui --disable-tests --disable-bench'
with pushd(args.target_dir):
for tag in args.tags:
ret = build_release(tag, args)
if ret:
return ret
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--remove-dir', action='store_true',
help='remove existing directory.')
parser.add_argument('-d', '--depends', action='store_true',
help='use depends.')
parser.add_argument('-b', '--download-binary', action='store_true',
help='download release binary.')
parser.add_argument('-t', '--target-dir', action='store',
help='target directory.', default='releases')
parser.add_argument('tags', nargs='+',
help="release tags. e.g.: v0.18.1 v0.20.0rc2")
args = parser.parse_args()
sys.exit(main(args))
| mit | 9,163,755,023,043,592,000 | 39.445415 | 112 | 0.655906 | false |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/dumbdbm.py | 251 | 8820 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
f = self._open(self._dirfile, 'w')
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
f = _open(self._dirfile, 'a')
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if key not in self._index:
self._addkey(key, self._addval(val))
else:
# See whether the new value is small enough to fit in the
# (padded) space currently occupied by the old value.
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
self._index[key] = self._setval(pos, val)
else:
# The new value doesn't fit in the (padded) space used
# by the old value. The blocks used by the old value are
# forever lost.
self._index[key] = self._addval(val)
# Note that _index may be out of synch with the directory
# file now: _setval() and _addval() don't update the directory
# file. This also means that the on-disk directory and data
# files are in a mutually inconsistent state, and they'll
# remain that way until _commit() is called. Note that this
# is a disaster (for the database) if the program crashes
# (so that _commit() never gets called).
def __delitem__(self, key):
# The blocks used by the associated value are lost.
del self._index[key]
# XXX It's unclear why we do a _commit() here (the code always
# XXX has, so I'm not changing it). _setitem__ doesn't try to
# XXX keep the directory file in synch. Why should we? Or
# XXX why shouldn't __setitem__?
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return key in self._index
def __contains__(self, key):
return key in self._index
def iterkeys(self):
return self._index.iterkeys()
__iter__ = iterkeys
def __len__(self):
return len(self._index)
def close(self):
self._commit()
self._index = self._datfile = self._dirfile = self._bakfile = None
__del__ = close
def _chmod (self, file):
if hasattr(self._os, 'chmod'):
self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0666):
"""Open the database file, filename, and return corresponding object.
The flag argument, used to control how the database is opened in the
other DBM implementations, is ignored in the dumbdbm module; the
database is always opened for update, and will be created if it does
not exist.
The optional mode argument is the UNIX mode of the file, used only when
the database has to be created. It defaults to octal code 0666 (and
will be modified by the prevailing umask).
"""
# flag argument is currently ignored
# Modify mode depending on the umask
try:
um = _os.umask(0)
_os.umask(um)
except AttributeError:
pass
else:
# Turn off any bits that are set in the umask
mode = mode & (~um)
return _Database(file, mode)
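# A minimal usage sketch (the '/tmp/dumbdbm_example' path is only illustrative);
# guarded so nothing runs on import:
if __name__ == '__main__':
    db = open('/tmp/dumbdbm_example')   # creates the .dir/.dat/.bak files as needed
    db['spam'] = 'eggs'                 # keys and values must be plain strings
    print db['spam']                    # -> 'eggs'
    db.close()                          # flushes the in-memory index via _commit()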
| mit | 4,915,030,171,117,929,000 | 34.28 | 78 | 0.59093 | false |
DEVELByte/incubator-airflow | airflow/contrib/sensors/gcs_sensor.py | 4 | 2452 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageObjectSensor(BaseSensorOperator):
"""
Checks for the existence of a file in Google Cloud Storage.
"""
template_fields = ('bucket', 'object')
ui_color = '#f0eee4'
@apply_defaults
def __init__(
self,
bucket,
object,
google_cloud_conn_id='google_cloud_storage_default',
delegate_to=None,
*args,
**kwargs):
"""
        Create a new GoogleCloudStorageObjectSensor.
:param bucket: The Google cloud storage bucket where the object is.
:type bucket: string
:param object: The name of the object to check in the Google cloud
storage bucket.
:type object: string
        :param google_cloud_conn_id: The connection ID to use when
            connecting to Google cloud storage.
        :type google_cloud_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide delegation enabled.
:type delegate_to: string
"""
super(GoogleCloudStorageObjectSensor, self).__init__(*args, **kwargs)
self.bucket = bucket
self.object = object
self.google_cloud_conn_id = google_cloud_conn_id
self.delegate_to = delegate_to
def poke(self, context):
logging.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_conn_id,
delegate_to=self.delegate_to)
return hook.exists(self.bucket, self.object)
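# Illustrative use inside a DAG definition (a sketch -- the task id, bucket and
# object names below are made up, and `dag` is assumed to be an existing DAG):
#
#   wait_for_export = GoogleCloudStorageObjectSensor(
#       task_id='wait_for_export',
#       bucket='my-bucket',
#       object='exports/2016-01-01/data.csv',
#       dag=dag)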
| apache-2.0 | -6,198,963,205,383,858,000 | 37.3125 | 110 | 0.670065 | false |
ga7g08/sympy | sympy/physics/vector/tests/test_output.py | 117 | 2594 | from sympy import S
from sympy.physics.vector import Vector, ReferenceFrame, Dyadic
from sympy.utilities.pytest import raises
Vector.simp = True
A = ReferenceFrame('A')
def test_output_type():
A = ReferenceFrame('A')
v = A.x + A.y
d = v | v
zerov = Vector(0)
zerod = Dyadic(0)
# dot products
assert isinstance(d & d, Dyadic)
assert isinstance(d & zerod, Dyadic)
assert isinstance(zerod & d, Dyadic)
assert isinstance(d & v, Vector)
assert isinstance(v & d, Vector)
assert isinstance(d & zerov, Vector)
assert isinstance(zerov & d, Vector)
raises(TypeError, lambda: d & S(0))
raises(TypeError, lambda: S(0) & d)
raises(TypeError, lambda: d & 0)
raises(TypeError, lambda: 0 & d)
assert not isinstance(v & v, (Vector, Dyadic))
assert not isinstance(v & zerov, (Vector, Dyadic))
assert not isinstance(zerov & v, (Vector, Dyadic))
raises(TypeError, lambda: v & S(0))
raises(TypeError, lambda: S(0) & v)
raises(TypeError, lambda: v & 0)
raises(TypeError, lambda: 0 & v)
# cross products
raises(TypeError, lambda: d ^ d)
raises(TypeError, lambda: d ^ zerod)
raises(TypeError, lambda: zerod ^ d)
assert isinstance(d ^ v, Dyadic)
assert isinstance(v ^ d, Dyadic)
assert isinstance(d ^ zerov, Dyadic)
assert isinstance(zerov ^ d, Dyadic)
assert isinstance(zerov ^ d, Dyadic)
raises(TypeError, lambda: d ^ S(0))
raises(TypeError, lambda: S(0) ^ d)
raises(TypeError, lambda: d ^ 0)
raises(TypeError, lambda: 0 ^ d)
assert isinstance(v ^ v, Vector)
assert isinstance(v ^ zerov, Vector)
assert isinstance(zerov ^ v, Vector)
raises(TypeError, lambda: v ^ S(0))
raises(TypeError, lambda: S(0) ^ v)
raises(TypeError, lambda: v ^ 0)
raises(TypeError, lambda: 0 ^ v)
# outer products
raises(TypeError, lambda: d | d)
raises(TypeError, lambda: d | zerod)
raises(TypeError, lambda: zerod | d)
raises(TypeError, lambda: d | v)
raises(TypeError, lambda: v | d)
raises(TypeError, lambda: d | zerov)
raises(TypeError, lambda: zerov | d)
raises(TypeError, lambda: zerov | d)
raises(TypeError, lambda: d | S(0))
raises(TypeError, lambda: S(0) | d)
raises(TypeError, lambda: d | 0)
raises(TypeError, lambda: 0 | d)
assert isinstance(v | v, Dyadic)
assert isinstance(v | zerov, Dyadic)
assert isinstance(zerov | v, Dyadic)
raises(TypeError, lambda: v | S(0))
raises(TypeError, lambda: S(0) | v)
raises(TypeError, lambda: v | 0)
raises(TypeError, lambda: 0 | v)
| bsd-3-clause | 8,322,091,404,888,139,000 | 33.131579 | 63 | 0.642251 | false |
gangadharkadam/stfrappe | frappe/modules/utils.py | 39 | 4119 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os
import frappe.modules
from frappe.utils import cstr
from frappe.modules import export_doc, get_module_path, scrub
def listfolders(path, only_name=0):
"""
		Returns the list of folders (with paths) in the given path.
		If only_name is set, it returns only the folder names.
"""
out = []
for each in os.listdir(path):
each = cstr(each)
dirname = each.split(os.path.sep)[-1]
fullpath = os.path.join(path, dirname)
if os.path.isdir(fullpath) and not dirname.startswith('.'):
out.append(only_name and dirname or fullpath)
return out
def switch_module(dt, dn, to, frm=None, export=None):
"""
	Change the module of the given doctype. If export is true, also export the txt and
	copy the code files from the source module.
"""
frappe.db.sql("update `tab"+dt+"` set module=%s where name=%s", (to, dn))
if export:
export_doc(dt, dn)
# copy code files
if dt in ('DocType', 'Page', 'Report'):
from_path = os.path.join(get_module_path(frm), scrub(dt), scrub(dn), scrub(dn))
to_path = os.path.join(get_module_path(to), scrub(dt), scrub(dn), scrub(dn))
		# make the destination directory if it does not exist
		os.system('mkdir -p %s' % os.path.join(get_module_path(to), scrub(dt), scrub(dn)))
		for ext in ('py','js','html','css'):
			os.system('cp %s %s' % (from_path + '.' + ext, to_path + '.' + ext))
def commonify_doclist(doclist, with_comments=1):
"""
Makes a doclist more readable by extracting common properties.
This is used for printing Documents in files
"""
from frappe.utils import get_common_dict, get_diff_dict
def make_common(doclist):
c = {}
if with_comments:
c['##comment'] = 'These values are common in all dictionaries'
for k in common_keys:
c[k] = doclist[0][k]
return c
def strip_common_and_idx(d):
for k in common_keys:
if k in d: del d[k]
if 'idx' in d: del d['idx']
return d
def make_common_dicts(doclist):
common_dict = {} # one per doctype
# make common dicts for all records
for d in doclist:
if not d['doctype'] in common_dict:
d1 = d.copy()
if d1.has_key("name"):
del d1['name']
common_dict[d['doctype']] = d1
else:
common_dict[d['doctype']] = get_common_dict(common_dict[d['doctype']], d)
return common_dict
common_keys = ['owner','docstatus','creation','modified','modified_by']
common_dict = make_common_dicts(doclist)
# make docs
final = []
for d in doclist:
f = strip_common_and_idx(get_diff_dict(common_dict[d['doctype']], d))
f['doctype'] = d['doctype'] # keep doctype!
# strip name for child records (only an auto generated number!)
if f['doctype'] != doclist[0]['doctype'] and f.has_key("name"):
del f['name']
if with_comments:
f['##comment'] = d['doctype'] + ('name' in f and (', ' + f['name']) or '')
final.append(f)
# add commons
commons = []
for d in common_dict.values():
d['name']='__common__'
if with_comments:
d['##comment'] = 'These values are common for all ' + d['doctype']
commons.append(strip_common_and_idx(d))
common_values = make_common(doclist)
return [common_values]+commons+final
def uncommonify_doclist(dl):
"""
	Expands a commonified doclist
"""
# first one has common values
common_values = dl[0]
common_dict = frappe._dict()
final = []
idx_dict = {}
for d in dl[1:]:
if 'name' in d and d['name']=='__common__':
# common for a doctype -
del d['name']
common_dict[d['doctype']] = d
else:
dt = d['doctype']
if not dt in idx_dict: idx_dict[dt] = 1;
d1 = frappe._dict(common_values.copy())
# update from common and global
d1.update(common_dict[dt])
d1.update(d)
# idx by sequence
d1['idx'] = idx_dict[dt]
# increment idx
idx_dict[dt] += 1
final.append(d1)
return final
def pprint_doclist(doclist, with_comments = 1):
from json import dumps
return dumps(commonify_doclist(doclist, False), indent=1, sort_keys=True)
def peval_doclist(txt):
from json import loads
try:
return uncommonify_doclist(loads(txt))
except Exception, e:
return uncommonify_doclist(eval(txt))
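# Illustrative round-trip, assuming `doclist` is a list of plain dicts as produced
# elsewhere in the framework (field values are made up):
#
#   txt = pprint_doclist(doclist)     # commonified, JSON formatted text
#   restored = peval_doclist(txt)     # expands back into the full list of dicts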
| mit | -775,023,721,281,391,400 | 25.403846 | 90 | 0.655013 | false |
fhe-odoo/odoo | addons/purchase_requisition/wizard/bid_line_qty.py | 374 | 1711 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
_name = "bid.line.qty"
_description = "Change Bid line quantity"
_columns = {
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def change_qty(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.order.line').write(cr, uid, active_ids, {'quantity_bid': data.qty})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,396,066,193,690,468,400 | 44.026316 | 115 | 0.628872 | false |
aronysidoro/django-angular | djangular/views/mixins.py | 16 | 5168 | # -*- coding: utf-8 -*-
import json
import warnings
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
def allow_remote_invocation(func, method='auto'):
"""
    All methods which shall be callable through a given Ajax 'action' must be
    decorated with @allow_remote_invocation. This is required for safety reasons:
    it prevents the caller from invoking arbitrary methods of a class.
"""
setattr(func, 'allow_rmi', method)
return func
def allowed_action(func):
warnings.warn("Decorator `@allowed_action` is deprecated. Use `@allow_remote_invocation` instead.", DeprecationWarning)
return allow_remote_invocation(func)
class JSONResponseException(Exception):
"""
Exception class for triggering HTTP 4XX responses with JSON content, where expected.
"""
status_code = 400
def __init__(self, message=None, status=None, *args, **kwargs):
if status is not None:
self.status_code = status
super(JSONResponseException, self).__init__(message, *args, **kwargs)
class JSONBaseMixin(object):
"""
Basic mixin for encoding HTTP responses in JSON format.
"""
json_encoder = DjangoJSONEncoder
json_content_type = 'application/json;charset=UTF-8'
def json_response(self, response_data, status=200, **kwargs):
out_data = json.dumps(response_data, cls=self.json_encoder, **kwargs)
response = HttpResponse(out_data, self.json_content_type, status=status)
response['Cache-Control'] = 'no-cache'
return response
class JSONResponseMixin(JSONBaseMixin):
"""
A mixin for View classes that dispatches requests containing the private HTTP header
``DjNg-Remote-Method`` onto a method of an instance of this class, with the given method name.
This named method must be decorated with ``@allow_remote_invocation`` and shall return a
list or dictionary which is serializable to JSON.
The returned HTTP responses are of kind ``application/json;charset=UTF-8``.
"""
def get(self, request, *args, **kwargs):
if not request.is_ajax():
return self._dispatch_super(request, *args, **kwargs)
if 'action' in kwargs:
warnings.warn("Using the keyword 'action' in URLresolvers is deprecated. Please use 'invoke_method' instead", DeprecationWarning)
remote_method = kwargs['action']
else:
remote_method = kwargs.get('invoke_method')
if remote_method:
# method for invocation is determined programmatically
handler = getattr(self, remote_method)
else:
# method for invocation is determined by HTTP header
remote_method = request.META.get('HTTP_DJNG_REMOTE_METHOD')
handler = remote_method and getattr(self, remote_method, None)
if not callable(handler):
return self._dispatch_super(request, *args, **kwargs)
if not hasattr(handler, 'allow_rmi'):
return HttpResponseForbidden("Method '{0}.{1}' has no decorator '@allow_remote_invocation'"
.format(self.__class__.__name__, remote_method))
try:
response_data = handler()
except JSONResponseException as e:
return self.json_response({'message': e.args[0]}, e.status_code)
return self.json_response(response_data)
def post(self, request, *args, **kwargs):
if not request.is_ajax():
return self._dispatch_super(request, *args, **kwargs)
try:
in_data = json.loads(request.body.decode('utf-8'))
except ValueError:
in_data = request.body.decode('utf-8')
if 'action' in in_data:
warnings.warn("Using the keyword 'action' inside the payload is deprecated. Please use 'djangoRMI' from module 'ng.django.forms'", DeprecationWarning)
remote_method = in_data.pop('action')
else:
remote_method = request.META.get('HTTP_DJNG_REMOTE_METHOD')
handler = remote_method and getattr(self, remote_method, None)
if not callable(handler):
return self._dispatch_super(request, *args, **kwargs)
if not hasattr(handler, 'allow_rmi'):
            return HttpResponseForbidden("Method '{0}.{1}' has no decorator '@allow_remote_invocation'"
                                         .format(self.__class__.__name__, remote_method))
try:
response_data = handler(in_data)
except JSONResponseException as e:
return self.json_response({'message': e.args[0]}, e.status_code)
return self.json_response(response_data)
def _dispatch_super(self, request, *args, **kwargs):
base = super(JSONResponseMixin, self)
handler = getattr(base, request.method.lower(), None)
if callable(handler):
return handler(request, *args, **kwargs)
# HttpResponseNotAllowed expects permitted methods.
return HttpResponseBadRequest('This view can not handle method {0}'.format(request.method), status=405)
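# Sketch of a view that exposes a method for remote invocation (the class and
# method names below are illustrative only):
#
#   class MyView(JSONResponseMixin, View):
#       @allow_remote_invocation
#       def fetch_data(self, in_data):
#           return {'items': [1, 2, 3]}
#
# An Ajax POST carrying the header ``DjNg-Remote-Method: fetch_data`` would then
# receive the returned dictionary serialized as JSON.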
| mit | 754,901,389,400,723,200 | 44.734513 | 162 | 0.647059 | false |
Eddy0402/Environment | vim/ycmd/cpp/ycm/tests/gmock/test/gmock_test_utils.py | 769 | 3684 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
Returns:
The absolute path of the test binary.
"""
return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
"""Runs the unit test."""
gtest_test_utils.Main()
| gpl-3.0 | -2,333,054,393,106,732,000 | 31.892857 | 79 | 0.739957 | false |
quanhua92/learning-notes | libs/wxpython/design/mvc.py | 1 | 3804 | import wx
class ModelInterface(object):
""""Defines an interface for a simple value generator model"""
def __init__(self):
super(ModelInterface, self).__init__()
self.value = 0
self.observers = list()
def Generate(self):
"""Interface method to be implemented by subclasses"""
raise NotImplementedError
def SetValue(self, value):
self.value = value
self.NotifyObservers()
def GetValue(self):
return self.value
def RegisterObserver(self, callback):
"""Register an observer callback
:param callable(newvalue)
"""
self.observers.append(callback)
def NotifyObservers(self):
"""Notify all observers of current value"""
for observer in self.observers:
observer()
class ControllerInterface(object):
"""Defines an interface a value generator controller"""
def __init__(self, model):
super(ControllerInterface, self).__init__()
# Attributes
self.model = model
self.view = TheView(None, self, self.model, "Fibonacci Generator")
# Setup
self.view.Show()
def DoGenerateNext(self):
"""User action request next value"""
raise NotImplementedError
class FibonacciModel(ModelInterface):
def Generate(self):
cval = self.GetValue()
# Get the next one
for fib in self.fibonacci():
if fib > cval:
self.SetValue(fib)
break
@staticmethod
def fibonacci():
"""FIbonacci generator method"""
a, b = 0, 1
while True:
yield a
a, b = b, a + b
class FibonacciController(ControllerInterface):
def DoGenerateNext(self):
self.view.EnableButton(False)
self.model.Generate()
class TheView(wx.Frame):
def __init__(self, parent, controller, model, title, *args, **kwargs):
super(TheView, self).__init__(parent, title=title, *args, **kwargs)
# Attributes
self.panel = ViewPanel(self, controller, model)
# Layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetInitialSize((300, 300))
def EnableButton(self, enable=True):
self.panel.button.Enable(enable)
class ViewPanel(wx.Panel):
def __init__(self, parent, controller, model):
super(ViewPanel, self).__init__(parent)
# Attributes
self.model = model
self.controller = controller
initial = str(self.model.GetValue())
self.text = wx.TextCtrl(self, value=initial)
self.button = wx.Button(self, label="Generate")
# Layout
self.__DoLayout()
# Setup
self.model.RegisterObserver(self.OnModelUpdate)
# Event Handlers
self.Bind(wx.EVT_BUTTON, self.OnAction)
def __DoLayout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
vsizer.AddStretchSpacer()
vsizer.Add(self.text, 0, wx.ALIGN_CENTER|wx.ALL, 8)
hsizer.AddStretchSpacer()
hsizer.Add(self.button)
hsizer.AddStretchSpacer()
vsizer.Add(hsizer, 0, wx.EXPAND)
vsizer.AddStretchSpacer()
self.SetSizer(vsizer)
def OnModelUpdate(self):
"""Observer Method"""
value = self.model.GetValue()
self.text.SetValue(str(value))
self.button.Enable(True)
def OnAction(self, event):
self.controller.DoGenerateNext()
class ModelViewApp(wx.App):
def OnInit(self):
self.model = FibonacciModel()
self.controller = FibonacciController(self.model)
return True
if __name__ == "__main__":
app = ModelViewApp(False)
app.MainLoop()
| apache-2.0 | 7,011,881,947,008,723,000 | 25.234483 | 75 | 0.602261 | false |
richardotis/scipy | scipy/odr/models.py | 113 | 4659 | """ Collection of Model instances for use with the odrpack fitting package.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.odr.odrpack import Model
__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
'polynomial']
def _lin_fcn(B, x):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + (x*b).sum(axis=0)
def _lin_fjb(B, x):
a = np.ones(x.shape[-1], float)
res = np.concatenate((a, x.ravel()))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _lin_fjd(B, x):
b = B[1:]
b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0)
b.shape = x.shape
return b
def _lin_est(data):
# Eh. The answer is analytical, so just return all ones.
# Don't return zeros since that will interfere with
# ODRPACK's auto-scaling procedures.
if len(data.x.shape) == 2:
m = data.x.shape[0]
else:
m = 1
return np.ones((m + 1,), float)
def _poly_fcn(B, x, powers):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + np.sum(b * np.power(x, powers), axis=0)
def _poly_fjacb(B, x, powers):
res = np.concatenate((np.ones(x.shape[-1], float), np.power(x,
powers).flat))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _poly_fjacd(B, x, powers):
b = B[1:]
b.shape = (b.shape[0], 1)
b = b * powers
return np.sum(b * np.power(x, powers-1),axis=0)
def _exp_fcn(B, x):
return B[0] + np.exp(B[1] * x)
def _exp_fjd(B, x):
return B[1] * np.exp(B[1] * x)
def _exp_fjb(B, x):
res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
res.shape = (2, x.shape[-1])
return res
def _exp_est(data):
# Eh.
return np.array([1., 1.])
multilinear = Model(_lin_fcn, fjacb=_lin_fjb,
fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^m \\beta_i x_i$'})
def polynomial(order):
"""
Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
polynomial : Model instance
Model instance.
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers.shape = (len(powers), 1)
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ':'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^{%s} \\beta_i x^i$' %
(len_beta-1)})
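# A brief fitting sketch using the factory above (x_obs and y_obs stand in for
# observed data arrays supplied by the caller):
#
#   from scipy.odr import ODR, Data
#   model = polynomial(3)                  # constant plus x, x**2, x**3 terms
#   output = ODR(Data(x_obs, y_obs), model).run()
#   print(output.beta)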
exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est, meta={'name':'Exponential',
'equ':'y= B_0 + exp(B_1 * x)',
'TeXequ':'$y=\\beta_0 + e^{\\beta_1 x}$'})
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
_ret.shape = (2,) + x.shape
return _ret
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
_ret.shape = (3,) + x.shape
return _ret
def _quad_est(data):
return (1.,1.,1.)
unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
estimate=_unilin_est, meta={'name': 'Univariate Linear',
'equ': 'y = B_0 * x + B_1',
'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb,
estimate=_quad_est, meta={'name': 'Quadratic',
'equ': 'y = B_0*x**2 + B_1*x + B_2',
'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'})
| bsd-3-clause | -1,072,080,834,890,045,700 | 24.320652 | 77 | 0.527366 | false |
SOKP/external_chromium_org | tools/telemetry/telemetry/value/summary.py | 58 | 6381 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from telemetry.value import failure
from telemetry.value import merge_values
from telemetry.value import skip
class Summary(object):
"""Computes summary values from the per-page-run values produced by a test.
  Some telemetry benchmarks repeat a number of times in order to get a reliable
  measurement. The test does not have to handle merging of these runs: the
  summarizer does it for you.
For instance, if two pages run, 3 and 1 time respectively:
ScalarValue(page1, 'foo', units='ms', 1)
ScalarValue(page1, 'foo', units='ms', 1)
ScalarValue(page1, 'foo', units='ms', 1)
ScalarValue(page2, 'foo', units='ms', 2)
Then summarizer will produce two sets of values. First,
computed_per_page_values:
[
ListOfScalarValues(page1, 'foo', units='ms', [1,1,1])],
ListOfScalarValues(page2, 'foo', units='ms', [2])]
]
In addition, it will produce a summary value:
[
ListOfScalarValues(page=None, 'foo', units='ms', [1,1,1,2])]
]
"""
def __init__(self, all_page_specific_values):
had_failures = any(isinstance(v, failure.FailureValue) for v in
all_page_specific_values)
self.had_failures = had_failures
self._computed_per_page_values = []
self._computed_summary_values = []
self._interleaved_computed_per_page_values_and_summaries = []
self._ComputePerPageValues(all_page_specific_values)
@property
def computed_per_page_values(self):
return self._computed_per_page_values
@property
def computed_summary_values(self):
return self._computed_summary_values
@property
def interleaved_computed_per_page_values_and_summaries(self):
"""Returns the computed per page values and summary values interleaved.
All the results for a given name are printed together. First per page
values, then summary values.
"""
return self._interleaved_computed_per_page_values_and_summaries
def _ComputePerPageValues(self, all_page_specific_values):
all_successful_page_values = [
v for v in all_page_specific_values if not (isinstance(
v, failure.FailureValue) or isinstance(v, skip.SkipValue))]
# We will later need to determine how many values were originally created
# for each value name, to apply a workaround meant to clean up the printf
# output.
num_successful_pages_for_value_name = defaultdict(int)
for v in all_successful_page_values:
num_successful_pages_for_value_name[v.name] += 1
# By here, due to page repeat options, all_values_from_successful_pages
    # contains values of the same name not only from multiple pages, but also
    # from repeated runs of the same page. So even if, for instance, only one
    # page ran, it may have run twice, producing two 'x' values.
#
# So, get rid of the repeated pages by merging.
merged_page_values = merge_values.MergeLikeValuesFromSamePage(
all_successful_page_values)
# Now we have a bunch of values, but there is only one value_name per page.
# Suppose page1 and page2 ran, producing values x and y. We want to print
# x for page1
# x for page2
# x for page1, page2 combined
#
# y for page1
# y for page2
# y for page1, page2 combined
#
# We already have the x values in the values array. But, we will need
# them indexable by the value name.
#
# The following dict maps value_name -> list of pages that have values of
# that name.
per_page_values_by_value_name = defaultdict(list)
for value in merged_page_values:
per_page_values_by_value_name[value.name].append(value)
# We already have the x values in the values array. But, we also need
# the values merged across the pages. And, we will need them indexed by
# value name so that we can find them when printing out value names in
# alphabetical order.
merged_pages_value_by_value_name = {}
if not self.had_failures:
for value in merge_values.MergeLikeValuesFromDifferentPages(
all_successful_page_values):
assert value.name not in merged_pages_value_by_value_name
merged_pages_value_by_value_name[value.name] = value
# sorted_value names will govern the order we start printing values.
value_names = set([v.name for v in merged_page_values])
sorted_value_names = sorted(value_names)
# Time to walk through the values by name, printing first the page-specific
# values and then the merged_site value.
for value_name in sorted_value_names:
per_page_values = per_page_values_by_value_name.get(value_name, [])
# Sort the values by their url
sorted_per_page_values = list(per_page_values)
sorted_per_page_values.sort(
key=lambda per_page_values: per_page_values.page.display_name)
# Output the page-specific results.
num_successful_pages_for_this_value_name = (
num_successful_pages_for_value_name[value_name])
for per_page_value in sorted_per_page_values:
self._ComputePerPageValue(per_page_value,
num_successful_pages_for_this_value_name)
# Output the combined values.
merged_pages_value = merged_pages_value_by_value_name.get(value_name,
None)
if merged_pages_value:
self._computed_summary_values.append(merged_pages_value)
self._interleaved_computed_per_page_values_and_summaries.append(
merged_pages_value)
def _ComputePerPageValue(
self, value, num_successful_pages_for_this_value_name):
# If there were any page errors, we typically will print nothing.
#
# Note: this branch is structured less-densely to improve legibility.
if num_successful_pages_for_this_value_name > 1:
should_print = True
elif (self.had_failures and
num_successful_pages_for_this_value_name == 1):
should_print = True
else:
should_print = False
if not should_print:
return
# Actually save the result.
self._computed_per_page_values.append(value)
self._interleaved_computed_per_page_values_and_summaries.append(value)
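# Typical use (a sketch; `all_values` stands for the per-page values produced by
# a benchmark run):
#
#   summary = Summary(all_values)
#   for value in summary.interleaved_computed_per_page_values_and_summaries:
#       print value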
| bsd-3-clause | 7,461,314,517,576,428,000 | 38.147239 | 79 | 0.67889 | false |
ayseyo/oclapi | django-nonrel/ocl/mappings/tests.py | 1 | 46964 | """
Tests for the mappings app: validation, versioning and persistence behavior of
Mapping and MappingVersion, and their interaction with sources and collections.
"""
import os
from unittest import skip
from urlparse import urlparse
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.test import Client
from django.test.client import MULTIPART_CONTENT, FakePayload
from django.utils.encoding import force_str
from collection.models import Collection, CollectionVersion
from concepts.models import Concept, LocalizedText
from mappings.models import Mapping, MappingVersion
from mappings.validation_messages import OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS
from oclapi.models import ACCESS_TYPE_EDIT, ACCESS_TYPE_VIEW, CUSTOM_VALIDATION_SCHEMA_OPENMRS
from oclapi.utils import add_user_to_org
from orgs.models import Organization
from sources.models import Source, SourceVersion
from test_helper.base import *
from users.models import UserProfile
class OCLClient(Client):
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = self.my_put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def my_put(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
"Construct a PUT request."
post_data = self._encode_data(data, content_type)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': force_str(parsed[4]),
'REQUEST_METHOD': str('PUT'),
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
class MappingBaseTest(OclApiBaseTestCase):
def setUp(self):
super(MappingBaseTest, self).setUp()
self.user1 = User.objects.create_user(
username='user1',
email='[email protected]',
password='user1',
last_name='One',
first_name='User'
)
self.user2 = User.objects.create_user(
username='user2',
email='[email protected]',
password='user2',
last_name='Two',
first_name='User'
)
self.userprofile1 = UserProfile.objects.create(user=self.user1, mnemonic='user1')
self.userprofile2 = UserProfile.objects.create(user=self.user2, mnemonic='user2')
self.org1 = Organization.objects.create(name='org1', mnemonic='org1')
self.org2 = Organization.objects.create(name='org2', mnemonic='org2')
add_user_to_org(self.userprofile2, self.org2)
self.source1 = Source(
name='source1',
mnemonic='source1',
full_name='Source One',
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source1.com',
description='This is the first test source',
)
kwargs = {
'parent_resource': self.userprofile1
}
Source.persist_new(self.source1, self.user1, **kwargs)
self.source1 = Source.objects.get(id=self.source1.id)
self.source2 = Source(
name='source2',
mnemonic='source2',
full_name='Source Two',
source_type='Reference',
public_access=ACCESS_TYPE_VIEW,
default_locale='fr',
supported_locales=['fr'],
website='www.source2.com',
description='This is the second test source',
)
kwargs = {
'parent_resource': self.org2,
}
Source.persist_new(self.source2, self.user1, **kwargs)
self.source2 = Source.objects.get(id=self.source2.id)
self.name = LocalizedText.objects.create(name='Fred', locale='en', type='FULLY_SPECIFIED')
(self.concept1, errors) = create_concept(mnemonic='concept1', user=self.user1, source=self.source1, names=[self.name], descriptions=[self.name])
(self.concept2, errors) = create_concept(mnemonic='concept2', user=self.user1, source=self.source1, names=[self.name], descriptions=[self.name])
(self.concept3, errors) = create_concept(mnemonic='concept3', user=self.user1, source=self.source2, names=[self.name], descriptions=[self.name])
(self.concept4, errors) = create_concept(mnemonic='concept4', user=self.user1, source=self.source2, names=[self.name], descriptions=[self.name])
class MappingVersionBaseTest(MappingBaseTest):
def setUp(self):
super(MappingVersionBaseTest, self).setUp()
self.mapping1 = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='versionmapping1',
)
self.mapping1.full_clean()
self.mapping1.save()
class MappingTest(MappingBaseTest):
def test_create_mapping_positive(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(ACCESS_TYPE_VIEW, mapping.public_access)
self.assertEquals('user1', mapping.created_by)
self.assertEquals('user1', mapping.updated_by)
self.assertEquals(self.source1, mapping.parent)
self.assertEquals('Same As', mapping.map_type)
self.assertEquals(self.concept1, mapping.from_concept)
self.assertEquals(self.concept2, mapping.to_concept)
self.assertEquals(self.source1, mapping.from_source)
self.assertEquals(self.source1.owner_name, mapping.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping.from_source_name)
self.assertEquals(self.source1, mapping.get_to_source())
self.assertEquals(self.source1.owner_name, mapping.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping.get_to_concept_name())
def test_create_mapping_negative__no_created_by(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_updated_by(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_parent(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
errors = Mapping.persist_new(mapping, self.user1)
self.assertTrue(errors)
def test_create_mapping_negative__no_map_type(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_from_concept(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__no_to_concept(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__two_to_concepts(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
to_concept_code='code',
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__self_mapping(self):
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept1,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__same_mapping_type1(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
with self.assertRaises(ValidationError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_create_mapping_negative__same_mapping_type2(self):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_source=self.source1,
to_concept_code='code',
to_concept_name='name',
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(ACCESS_TYPE_VIEW, mapping.public_access)
self.assertEquals('user1', mapping.created_by)
self.assertEquals('user1', mapping.updated_by)
self.assertEquals(self.source1, mapping.parent)
self.assertEquals('Same As', mapping.map_type)
self.assertEquals(self.concept1, mapping.from_concept)
self.assertIsNone(mapping.to_concept)
self.assertEquals(self.source1, mapping.from_source)
self.assertEquals(self.source1.owner_name, mapping.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping.from_source_name)
self.assertEquals(self.source1, mapping.get_to_source())
self.assertEquals(self.source1.owner_name, mapping.to_source_owner)
self.assertEquals('code', mapping.get_to_concept_code())
self.assertEquals('name', mapping.get_to_concept_name())
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
with self.assertRaises(IntegrityError):
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_source=self.source1,
to_concept_code='code',
to_concept_name='name',
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
def test_mapping_access_changes_with_source(self):
public_access = self.source1.public_access
mapping = Mapping(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
public_access=public_access,
external_id='mapping1',
)
mapping.full_clean()
mapping.save()
self.assertEquals(self.source1.public_access, mapping.public_access)
self.source1.public_access = ACCESS_TYPE_VIEW
self.source1.save()
mapping = Mapping.objects.get(id=mapping.id)
self.assertNotEquals(public_access, self.source1.public_access)
self.assertEquals(self.source1.public_access, mapping.public_access)
@skip('Skip this test until development of map type validation feature is complete.')
def test_create_mapping_negative__invalid_mapping_type(self):
maptypes_source = Source.objects.get(name="MapTypes")
create_concept(self.user1, maptypes_source, concept_class="MapType",names=[create_localized_text("SAME-AS")])
create_concept(self.user1, maptypes_source, concept_class="MapType",names=[create_localized_text("NARROWER-THAN")])
user = create_user()
source = create_source(user)
(concept1, _) = create_concept(user, source)
(concept2, _) = create_concept(user, source)
mapping = Mapping(
created_by=user,
updated_by=user,
parent=source,
map_type='XYZQWERT',
from_concept=concept1,
to_concept=concept2,
public_access=ACCESS_TYPE_VIEW,
)
kwargs = {
'parent_resource': source,
}
errors = Mapping.persist_new(mapping, user, **kwargs)
self.assertEquals(1, len(errors))
self.assertEquals(errors['names'][0], 'Mapping type should be valid attribute')
@skip('Skip this test until development of map type validation feature is complete.')
def test_create_mapping_positive__valid_mapping_type(self):
maptypes_source = Source.objects.get(name="MapTypes")
create_concept(self.user1, maptypes_source, concept_class="MapType", names=[create_localized_text("SAME-AS")])
create_concept(self.user1, maptypes_source, concept_class="MapType", names=[create_localized_text("NARROWER-THAN")])
user = create_user()
source = create_source(user)
(concept1, _) = create_concept(user, source)
(concept2, _) = create_concept(user, source)
mapping = Mapping(
created_by=user,
updated_by=user,
parent=source,
map_type='SAME-AS',
from_concept=concept1,
to_concept=concept2,
public_access=ACCESS_TYPE_VIEW,
)
kwargs = {
'parent_resource': source,
}
errors = Mapping.persist_new(mapping, user, **kwargs)
self.assertEquals(0, len(errors))
class MappingVersionTest(MappingVersionBaseTest):
def test_create_mapping_positive(self):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mappingversion1',
versioned_object_id=self.mapping1.id,
mnemonic='tempid',
versioned_object_type=ContentType.objects.get_for_model(Mapping),
)
mapping_version.full_clean()
mapping_version.save()
self.assertTrue(MappingVersion.objects.filter(versioned_object_id = self.mapping1.id).exists())
mapping_version = MappingVersion.objects.get(versioned_object_id = self.mapping1.id)
self.assertEquals(ACCESS_TYPE_VIEW, mapping_version.public_access)
self.assertEquals('user1', mapping_version.created_by)
self.assertEquals('user1', mapping_version.updated_by)
self.assertEquals(self.source1, mapping_version.parent)
self.assertEquals('Same As', mapping_version.map_type)
self.assertEquals(self.concept1, mapping_version.from_concept)
self.assertEquals(self.concept2, mapping_version.to_concept)
self.assertEquals(self.source1, mapping_version.from_source)
self.assertEquals(self.source1.owner_name, mapping_version.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping_version.from_source_name)
self.assertEquals(self.source1, mapping_version.get_to_source())
self.assertEquals(self.source1.owner_name, mapping_version.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping_version.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping_version.get_to_concept_name())
def test_create_mapping_negative__no_created_by(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping111',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_updated_by(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_parent(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_map_type(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_from_concept(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
to_concept=self.concept2,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_create_mapping_negative__no_version_object(self):
with self.assertRaises(ValidationError):
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
def test_mapping_access_changes_with_source(self):
public_access = self.source1.public_access
mapping_version = MappingVersion(
created_by=self.user1,
updated_by=self.user1,
parent=self.source1,
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
public_access=public_access,
external_id='mapping1',
versioned_object_id=self.mapping1.id,
versioned_object_type=ContentType.objects.get_for_model(Mapping),
mnemonic='tempid'
)
mapping_version.full_clean()
mapping_version.save()
self.assertEquals(self.source1.public_access, mapping_version.public_access)
self.source1.public_access = ACCESS_TYPE_VIEW
self.source1.save()
def test_collections_ids(self):
kwargs = {
'parent_resource': self.userprofile1
}
collection = Collection(
name='collection2',
mnemonic='collection2',
full_name='Collection Two',
collection_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.collection2.com',
description='This is the second test collection'
)
Collection.persist_new(collection, self.user1, **kwargs)
source = Source(
name='source',
mnemonic='source',
full_name='Source One',
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source1.com',
description='This is the first test source'
)
kwargs = {
'parent_resource': self.org1
}
Source.persist_new(source, self.user1, **kwargs)
(concept1, errors) = create_concept(mnemonic='concept12', user=self.user1, source=source)
(from_concept, errors) = create_concept(mnemonic='fromConcept', user=self.user1, source=source)
(to_concept, errors) = create_concept(mnemonic='toConcept', user=self.user1, source=source)
mapping = Mapping(
map_type='Same As',
from_concept=from_concept,
to_concept=to_concept,
external_id='mapping',
)
kwargs = {
'parent_resource': source,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
initial_mapping_version = MappingVersion.objects.get(versioned_object_id=mapping.id)
new_mapping_version = MappingVersion.for_mapping(mapping)
new_mapping_version.mnemonic = 98
new_mapping_version.save()
from_concept_reference = '/orgs/org1/sources/source/concepts/' + Concept.objects.get(mnemonic=from_concept.mnemonic).mnemonic + '/'
concept1_reference = '/orgs/org1/sources/source/concepts/' + Concept.objects.get(mnemonic=concept1.mnemonic).mnemonic + '/'
mapping = Mapping.objects.filter()[1]
references = [concept1_reference, from_concept_reference, mapping.uri, initial_mapping_version.uri]
collection.expressions = references
collection.full_clean()
collection.save()
self.assertEquals(initial_mapping_version.collection_ids, [collection.id])
self.assertEquals(new_mapping_version.collection_ids, [collection.id])
def test_collections_version_ids(self):
kwargs = {
'parent_resource': self.userprofile1
}
collection = Collection(
name='collection2',
mnemonic='collection2',
full_name='Collection Two',
collection_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.collection2.com',
description='This is the second test collection'
)
Collection.persist_new(collection, self.user1, **kwargs)
source = Source(
name='source',
mnemonic='source',
full_name='Source One',
source_type='Dictionary',
public_access=ACCESS_TYPE_EDIT,
default_locale='en',
supported_locales=['en'],
website='www.source1.com',
description='This is the first test source'
)
kwargs = {
'parent_resource': self.org1
}
Source.persist_new(source, self.user1, **kwargs)
(concept1, errors) = create_concept(mnemonic='concept12', user=self.user1, source=source)
(from_concept, errors) = create_concept(mnemonic='fromConcept', user=self.user1, source=source)
(to_concept, errors) = create_concept(mnemonic='toConcept', user=self.user1, source=source)
mapping = Mapping(
map_type='Same As',
from_concept=from_concept,
to_concept=to_concept,
external_id='mapping',
)
kwargs = {
'parent_resource': source,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.filter()[1]
mapping_reference = '/orgs/org1/sources/source/mappings/' + mapping.id + '/'
references = [mapping_reference]
collection.expressions = references
collection.full_clean()
collection.save()
mapping_version = MappingVersion.objects.filter()[0]
collection_version = CollectionVersion(
name='version1',
mnemonic='version1',
versioned_object=collection,
released=True,
created_by=self.user1,
updated_by=self.user1,
mappings=[mapping_version.id]
)
collection_version.full_clean()
collection_version.save()
self.assertEquals(mapping_version.collection_version_ids, [collection_version.id])
class MappingClassMethodsTest(MappingBaseTest):
def test_persist_new_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(self.source1.public_access, mapping.public_access)
self.assertEquals('user1', mapping.created_by)
self.assertEquals('user1', mapping.updated_by)
self.assertEquals(self.source1, mapping.parent)
self.assertEquals('Same As', mapping.map_type)
self.assertEquals(self.concept1, mapping.from_concept)
self.assertEquals(self.concept2, mapping.to_concept)
self.assertEquals(self.source1, mapping.from_source)
self.assertEquals(self.source1.owner_name, mapping.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping.from_source_name)
self.assertEquals(self.source1, mapping.get_to_source())
self.assertEquals(self.source1.owner_name, mapping.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping.get_to_concept_name())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_persist_new_version_created_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(MappingVersion.objects.filter(versioned_object_id=mapping.id, is_latest_version=True).exists())
mapping_version = MappingVersion.objects.get(versioned_object_id=mapping.id, is_latest_version=True)
self.assertEquals(self.source1.public_access, mapping_version.public_access)
self.assertEquals('user1', mapping_version.created_by)
self.assertEquals('user1', mapping_version.updated_by)
self.assertEquals(self.source1, mapping_version.parent)
self.assertEquals('Same As', mapping_version.map_type)
self.assertEquals(self.concept1, mapping_version.from_concept)
self.assertEquals(self.concept2, mapping_version.to_concept)
self.assertEquals(self.source1, mapping_version.from_source)
self.assertEquals(self.source1.owner_name, mapping_version.from_source_owner)
self.assertEquals(self.source1.mnemonic, mapping_version.from_source_name)
self.assertEquals(self.source1, mapping_version.get_to_source())
self.assertEquals(self.source1.owner_name, mapping_version.to_source_owner)
self.assertEquals(self.concept2.mnemonic, mapping_version.get_to_concept_code())
self.assertEquals(self.concept2.display_name, mapping_version.get_to_concept_name())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_persist_new_negative__no_creator(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, None, **kwargs)
self.assertEquals(1, len(errors))
self.assertTrue('non_field_errors' in errors)
non_field_errors = errors['non_field_errors']
self.assertEquals(1, len(non_field_errors))
self.assertTrue('creator' in non_field_errors[0])
self.assertFalse(Mapping.objects.filter(external_id='mapping1').exists())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(0, len(source_version.mappings))
def test_persist_new_negative__no_parent(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(1, len(errors))
self.assertTrue('non_field_errors' in errors)
non_field_errors = errors['non_field_errors']
self.assertEquals(1, len(non_field_errors))
self.assertTrue('parent' in non_field_errors[0])
self.assertFalse(Mapping.objects.filter(external_id='mapping1').exists())
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(0, len(source_version.mappings))
def test_persist_new_negative__same_mapping(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
mv = MappingVersion.objects.get(versioned_object_id=mapping.id)
self.assertTrue(mv.id in source_version.mappings)
# Repeat with same concepts
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping2',
)
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
        self.assertEquals(1, len(errors))
self.assertTrue('__all__' in errors)
non_field_errors = errors['__all__']
self.assertEquals(1, len(non_field_errors))
self.assertTrue('already exists' in non_field_errors[0])
self.assertEquals(1, len(source_version.mappings))
def test_persist_new_positive__same_mapping_different_source(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
# Repeat with same concepts
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping2',
)
kwargs = {
'parent_resource': self.source2,
}
source_version = SourceVersion.get_latest_version_of(self.source2)
self.assertEquals(0, len(source_version.mappings))
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping2').exists())
mapping = Mapping.objects.get(external_id='mapping2')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_persist_new_positive__earlier_source_version(self):
version1 = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(version1.mappings))
version2 = SourceVersion.for_base_object(self.source1, label='version2')
version2.save()
self.assertEquals(0, len(version2.mappings))
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
kwargs = {
'parent_resource': self.source1,
'parent_resource_version': version1,
}
errors = Mapping.persist_new(mapping, self.user1, **kwargs)
self.assertEquals(0, len(errors))
self.assertTrue(Mapping.objects.filter(external_id='mapping1').exists())
mapping = Mapping.objects.get(external_id='mapping1')
version1 = SourceVersion.objects.get(id=version1.id)
self.assertEquals(1, len(version1.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in version1.mappings)
version2 = SourceVersion.objects.get(id=version2.id)
self.assertEquals(0, len(version2.mappings))
latest_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(latest_version.mappings))
def test_persist_persist_changes_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
to_concept = mapping.to_concept
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
mapping.to_concept = self.concept3
errors = Mapping.persist_changes(mapping, self.user1)
self.assertEquals(0, len(errors))
mapping = Mapping.objects.get(external_id='mapping1')
self.assertEquals(self.concept3, mapping.to_concept)
self.assertNotEquals(to_concept, mapping.to_concept)
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
mv = MappingVersion.objects.filter(versioned_object_id=mapping.id)
self.assertTrue(mv[1].id in source_version.mappings)
def test_persist_persist_changes_negative__no_updated_by(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
mapping.to_concept = self.concept3
errors = Mapping.persist_changes(mapping, None)
self.assertEquals(1, len(errors))
self.assertTrue('updated_by' in errors)
source_version = SourceVersion.objects.get(id=source_version.id)
self.assertEquals(1, len(source_version.mappings))
self.assertTrue(MappingVersion.objects.get(versioned_object_id=mapping.id).id in source_version.mappings)
def test_retire_positive(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertFalse(mapping.retired)
Mapping.retire(mapping, self.user1)
self.assertTrue(mapping.retired)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(mapping.retired)
        mapping_version = MappingVersion.objects.get(versioned_object_id=mapping.mnemonic, mnemonic=2)
        self.assertTrue(mapping_version.retired)
def test_retire_negative(self):
mapping = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
retired=True,
)
kwargs = {
'parent_resource': self.source1,
}
Mapping.persist_new(mapping, self.user1, **kwargs)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(mapping.retired)
        result = Mapping.retire(mapping, self.user1)
self.assertFalse(result)
mapping = Mapping.objects.get(external_id='mapping1')
self.assertTrue(mapping.retired)
def test_edit_mapping_make_new_version_positive(self):
mapping1 = Mapping(
map_type='Same As',
from_concept=self.concept1,
to_concept=self.concept2,
external_id='mapping1',
)
source_version = SourceVersion.get_latest_version_of(self.source1)
self.assertEquals(0, len(source_version.mappings))
kwargs = {
'parent_resource': self.source1,
}
errors = Mapping.persist_new(mapping1, self.user1, **kwargs)
self.assertEquals(0, len(errors))
        self.assertEquals(1, len(MappingVersion.objects.filter(versioned_object_id=mapping1.id)))
        mapping1.map_type = 'BROADER_THAN'
        Mapping.persist_changes(mapping1, self.user1)
        self.assertEquals(2, len(MappingVersion.objects.filter(versioned_object_id=mapping1.id)))
        old_version = MappingVersion.objects.get(versioned_object_id=mapping1.id, is_latest_version=False)
        new_version = MappingVersion.objects.get(versioned_object_id=mapping1.id, is_latest_version=True)
        self.assertFalse(old_version.is_latest_version)
        self.assertTrue(new_version.is_latest_version)
        self.assertEquals(new_version.map_type, 'BROADER_THAN')
        self.assertEquals(old_version.map_type, 'Same As')
class OpenMRSMappingValidationTest(MappingBaseTest):
def test_create_same_from_and_to_pair_with_different_map_types_should_throw_validation_error(self):
user = create_user()
source = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
(concept1, _) = create_concept(user, source)
(concept2, _) = create_concept(user, source)
create_mapping(user, source, concept1, concept2, "Same As")
mapping = Mapping(
created_by=user,
updated_by=user,
parent=source,
map_type='Is Subset of',
from_concept=concept1,
to_concept=concept2,
public_access=ACCESS_TYPE_VIEW,
)
kwargs = {
'parent_resource': source,
}
errors = Mapping.persist_new(mapping, user, **kwargs)
self.assertTrue(OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS in errors["__all__"])
def test_update_different_from_and_to_pairs_to_same_from_and_to_pairs_should_throw_validation_error(self):
user = create_user()
source1 = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
source2 = create_source(user, validation_schema=CUSTOM_VALIDATION_SCHEMA_OPENMRS)
(concept1, _) = create_concept(user, source1)
(concept2, _) = create_concept(user, source2)
(concept3, _) = create_concept(user, source2)
create_mapping(user, source1, concept1, concept2, "Same As")
mapping = create_mapping(user, source1, concept2, concept3, "Same As")
mapping.from_concept = concept1
mapping.to_concept = concept2
mapping.map_type = "Different"
errors = Mapping.persist_changes(mapping, user)
self.assertTrue(
OPENMRS_SINGLE_MAPPING_BETWEEN_TWO_CONCEPTS in errors["__all__"])
| mpl-2.0 | -7,807,986,652,242,528,000 | 39.451335 | 152 | 0.617366 | false |
ajose01/rethinkdb | test/rql_test/connections/http_support/werkzeug/contrib/iterio.py | 147 | 10718 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
    The other way round works a bit differently because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: http://codespeak.net/py/dist/greenlet.html
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
import greenlet
except ImportError:
greenlet = None
from werkzeug._compat import implements_iterator
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
def _newline(reference_string):
if isinstance(reference_string, bytes):
return b'\n'
return u'\n'
@implements_iterator
class IterIO(object):
"""Instances of this object implement an interface compatible with the
standard Python :class:`file` object. Streams are either read-only or
write-only depending on how the object is created.
If the first argument is an iterable a file like object is returned that
returns the contents of the iterable. In case the iterable is empty
read operations will return the sentinel value.
If the first argument is a callable then the stream object will be
created and passed to that function. The caller itself however will
    not receive a stream but an iterable. The function will be executed
step by step as something iterates over the returned iterable. Each
call to :meth:`flush` will create an item for the iterable. If
:meth:`flush` is called without any writes in-between the sentinel
value will be yielded.
Note for Python 3: due to the incompatible interface of bytes and
streams you should set the sentinel value explicitly to an empty
bytestring (``b''``) if you are expecting to deal with bytes as
otherwise the end of the stream is marked with the wrong sentinel
value.
.. versionadded:: 0.9
`sentinel` parameter was added.
"""
def __new__(cls, obj, sentinel=''):
try:
iterator = iter(obj)
except TypeError:
return IterI(obj, sentinel)
return IterO(iterator, sentinel)
def __iter__(self):
return self
def tell(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return self.pos
def isatty(self):
if self.closed:
raise ValueError('I/O operation on closed file')
return False
def seek(self, pos, mode=0):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def truncate(self, size=None):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def write(self, s):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def writelines(self, list):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def readlines(self, sizehint=0):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def readline(self, length=None):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
raise IOError(9, 'Bad file descriptor')
def __next__(self):
if self.closed:
raise StopIteration()
line = self.readline()
if not line:
raise StopIteration()
return line
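# Illustrative usage sketch (not part of the original API): wrapping an
# existing iterable gives the read-only behaviour described in the module
# docstring.  The helper name below is made up purely for illustration.
def _iterio_read_example():
    """Wrap a generator and read it back through the file-like interface."""
    def produce():
        yield "hello "
        yield "world"
    stream = IterIO(produce())
    first = stream.read(5)      # -> "hello"
    rest = stream.readline()    # -> " world" (iterator exhausted, no newline)
    return first, rest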
class IterI(IterIO):
"""Convert an stream into an iterator."""
def __new__(cls, func, sentinel=''):
if greenlet is None:
raise RuntimeError('IterI requires greenlet support')
stream = object.__new__(cls)
stream._parent = greenlet.getcurrent()
stream._buffer = []
stream.closed = False
stream.sentinel = sentinel
stream.pos = 0
def run():
func(stream)
stream.close()
g = greenlet.greenlet(run, stream._parent)
while 1:
rv = g.switch()
if not rv:
return
yield rv[0]
def close(self):
if not self.closed:
self.closed = True
self._flush_impl()
def write(self, s):
if self.closed:
raise ValueError('I/O operation on closed file')
if s:
self.pos += len(s)
self._buffer.append(s)
def writelines(self, list):
for item in list:
self.write(item)
def flush(self):
if self.closed:
raise ValueError('I/O operation on closed file')
self._flush_impl()
def _flush_impl(self):
data = _mixed_join(self._buffer, self.sentinel)
self._buffer = []
if not data and self.closed:
self._parent.switch()
else:
self._parent.switch((data,))
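# Illustrative sketch (not part of the original API): the write direction uses
# greenlet exactly as the module docstring explains, so the helper below bails
# out when greenlet is unavailable.  The name is made up for illustration.
def _iteri_write_example():
    """Drive a writer callable and collect what it flushes."""
    if greenlet is None:
        return None
    def writer(stream):
        stream.write("first")
        stream.flush()
        stream.write("second")   # flushed implicitly when the stream closes
    return list(IterIO(writer))  # -> ['first', 'second']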
class IterO(IterIO):
"""Iter output. Wrap an iterator and give it a stream like interface."""
def __new__(cls, gen, sentinel=''):
self = object.__new__(cls)
self._gen = gen
self._buf = None
self.sentinel = sentinel
self.closed = False
self.pos = 0
return self
def __iter__(self):
return self
def _buf_append(self, string):
'''Replace string directly without appending to an empty string,
avoiding type issues.'''
if not self._buf:
self._buf = string
else:
self._buf += string
def close(self):
if not self.closed:
self.closed = True
if hasattr(self._gen, 'close'):
self._gen.close()
def seek(self, pos, mode=0):
if self.closed:
raise ValueError('I/O operation on closed file')
if mode == 1:
pos += self.pos
elif mode == 2:
self.read()
self.pos = min(self.pos, self.pos + pos)
return
elif mode != 0:
raise IOError('Invalid argument')
buf = []
try:
tmp_end_pos = len(self._buf)
while pos > tmp_end_pos:
                item = next(self._gen)
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
self.pos = max(0, pos)
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
if n < 0:
self._buf_append(_mixed_join(self._gen, self.sentinel))
result = self._buf[self.pos:]
self.pos += len(result)
return result
new_pos = self.pos + n
buf = []
try:
tmp_end_pos = 0 if self._buf is None else len(self._buf)
while new_pos > tmp_end_pos or (self._buf is None and not buf):
item = next(self._gen)
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
if self._buf is None:
return self.sentinel
new_pos = max(0, new_pos)
try:
return self._buf[self.pos:new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readline(self, length=None):
if self.closed:
raise ValueError('I/O operation on closed file')
nl_pos = -1
if self._buf:
nl_pos = self._buf.find(_newline(self._buf), self.pos)
buf = []
try:
pos = self.pos
while nl_pos < 0:
item = next(self._gen)
local_pos = item.find(_newline(item))
buf.append(item)
if local_pos >= 0:
nl_pos = pos + local_pos
break
pos += len(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
if self._buf is None:
return self.sentinel
if nl_pos < 0:
new_pos = len(self._buf)
else:
new_pos = nl_pos + 1
if length is not None and self.pos + length < new_pos:
new_pos = self.pos + length
try:
return self._buf[self.pos:new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
| agpl-3.0 | -6,228,348,602,934,988,000 | 29.976879 | 78 | 0.564844 | false |
denny820909/builder | lib/python2.7/site-packages/python_dateutil-1.5-py2.7.egg/dateutil/tz.py | 270 | 32741 | """
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
`self._name`,
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
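# Illustrative sketch (not part of the original module): tzutc and tzlocal are
# ordinary tzinfo implementations, so they can be attached to datetimes
# directly.  The helper name is made up for illustration.
def _local_vs_utc_example():
    """Express the same instant in UTC and in the local zone."""
    utc_now = datetime.datetime.now(tzutc())
    local_now = utc_now.astimezone(tzlocal())
    # Both name the same point in time; only utcoffset()/tzname() differ.
    return utc_now, local_now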
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, basestring):
self._filename = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = `fileobj`
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4) != "TZif":
raise ValueError, "magic not found"
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt)
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1,-1,-1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
# way to implement this.
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._filename`)
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError, "Unpickable %s class" % self.__class__.__name__
return (self.__class__, (self._filename,))
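# Illustrative sketch (not part of the original module): tzfile parses a binary
# TZif entry such as /etc/localtime.  That path is an assumption -- it exists
# only on typical Unix systems, hence the guard.
def _tzfile_example():
    """Load the system zone file, if present, and name 'now' in that zone."""
    path = "/etc/localtime"
    if not os.path.isfile(path):
        return None
    return datetime.datetime.now(tzfile(path)).tzname()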
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year,1,1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
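# Illustrative sketch (not part of the original module): with no explicit
# start/end rules, tzrange applies the historical first-Sunday-in-April /
# last-Sunday-in-October defaults implemented above.  Offsets are in seconds.
def _tzrange_example():
    """A US-Eastern-like zone expressed directly as offsets."""
    eastern = tzrange("EST", -5 * 3600, "EDT")
    return datetime.datetime(2010, 7, 15, 12, 0, tzinfo=eastern).tzname()  # 'EDT'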
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError, "unknown string format"
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
self._s = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = `fileobj`
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
if len(keys) == 0:
raise ValueError, "no timezones defined"
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError, "unknown component: "+value
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
if not tzid:
raise ValueError, \
"mandatory TZID not found"
if not comps:
raise ValueError, \
"at least one component is needed"
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
if tzoffsetto is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
            name = name[1:]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ','_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
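# Illustrative sketch (not part of the original module): gettz() is the usual
# entry point -- it falls back from $TZ and system files to the bundled
# zoneinfo data and finally to TZ-string parsing.  The zone name below is an
# assumption; any Olson name known to the local database works.
def _gettz_example():
    """Resolve a named zone and attach it to a datetime."""
    zone = gettz("Europe/Paris")
    if zone is None:            # name unknown on this system
        zone = tzutc()
    return datetime.datetime(2010, 6, 1, 12, 0, tzinfo=zone).tzname()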
# vim:ts=4:sw=4:et
| mit | 4,681,741,642,012,700,000 | 33.427971 | 77 | 0.491708 | false |
fuzeman/Catalytic | deluge/ui/console/commands/help.py | 8 | 3063 | # help.py
#
# Copyright (C) 2008-2009 Ido Abramovich <[email protected]>
# Copyright (C) 2009 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from twisted.internet import defer
from deluge.ui.console.main import BaseCommand
import deluge.ui.console.colors as colors
import deluge.component as component
class Command(BaseCommand):
"""displays help on other commands"""
usage = "Usage: help [command]"
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
self._commands = self.console._commands
deferred = defer.succeed(True)
if args:
if len(args) > 1:
                self.console.write(self.usage)
return deferred
try:
cmd = self._commands[args[0]]
except KeyError:
self.console.write("{!error!}Unknown command %r" % args[0])
return deferred
try:
parser = cmd.create_parser()
self.console.write(parser.format_help())
except AttributeError, e:
self.console.write(cmd.__doc__ or 'No help for this command')
else:
max_length = max( len(k) for k in self._commands)
self.console.set_batch_write(True)
for cmd in sorted(self._commands):
self.console.write("{!info!}" + cmd + "{!input!} - " + self._commands[cmd].__doc__ or '')
self.console.write(" ")
self.console.write('For help on a specific command, use "<command> --help"')
self.console.set_batch_write(False)
return deferred
def complete(self, line):
return [x for x in component.get("ConsoleUI")._commands if x.startswith(line)]
| gpl-3.0 | 8,266,724,967,275,697,000 | 38.779221 | 105 | 0.652628 | false |
tcffisher/namebench | libnamebench/better_webbrowser.py | 175 | 4191 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for webbrowser library, to invoke the http handler on win32."""
__author__ = '[email protected] (Thomas Stromberg)'
import os.path
import subprocess
import sys
import traceback
import webbrowser
import util
def output(string):
print string
def create_win32_http_cmd(url):
"""Create a command-line tuple to launch a web browser for a given URL.
Args:
url: string
Returns:
tuple of: (executable, arg1, arg2, ...)
At the moment, this ignores all default arguments to the browser.
TODO(tstromberg): Properly parse the command-line arguments.
"""
browser_type = None
try:
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
'Software\Classes\http\shell\open\command')
browser_type = 'user'
except WindowsError:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'Software\Classes\http\shell\open\command')
browser_type = 'machine'
except:
return False
cmd = _winreg.EnumValue(key, 0)[1]
# "C:\blah blah\iexplore.exe" -nohome
# "C:\blah blah\firefox.exe" -requestPending -osint -url "%1"
if '"' in cmd:
executable = cmd.split('"')[1]
else:
executable = cmd.split(' ')[0]
if not os.path.exists(executable):
output('$ Default HTTP browser does not exist: %s' % executable)
return False
else:
output('$ %s HTTP handler: %s' % (browser_type, executable))
return (executable, url)
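# Illustrative sketch (not part of the original module): the tuple returned
# above is handed directly to subprocess.Popen.  The wrapper below is
# hypothetical and guarded, since the registry lookup only exists on Windows.
def _default_handler_example(url='http://example.com/'):
    """Return the browser command tuple on win32, or None elsewhere."""
    if sys.platform[:3] != 'win':
        return None
    return create_win32_http_cmd(url)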
def open(url):
"""Opens a URL, overriding the normal webbrowser.open methods for sanity."""
try:
webbrowser.open(url, new=1, autoraise=True)
# If the user is missing the osascript binary - see
# http://code.google.com/p/namebench/issues/detail?id=88
except:
output('Failed to open: [%s]: %s' % (url, util.GetLastExceptionString()))
if os.path.exists('/usr/bin/open'):
try:
output('trying open: %s' % url)
p = subprocess.Popen(('open', url))
p.wait()
except:
output('open did not seem to work: %s' % util.GetLastExceptionString())
elif sys.platform[:3] == 'win':
try:
output('trying default Windows controller: %s' % url)
controller = webbrowser.get('windows-default')
controller.open_new(url)
except:
output('WindowsController did not work: %s' % util.GetLastExceptionString())
# *NOTE*: EVIL IMPORT SIDE EFFECTS AHEAD!
#
# If we are running on Windows, register the WindowsHttpDefault class.
if sys.platform[:3] == 'win':
import _winreg
# We don't want to load this class by default, because Python 2.4 doesn't have BaseBrowser.
class WindowsHttpDefault(webbrowser.BaseBrowser):
"""Provide an alternate open class for Windows user, using the http handler."""
def open(self, url, new=0, autoraise=1):
command_args = create_win32_http_cmd(url)
if not command_args:
output('$ Could not find HTTP handler')
return False
output('command_args:')
output(command_args)
# Avoid some unicode path issues by moving our current directory
old_pwd = os.getcwd()
os.chdir('C:\\')
try:
_unused = subprocess.Popen(command_args)
os.chdir(old_pwd)
return True
except:
traceback.print_exc()
output('$ Failed to run HTTP handler, trying next browser.')
os.chdir(old_pwd)
return False
webbrowser.register('windows-http', WindowsHttpDefault, update_tryorder=-1)
| apache-2.0 | -7,149,999,936,689,766,000 | 30.238462 | 93 | 0.644953 | false |
aesteve/vertx-web | vertx-web/src/test/sockjs-protocol/ws4py/__init__.py | 4 | 2689 | # -*- coding: utf-8 -*-
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of ws4py nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import logging.handlers as handlers
__author__ = "Sylvain Hellegouarch"
__version__ = "0.5.1"
__all__ = ['WS_KEY', 'WS_VERSION', 'configure_logger', 'format_addresses']
WS_KEY = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
WS_VERSION = (8, 13)
def configure_logger(stdout=True, filepath=None, level=logging.INFO):
logger = logging.getLogger('ws4py')
logger.setLevel(level)
logfmt = logging.Formatter("[%(asctime)s] %(levelname)s %(message)s")
if filepath:
h = handlers.RotatingFileHandler(filepath, maxBytes=10485760, backupCount=3)
h.setLevel(level)
h.setFormatter(logfmt)
logger.addHandler(h)
if stdout:
import sys
h = logging.StreamHandler(sys.stdout)
h.setLevel(level)
h.setFormatter(logfmt)
logger.addHandler(h)
return logger
def format_addresses(ws):
me = ws.local_address
peer = ws.peer_address
if isinstance(me, tuple) and isinstance(peer, tuple):
me_ip, me_port = ws.local_address
peer_ip, peer_port = ws.peer_address
return "[Local => %s:%d | Remote => %s:%d]" % (me_ip, me_port, peer_ip, peer_port)
return "[Bound to '%s']" % me
| apache-2.0 | 3,067,784,132,048,281,600 | 39.134328 | 90 | 0.716995 | false |
yyamano/RESTx | src/python/restxclient/restx_resource.py | 2 | 5575 | """
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Definition of the L{RestxResource} class.
"""
from restxclient.restx_client_exception import RestxClientException
from restxclient.restx_parameter import RestxParameter
from restxclient.restx_service import RestxAccessibleService
class RestxResource(object):
"""
Represents information about a resource on a RESTx server.
This representation can be used by clients to find out about
component capabilities and also as a starting point to create
new resources, by utilizing the get_resource_template() function.
"""
# The keys to the component's meta data dictionary.
__NAME_KEY = "name"
__DESC_KEY = "desc"
__URI_KEY = "uri"
__SERVICES_KEY = "services"
__server = None # Reference to the server on which we reside (RestxServer)
__name = None # Name of this resource
__description = None # Description of this resource
__uri = None # URI of this resource
__services = None # Dictionary of service definitions
def __init__(self, server, rdesc):
"""
        Create a new resource representation in memory.
@param server: The RESTx server on which the resource resides.
@type server: L{RestxServer}
@param rdesc: Dictionary describing the server resource. This
is the dictionary returned by the server when a
                      resource URI is accessed.
@type rdesc: dict
"""
self.__server = server
try:
self.__name = rdesc[self.__NAME_KEY]
self.__description = rdesc[self.__DESC_KEY]
self.__uri = rdesc[self.__URI_KEY]
sdict = rdesc[self.__SERVICES_KEY]
# Parse the service dictionary and attempt to translate
# this to a dictionary of proper RestxAccessibleService objects.
self.__services = dict()
for sname, sdef in sdict.items():
self.__services[sname] = RestxAccessibleService(self, sname, sdef)
except KeyError, e:
raise RestxClientException("Server error: Expected key '%s' missing in definition of resource '%s'." % (str(e), self.__name))
def __str__(self):
"""
Return a string representation of this resource.
"""
buf = \
"""RestxResource: %s
Description: %s
URI: %s
Services:""" % (self.__name, self.__description, self.__uri)
if self.__services:
for sname, sdef in self.__services.items():
buf += "\n----------------\n" + str(sdef)
return buf
def get_name(self):
"""
Return the name of the resource.
@return: Name of resource.
@rtype: string
"""
return self.__name
def get_description(self):
"""
Return the description of the resource.
@return: Description of the resource.
@rtype: string
"""
return self.__description
def get_uri(self):
"""
Return the URI of the resource.
@return: URI of the resource.
@rtype: string
"""
return self.__uri
def get_server(self):
"""
Return the L{RestxServer} object of the server on which this resource lives.
@return: The server of this resource.
@rtype: L{RestxServer}
"""
return self.__server
def get_all_services(self):
"""
Return all services defined for this resource.
@return: Dictionary of all services.
@rtype: dict of L{RestxAccessibleService}
"""
return self.__services
def get_service(self, name):
"""
Return one service of this resource.
@param name: Name of the service.
@type name: string
@return: Dictionary of service definition.
@rtype: L{RestxAccessibleService}
"""
try:
return self.__services[name]
except KeyError:
raise RestxClientException("Service '%s' not defined." % name)
def delete(self):
"""
Delete the resource on the server.
"""
self.__server._send(self.__uri, method="DELETE", status=200)
#
# For convenience, we offer read access to several
# elements via properties.
#
name = property(get_name, None)
description = property(get_description, None)
uri = property(get_uri, None)
server = property(get_server, None)
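# Illustrative usage sketch (an addition for clarity, not original code). It
# assumes a RestxServer class with a get_resource() accessor, which is how the
# restxclient package is normally driven; the URL and resource name below are
# placeholders.
#
#     from restxclient.restx_server import RestxServer
#
#     server   = RestxServer("http://localhost:8001")
#     resource = server.get_resource("MyResourceName")
#     print resource.name, resource.uri
#     service  = resource.get_service("some_service")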
| gpl-3.0 | 4,342,963,468,616,502,300 | 30.676136 | 137 | 0.575964 | false |
provaleks/o8 | addons/report_webkit/__init__.py | 382 | 1593 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import header
import company
import report_helper
import webkit_report
import ir_report
import wizard
import convert
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,497,249,088,537,903,000 | 37.853659 | 80 | 0.699309 | false |
openstack/vitrage | vitrage/evaluator/actions/priority_tools.py | 1 | 1972 | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.common.constants import VertexProperties as VProps
from vitrage.entity_graph.mappings.datasource_info_mapper \
import DEFAULT_INFO_MAPPER
from vitrage.evaluator.template_fields import TemplateFields
class BaselineTools(object):
@staticmethod
def get_score(action_info):
return 1 # no priorities
@classmethod
def get_extra_info(cls, action_specs):
return None
class RaiseAlarmTools(object):
def __init__(self, scores):
self.scores = scores
def get_score(self, action_info):
severity = action_info.specs.properties[TemplateFields.SEVERITY]
return self.scores.get(severity.upper(), 0)
@classmethod
def get_extra_info(cls, action_specs):
return action_specs.properties[TemplateFields.ALARM_NAME]
class SetStateTools(object):
def __init__(self, scores):
self.scores = scores
def get_score(self, action_info):
state = action_info.specs.properties[TemplateFields.STATE].upper()
target_resource = action_info.specs.targets[TemplateFields.TARGET]
target_vitrage_type = target_resource[VProps.VITRAGE_TYPE]
score_name = target_vitrage_type \
if target_vitrage_type in self.scores else DEFAULT_INFO_MAPPER
return self.scores[score_name].get(state, 0)
@classmethod
def get_extra_info(cls, action_specs):
return None
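# Illustrative sketch (added for clarity, not original code). The shape of the
# scores mapping below is an assumption inferred from get_score() above: a
# per-datasource dict of state name -> numeric score, with DEFAULT_INFO_MAPPER
# used as the fallback key.
#
#     scores = {'nova.instance': {'ERROR': 30, 'SUBOPTIMAL': 20, 'OK': 10}}
#     tools = SetStateTools(scores)
#     # tools.get_score(action_info) looks up the target's type and state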
| apache-2.0 | -6,377,092,506,443,881,000 | 31.866667 | 75 | 0.713489 | false |
jasonmccampbell/scipy-refactor | scipy/sparse/linalg/dsolve/linsolve.py | 8 | 9161 | from warnings import warn
from numpy import asarray
from scipy.sparse import isspmatrix_csc, isspmatrix_csr, isspmatrix, \
SparseEfficiencyWarning, csc_matrix
import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
import umfpack
noScikit = True
isUmfpack = hasattr( umfpack, 'UMFPACK_OK' )
useUmfpack = True
__all__ = [ 'use_solver', 'spsolve', 'splu', 'spilu', 'factorized' ]
def use_solver( **kwargs ):
"""
Valid keyword arguments with defaults (other ignored):
useUmfpack = True
assumeSortedIndices = False
The default sparse solver is umfpack when available. This can be changed by
passing useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    you are sure that the matrix fulfills this, pass assumeSortedIndices=True
to gain some speed.
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
if isUmfpack:
umfpack.configure( **kwargs )
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b
"""
if isspmatrix( b ):
b = b.toarray()
if b.ndim > 1:
if max( b.shape ) == b.size:
b = b.squeeze()
else:
raise ValueError("rhs must be a vector (has shape %s)" % (b.shape,))
if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
A = csc_matrix(A)
warn('spsolve requires CSC or CSR matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
if M != b.size:
raise ValueError("matrix - rhs size mismatch (%s - %s)"
% (A.shape, b.size))
use_umfpack = use_umfpack and useUmfpack
if isUmfpack and use_umfpack:
if noScikit:
warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,'
' install scikits.umfpack instead', DeprecationWarning )
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
b = asarray(b, dtype=A.dtype).reshape(-1)
family = {'d' : 'di', 'D' : 'zi'}
umf = umfpack.UmfpackContext( family[A.dtype.char] )
return umf.linsolve( umfpack.UMFPACK_A, A, b,
autoTranspose = True )
else:
if isspmatrix_csc(A):
flag = 1 # CSC format
elif isspmatrix_csr(A):
flag = 0 # CSR format
else:
A = csc_matrix(A)
flag = 1
b = asarray(b, dtype=A.dtype)
options = dict(ColPerm=permc_spec)
return _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr, b, flag,
options=options)[0]
def splu(A, permc_spec=None, diag_pivot_thresh=None,
drop_tol=None, relax=None, panel_size=None, options=dict()):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A
Sparse matrix to factorize. Should be in CSR or CSC format.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [SLU]_
drop_tol : float, optional
(deprecated) No effect.
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [SLU]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [SLU]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [SLU]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.dsolve._superlu.SciPyLUType
Object, which has a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") #is this true?
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix A.
The resulting object is an approximation to the inverse of A.
Parameters
----------
A
Sparse matrix to factorize
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
milu : str, optional
Which version of modified ILU to use. (Choices: ``silu``,
``smilu_1``, ``smilu_2`` (default), ``smilu_3``.)
Remaining other options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.dsolve._superlu.SciPyLUType
Object, which has a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
To improve the better approximation to the inverse, you may need to
increase ``fill_factor`` AND decrease ``drop_tol``.
This function uses the SuperLU library.
References
----------
.. [SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") #is this true?
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=True, options=_options)
def factorized( A ):
"""
    Return a function for solving a sparse linear system, with A pre-factorized.
Example:
solve = factorized( A ) # Makes LU decomposition.
x1 = solve( rhs1 ) # Uses the LU factors.
x2 = solve( rhs2 ) # Uses again the LU factors.
"""
if isUmfpack and useUmfpack:
if noScikit:
warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,'
' install scikits.umfpack instead', DeprecationWarning )
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() #upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
family = {'d' : 'di', 'D' : 'zi'}
umf = umfpack.UmfpackContext( family[A.dtype.char] )
# Make LU decomposition.
umf.numeric( A )
def solve( b ):
return umf.solve( umfpack.UMFPACK_A, A, b, autoTranspose = True )
return solve
else:
return splu( A ).solve
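# Minimal usage sketch (added for illustration); it relies only on the public
# helpers defined above and is never called by the module itself.
def _example_usage():
    """Solve a tiny sparse system with spsolve() and again via factorized()."""
    import numpy as np
    A = csc_matrix(np.array([[3.0, 0.0], [1.0, 2.0]]))
    b = np.array([3.0, 5.0])
    x_direct = spsolve(A, b)      # one-shot solve
    solve = factorized(A)         # reuse the LU factors for many right-hand sides
    x_reused = solve(b)
    return x_direct, x_reused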
| bsd-3-clause | -4,836,208,291,962,912,000 | 31.371025 | 82 | 0.606484 | false |
Skoda091/alfred-deepl | lib/urllib3/fields.py | 288 | 5943 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if not six.PY3 and isinstance(value, six.text_type): # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
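# Minimal usage sketch (added for illustration); it exercises only the public
# helpers defined above and is never called by the module itself.
def _example_usage():
    """Build a multipart field from an old-style (filename, data) tuple."""
    field = RequestField.from_tuples('attachment', ('report.txt', 'contents'))
    # from_tuples() already called make_multipart(), so the headers are ready.
    return field.render_headers()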
| mit | 3,889,735,669,155,261,400 | 32.38764 | 88 | 0.589096 | false |
xidui/shadowsocks | shadowsocks/crypto/table.py | 1044 | 8108 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']
cached_tables = {}
if hasattr(string, 'maketrans'):
maketrans = string.maketrans
translate = string.translate
else:
maketrans = bytes.maketrans
translate = bytes.translate
def get_table(key):
m = hashlib.md5()
m.update(key)
s = m.digest()
a, b = struct.unpack('<QQ', s)
table = maketrans(b'', b'')
table = [table[i: i + 1] for i in range(len(table))]
for i in range(1, 1024):
table.sort(key=lambda x: int(a % (ord(x) + i)))
return table
def init_table(key):
if key not in cached_tables:
encrypt_table = b''.join(get_table(key))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
cached_tables[key] = [encrypt_table, decrypt_table]
return cached_tables[key]
class TableCipher(object):
def __init__(self, cipher_name, key, iv, op):
self._encrypt_table, self._decrypt_table = init_table(key)
self._op = op
def update(self, data):
if self._op:
return translate(data, self._encrypt_table)
else:
return translate(data, self._decrypt_table)
ciphers = {
'table': (0, 0, TableCipher)
}
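# Minimal usage sketch (added for illustration); test_encryption() below
# exercises the same API through the shared test helper.
def _example_roundtrip(data=b'hello', key=b'secret'):
    """Encrypt then decrypt a byte string with the table cipher."""
    encryptor = TableCipher('table', key, b'', 1)   # op=1 -> encrypt
    decryptor = TableCipher('table', key, b'', 0)   # op=0 -> decrypt
    return decryptor.update(encryptor.update(data)) == data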
def test_table_result():
from shadowsocks.common import ord
target1 = [
[60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
[151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
target2 = [
[124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
[117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
encrypt_table = b''.join(get_table(b'foobar!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target1[0][i] == ord(encrypt_table[i]))
assert (target1[1][i] == ord(decrypt_table[i]))
encrypt_table = b''.join(get_table(b'barfoo!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target2[0][i] == ord(encrypt_table[i]))
assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
from shadowsocks.crypto import util
cipher = TableCipher('table', b'test', b'', 1)
decipher = TableCipher('table', b'test', b'', 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_table_result()
test_encryption()
| apache-2.0 | 6,492,421,710,571,807,000 | 45.597701 | 79 | 0.546374 | false |
PennyQ/glue-3d-viewer | glue_vispy_viewers/extern/vispy/scene/cameras/turntable.py | 20 | 5029 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .perspective import Base3DRotationCamera
class TurntableCamera(Base3DRotationCamera):
""" 3D camera class that orbits around a center point while
maintaining a view on a center point.
For this camera, the ``scale_factor`` indicates the zoom level, and
the ``center`` indicates the position to put at the center of the
view.
Parameters
----------
fov : float
Field of view. Zero (default) means orthographic projection.
elevation : float
Elevation angle in degrees. Positive angles place the camera
        above the center point, negative angles place the camera below
the center point.
azimuth : float
Azimuth angle in degrees. Zero degrees places the camera on the
positive x-axis, pointing in the negative x direction.
roll : float
Roll angle in degrees
distance : float | None
The distance of the camera from the rotation point (only makes sense
if fov > 0). If None (default) the distance is determined from the
scale_factor and fov.
**kwargs : dict
Keyword arguments to pass to `BaseCamera`.
Notes
-----
Interaction:
* LMB: orbits the view around its center point.
* RMB or scroll: change scale_factor (i.e. zoom level)
* SHIFT + LMB: translate the center point
* SHIFT + RMB: change FOV
"""
_state_props = Base3DRotationCamera._state_props + ('elevation',
'azimuth', 'roll')
def __init__(self, fov=0.0, elevation=30.0, azimuth=30.0, roll=0.0,
distance=None, **kwargs):
super(TurntableCamera, self).__init__(fov=fov, **kwargs)
# Set camera attributes
self.azimuth = azimuth
self.elevation = elevation
self.roll = roll # interaction not implemented yet
self.distance = distance # None means auto-distance
@property
def elevation(self):
""" The angle of the camera in degrees above the horizontal (x, z)
plane.
"""
return self._elevation
@elevation.setter
def elevation(self, elev):
elev = float(elev)
self._elevation = min(90, max(-90, elev))
self.view_changed()
@property
def azimuth(self):
""" The angle of the camera in degrees around the y axis. An angle of
0 places the camera within the (y, z) plane.
"""
return self._azimuth
@azimuth.setter
def azimuth(self, azim):
azim = float(azim)
while azim < -180:
azim += 360
while azim > 180:
azim -= 360
self._azimuth = azim
self.view_changed()
@property
def roll(self):
""" The angle of the camera in degrees around the z axis. An angle of
        0 puts the camera upright.
"""
return self._roll
@roll.setter
def roll(self, roll):
roll = float(roll)
while roll < -180:
roll += 360
while roll > 180:
roll -= 360
self._roll = roll
self.view_changed()
def orbit(self, azim, elev):
""" Orbits the camera around the center position.
Parameters
----------
azim : float
Angle in degrees to rotate horizontally around the center point.
elev : float
Angle in degrees to rotate vertically around the center point.
"""
self.azimuth += azim
self.elevation = np.clip(self.elevation + elev, -90, 90)
self.view_changed()
def _update_rotation(self, event):
"""Update rotation parmeters based on mouse movement"""
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
if self._event_value is None:
self._event_value = self.azimuth, self.elevation
self.azimuth = self._event_value[0] - (p2 - p1)[0] * 0.5
self.elevation = self._event_value[1] + (p2 - p1)[1] * 0.5
def _rotate_tr(self):
"""Rotate the transformation matrix based on camera parameters"""
up, forward, right = self._get_dim_vectors()
self.transform.rotate(self.elevation, -right)
self.transform.rotate(self.azimuth, up)
def _dist_to_trans(self, dist):
"""Convert mouse x, y movement into x, y, z translations"""
rae = np.array([self.roll, self.azimuth, self.elevation]) * np.pi / 180
sro, saz, sel = np.sin(rae)
cro, caz, cel = np.cos(rae)
dx = (+ dist[0] * (cro * caz + sro * sel * saz)
+ dist[1] * (sro * caz - cro * sel * saz))
dy = (+ dist[0] * (cro * saz - sro * sel * caz)
+ dist[1] * (sro * saz + cro * sel * caz))
dz = (- dist[0] * sro * cel + dist[1] * cro * cel)
return dx, dy, dz
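# Illustrative usage sketch (an addition, not original code). Attaching the
# camera to a SceneCanvas view is the usual vispy pattern; the canvas/view
# wiring below is only sketched here (and not imported) to avoid circular
# imports inside the cameras package.
#
#     from vispy import scene
#
#     canvas = scene.SceneCanvas(keys='interactive')
#     view = canvas.central_widget.add_view()
#     view.camera = TurntableCamera(fov=45.0, azimuth=30.0, elevation=30.0)
#     view.camera.orbit(azim=10, elev=-5)   # rotate programmatically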
| bsd-2-clause | 7,358,991,475,045,603,000 | 32.526667 | 79 | 0.583217 | false |
benesch/pip | pip/_vendor/requests/packages/urllib3/util/connection.py | 365 | 4744 | from __future__ import absolute_import
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
# Using the value from allowed_gai_family() in the context of getaddrinfo lets
# us select whether to work with IPv4 DNS records, IPv6 records, or both.
# The original create_connection function always returns all records.
family = allowed_gai_family()
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
"""This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records."""
family = socket.AF_INET
if HAS_IPV6:
family = socket.AF_UNSPEC
return family
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6('::1')
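# Minimal usage sketch (added for illustration); the host/port are placeholders
# and TCP_NODELAY is just one example of a (level, optname, value) option tuple.
def _example_usage(host='example.com', port=80):
    """Open a TCP connection with a timeout and one extra socket option."""
    sock = create_connection(
        (host, port),
        timeout=5.0,
        socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
    try:
        return sock.getpeername()
    finally:
        sock.close()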
| mit | 7,948,240,712,715,729,000 | 31.944444 | 82 | 0.636172 | false |
kevinlondon/httpie | httpie/output/formatters/xml.py | 51 | 1959 | from __future__ import absolute_import
import re
from xml.etree import ElementTree
from httpie.plugins import FormatterPlugin
DECLARATION_RE = re.compile('<\?xml[^\n]+?\?>', flags=re.I)
DOCTYPE_RE = re.compile('<!DOCTYPE[^\n]+?>', flags=re.I)
DEFAULT_INDENT = 4
def indent(elem, indent_text=' ' * DEFAULT_INDENT):
"""
In-place prettyprint formatter
C.f. http://effbot.org/zone/element-lib.htm#prettyprint
"""
def _indent(elem, level=0):
i = "\n" + level * indent_text
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + indent_text
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
return _indent(elem)
class XMLFormatter(FormatterPlugin):
# TODO: tests
def format_body(self, body, mime):
if 'xml' in mime:
# FIXME: orig NS names get forgotten during the conversion, etc.
try:
root = ElementTree.fromstring(body.encode('utf8'))
except ElementTree.ParseError:
# Ignore invalid XML errors (skips attempting to pretty print)
pass
else:
indent(root)
# Use the original declaration
declaration = DECLARATION_RE.match(body)
doctype = DOCTYPE_RE.match(body)
body = ElementTree.tostring(root, encoding='utf-8')\
.decode('utf8')
if doctype:
body = '%s\n%s' % (doctype.group(0), body)
if declaration:
body = '%s\n%s' % (declaration.group(0), body)
return body
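# Illustrative usage sketch (an addition, not original code). How the formatter
# is instantiated depends on httpie's plugin manager, so the kwargs are only
# assumed here.
#
#     formatter = XMLFormatter(**formatter_kwargs)   # kwargs supplied by httpie
#     pretty = formatter.format_body('<root><a>1</a></root>', 'application/xml')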
| bsd-3-clause | -6,741,550,168,998,341,000 | 31.114754 | 78 | 0.533435 | false |
tmenjo/cinder-2015.1.1 | cinder/flow_utils.py | 6 | 2961 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow
from taskflow.listeners import base
from taskflow.listeners import logging as logging_listener
from taskflow import task
from cinder import exception
LOG = logging.getLogger(__name__)
def _make_task_name(cls, addons=None):
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
class CinderTask(task.Task):
"""The root task class for all cinder tasks.
It automatically names the given task using the module and class that
implement the given task as the task name.
"""
def __init__(self, addons=None, **kwargs):
super(CinderTask, self).__init__(_make_task_name(self.__class__,
addons),
**kwargs)
class DynamicLogListener(logging_listener.DynamicLoggingListener):
"""This is used to attach to taskflow engines while they are running.
It provides a bunch of useful features that expose the actions happening
inside a taskflow engine, which can be useful for developers for debugging,
for operations folks for monitoring and tracking of the resource actions
and more...
"""
    #: Expected exception types; don't include a traceback in the log if one of these is raised.
_NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError)
def __init__(self, engine,
task_listen_for=base.DEFAULT_LISTEN_FOR,
flow_listen_for=base.DEFAULT_LISTEN_FOR,
retry_listen_for=base.DEFAULT_LISTEN_FOR,
logger=LOG):
super(DynamicLogListener, self).__init__(
engine,
task_listen_for=task_listen_for,
flow_listen_for=flow_listen_for,
retry_listen_for=retry_listen_for,
log=logger)
def _format_failure(self, fail):
if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None:
exc_info = None
exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False))
return (exc_info, exc_details)
else:
return super(DynamicLogListener, self)._format_failure(fail)
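# Illustrative sketch (an addition, not original code): a trivial CinderTask
# subclass showing that the task name is derived automatically from the module
# and class by _make_task_name(); it is not referenced anywhere in Cinder.
class _ExampleTask(CinderTask):
    """Placeholder task that only logs its auto-generated name."""
    def execute(self):
        LOG.debug('running task %s', self.name)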
| apache-2.0 | 5,150,556,213,102,259,000 | 36.961538 | 79 | 0.645728 | false |
Maistho/CouchPotatoServer | couchpotato/core/media/_base/media/main.py | 65 | 21493 | from datetime import timedelta
import time
import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound, RecordDeleted
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex
log = CPLog(__name__)
class MediaPlugin(MediaBase):
_database = {
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_tag': MediaTagIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
'media_children': MediaChildrenIndex,
}
def __init__(self):
addApiView('media.refresh', self.refresh, docs = {
'desc': 'Refresh a any media type by ID',
'params': {
'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all media starting with the letter "a"'},
'search': {'desc': 'Search media title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any media returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete a media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView, priority = 100)
addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('app.load', self.cleanupFaults)
addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus)
addEvent('media.with_identifiers', self.withIdentifiers)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
addEvent('media.tag', self.tag)
addEvent('media.untag', self.unTag)
# Wrongly tagged media files
def cleanupFaults(self):
medias = fireEvent('media.with_status', 'ignored', single = True) or []
db = get_db()
for media in medias:
try:
media['status'] = 'done'
db.update(media)
except:
pass
def refresh(self, id = '', **kwargs):
handlers = []
ids = splitString(id)
for x in ids:
refresh_handler = self.createRefreshHandler(x)
if refresh_handler:
handlers.append(refresh_handler)
fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
fireEventAsync('schedule.queue', handlers = handlers)
return {
'success': True,
}
def createRefreshHandler(self, media_id):
try:
media = get_db().get('id', media_id)
event = '%s.update' % media.get('type')
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
try:
db = get_db()
imdb_id = getImdb(str(media_id))
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
except (RecordNotFound, RecordDeleted):
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
media = self.get(id) if id else None
return {
'success': media is not None,
'media': media,
}
def withStatus(self, status, types = None, with_doc = True):
db = get_db()
if types and not isinstance(types, (list, tuple)):
types = [types]
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('media_status', s):
if with_doc:
try:
doc = db.get('id', ms['_id'])
if types and doc.get('type') not in types:
continue
yield doc
except (RecordDeleted, RecordNotFound):
log.debug('Record not found, skipping: %s', ms['_id'])
except (ValueError, EOFError):
fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
else:
yield ms
def withIdentifiers(self, identifiers, with_doc = False):
db = get_db()
for x in identifiers:
try:
return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
except:
pass
log.debug('No media found with identifiers: %s', identifiers)
return False
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
if with_tags and not isinstance(with_tags, (list, tuple)):
with_tags = [with_tags]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = list(all_media_ids)
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Add search filters
if starts_with:
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Add tag filter
if with_tags:
filter_by['with_tags'] = set()
for tag in with_tags:
for x in db.get_many('media_tag', tag):
filter_by['with_tags'].add(x['_id'])
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
del filter_by['media_status']
del filter_by['release_status']
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
total_count = len(media_ids)
if total_count == 0:
return 0, []
offset = 0
limit = -1
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
offset = tryInt(0 if len(splt) is 1 else splt[1])
# List movies based on title order
medias = []
for m in db.all('media_title'):
media_id = m['_id']
if media_id not in media_ids: continue
if offset > 0:
offset -= 1
continue
media = fireEvent('media.get', media_id, single = True)
# Skip if no media has been found
if not media:
continue
# Merge releases with movie dict
medias.append(media)
# remove from media ids
media_ids.remove(media_id)
if len(media_ids) == 0 or len(medias) == limit: break
return total_count, medias
def listView(self, **kwargs):
total_movies, movies = self.list(
types = splitString(kwargs.get('type')),
status = splitString(kwargs.get('status')),
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
with_tags = splitString(kwargs.get('with_tags')),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
tempList = lambda *args, **kwargs : self.listView(type = media_type, **kwargs)
addApiView('%s.list' % media_type, tempList, docs = {
'desc': 'List media',
'params': {
'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'},
'search': {'desc': 'Search ' + media_type + ' title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any """ + media_type + """s returned or not,
'media': array, media found,
}"""}
})
def availableChars(self, types = None, status = None, release_status = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = all_media_ids
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
chars = set()
for x in db.all('media_startswith'):
if x['_id'] in media_ids:
chars.add(x['key'])
if len(chars) == 27:
break
return list(chars)
def charView(self, **kwargs):
type = splitString(kwargs.get('type', 'movie'))
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(type, status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
tempChar = lambda *args, **kwargs : self.charView(type = media_type, **kwargs)
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
try:
db = get_db()
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
for release in media_releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.get('status') != 'done':
db.delete(release)
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done' or media.get('status') == 'done':
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
# Remove profile (no use for in manage)
if new_media_status == 'done':
media['profile_id'] = None
db.update(media)
fireEvent('media.untag', media['_id'], 'recent', single = True)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
if deleted:
fireEvent('notify.frontend', type = 'media.deleted', data = media)
except:
log.error('Failed deleting media: %s', traceback.format_exc())
return True
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for media_id in ids:
self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
tempDelete = lambda *args, **kwargs : self.deleteView(type = media_type, **kwargs)
addApiView('%s.delete' % media_type, tempDelete, docs = {
'desc': 'Delete a ' + media_type + ' from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'},
}
})
def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
try:
db = get_db()
m = db.get('id', media_id)
previous_status = m['status']
log.debug('Changing status for %s', getTitle(m))
if not m['profile_id']:
m['status'] = 'done'
else:
m['status'] = 'active'
try:
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
done_releases = [release for release in media_releases if release.get('status') == 'done']
if done_releases:
# Check if we are finished with the media
for release in done_releases:
if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
m['status'] = 'done'
break
elif previous_status == 'done':
m['status'] = 'done'
except RecordNotFound:
log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
m['status'] = previous_status
# Only update when status has changed
if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
db.update(m)
# Tag media as recent
if tag_recent:
self.tag(media_id, 'recent', update_edited = True)
return m['status']
except:
log.error('Failed restatus: %s', traceback.format_exc())
def tag(self, media_id, tag, update_edited = False):
try:
db = get_db()
m = db.get('id', media_id)
if update_edited:
m['last_edit'] = int(time.time())
tags = m.get('tags') or []
if tag not in tags:
tags.append(tag)
m['tags'] = tags
db.update(m)
return True
except:
log.error('Failed tagging: %s', traceback.format_exc())
return False
def unTag(self, media_id, tag):
try:
db = get_db()
m = db.get('id', media_id)
tags = m.get('tags') or []
if tag in tags:
new_tags = list(set(tags))
new_tags.remove(tag)
m['tags'] = new_tags
db.update(m)
return True
except:
log.error('Failed untagging: %s', traceback.format_exc())
return False
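# Illustrative note (an addition, not original code): other plugins reach the
# methods above through the event bus rather than direct calls, e.g.
#
#     fireEvent('media.get', media_id, single = True)
#     fireEvent('media.tag', media_id, 'recent', single = True)
#
# which route to MediaPlugin.get() and MediaPlugin.tag() via the addEvent()
# registrations in __init__().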
| gpl-3.0 | -9,063,155,015,253,226,000 | 35.803082 | 221 | 0.520821 | false |
TeamEOS/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py | 658 | 4406 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""
import logging
from mod_pywebsocket import common
from mod_pywebsocket.handshake import hybi00
from mod_pywebsocket.handshake import hybi
# Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
_LOGGER = logging.getLogger(__name__)
def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
"""Performs WebSocket handshake.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
allowDraft75: obsolete argument. ignored.
strict: obsolete argument. ignored.
Handshaker will add attributes such as ws_resource in performing
handshake.
"""
_LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
    # To print mimetools.Message as an escaped one-line string, we convert
    # headers_in to a dict object. Without conversion, if we use %r, it just
# prints the type and address, and if we use %s, it prints the original
# header string as multiple lines.
#
# Both mimetools.Message and MpTable_Type of mod_python can be
# converted to dict.
#
# mimetools.Message.__str__ returns the original header string.
# dict(mimetools.Message object) returns the map from header names to
# header values. While MpTable_Type doesn't have such __str__ but just
# __repr__ which formats itself as well as dictionary object.
_LOGGER.debug(
'Client\'s opening handshake headers: %r', dict(request.headers_in))
handshakers = []
handshakers.append(
('RFC 6455', hybi.Handshaker(request, dispatcher)))
handshakers.append(
('HyBi 00', hybi00.Handshaker(request, dispatcher)))
for name, handshaker in handshakers:
_LOGGER.debug('Trying protocol version %s', name)
try:
handshaker.do_handshake()
_LOGGER.info('Established (%s protocol)', name)
return
except HandshakeException, e:
_LOGGER.debug(
'Failed to complete opening handshake as %s protocol: %r',
name, e)
if e.status:
raise e
except AbortedByUserException, e:
raise
except VersionException, e:
raise
# TODO(toyoshim): Add a test to cover the case all handshakers fail.
raise HandshakeException(
'Failed to complete opening handshake for all available protocols',
status=common.HTTP_STATUS_BAD_REQUEST)
# vi:sts=4 sw=4 et
| bsd-3-clause | 3,617,459,003,068,414,000 | 39.054545 | 76 | 0.72197 | false |
openstack/python-magnumclient | magnumclient/common/utils.py | 1 | 11677 | # -*- coding: utf-8 -*-
#
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from cryptography.x509.oid import NameOID
from oslo_serialization import base64
from oslo_serialization import jsonutils
from magnumclient import exceptions as exc
from magnumclient.i18n import _
def common_filters(marker=None, limit=None, sort_key=None, sort_dir=None):
"""Generate common filters for any list request.
:param marker: entity ID from which to start returning entities.
:param limit: maximum number of entities to return.
:param sort_key: field to use for sorting.
:param sort_dir: direction of sorting: 'asc' or 'desc'.
:returns: list of string filters.
"""
filters = []
if isinstance(limit, int):
filters.append('limit=%s' % limit)
if marker is not None:
filters.append('marker=%s' % marker)
if sort_key is not None:
filters.append('sort_key=%s' % sort_key)
if sort_dir is not None:
filters.append('sort_dir=%s' % sort_dir)
return filters
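# Illustrative usage (editor's sketch, not part of the original module): the
# helper only builds query-string fragments, so for example
#
#     common_filters(marker='abc-123', limit=20,
#                    sort_key='name', sort_dir='asc')
#
# returns ['limit=20', 'marker=abc-123', 'sort_key=name', 'sort_dir=asc'],
# ready to be joined with '&' by the caller.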
def split_and_deserialize(string):
"""Split and try to JSON deserialize a string.
    Gets a string in the KEY=VALUE format, splits it (using '=' as the
    separator) and tries to JSON deserialize the VALUE.
:returns: A tuple of (key, value).
"""
try:
key, value = string.split("=", 1)
except ValueError:
raise exc.CommandError(_('Attributes must be a list of '
'PATH=VALUE not "%s"') % string)
try:
value = jsonutils.loads(value)
except ValueError:
pass
return (key, value)
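# Illustrative usage (editor's sketch, not part of the original module):
#
#     split_and_deserialize('vcpus=4')      # -> ('vcpus', 4)
#     split_and_deserialize('name=master')  # -> ('name', 'master')
#
# The second VALUE stays a plain string because 'master' is not valid JSON.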
def args_array_to_patch(op, attributes):
patch = []
for attr in attributes:
# Sanitize
if not attr.startswith('/'):
attr = '/' + attr
if op in ['add', 'replace']:
path, value = split_and_deserialize(attr)
if path == "/labels" or path == "/health_status_reason":
a = []
a.append(value)
value = str(handle_labels(a))
patch.append({'op': op, 'path': path, 'value': value})
else:
patch.append({'op': op, 'path': path, 'value': value})
elif op == "remove":
# For remove only the key is needed
patch.append({'op': op, 'path': attr})
else:
raise exc.CommandError(_('Unknown PATCH operation: %s') % op)
return patch
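# Illustrative usage (editor's sketch, not part of the original module):
#
#     args_array_to_patch('replace', ['node_count=3'])
#     # -> [{'op': 'replace', 'path': '/node_count', 'value': 3}]
#     args_array_to_patch('remove', ['docker_volume_size'])
#     # -> [{'op': 'remove', 'path': '/docker_volume_size'}]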
def handle_labels(labels):
labels = format_labels(labels)
if 'mesos_slave_executor_env_file' in labels:
environment_variables_data = handle_json_from_file(
labels['mesos_slave_executor_env_file'])
labels['mesos_slave_executor_env_variables'] = jsonutils.dumps(
environment_variables_data)
return labels
def format_labels(lbls, parse_comma=True):
'''Reformat labels into dict of format expected by the API.'''
if not lbls:
return {}
if parse_comma:
# expect multiple invocations of --labels but fall back
# to either , or ; delimited if only one --labels is specified
if len(lbls) == 1 and lbls[0].count('=') > 1:
lbls = lbls[0].replace(';', ',').split(',')
labels = {}
for lbl in lbls:
try:
(k, v) = lbl.split(('='), 1)
except ValueError:
raise exc.CommandError(_('labels must be a list of KEY=VALUE '
'not %s') % lbl)
if k not in labels:
labels[k] = v
else:
labels[k] += ",%s" % v
return labels
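# Illustrative usage (editor's sketch, not part of the original module):
#
#     format_labels(['kube_tag=v1.18.2,availability_zone=nova'])
#     # -> {'kube_tag': 'v1.18.2', 'availability_zone': 'nova'}
#
# Repeated keys are collapsed into a single comma-separated value rather
# than overwritten.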
def print_list_field(field):
return lambda obj: ', '.join(getattr(obj, field))
def handle_json_from_file(json_arg):
"""Attempts to read JSON file by the file url.
:param json_arg: May be a file name containing the JSON.
:returns: A list or dictionary parsed from JSON.
"""
try:
with open(json_arg, 'r') as f:
json_arg = f.read().strip()
json_arg = jsonutils.loads(json_arg)
except IOError as e:
err = _("Cannot get JSON from file '%(file)s'. "
"Error: %(err)s") % {'err': e, 'file': json_arg}
raise exc.InvalidAttribute(err)
except ValueError as e:
err = (_("For JSON: '%(string)s', error: '%(err)s'") %
{'err': e, 'string': json_arg})
raise exc.InvalidAttribute(err)
return json_arg
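# Illustrative usage (editor's sketch; the file name is hypothetical):
#
#     # given env.json containing {"HTTP_PROXY": "http://proxy:3128"}
#     handle_json_from_file('env.json')
#     # -> {'HTTP_PROXY': 'http://proxy:3128'}
#
# A missing file raises exc.InvalidAttribute instead of a bare IOError.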
def config_cluster(cluster, cluster_template, cfg_dir, force=False,
certs=None, use_keystone=False):
"""Return and write configuration for the given cluster."""
if cluster_template.coe == 'kubernetes':
return _config_cluster_kubernetes(cluster, cluster_template, cfg_dir,
force, certs, use_keystone)
elif (cluster_template.coe == 'swarm'
or cluster_template.coe == 'swarm-mode'):
return _config_cluster_swarm(cluster, cluster_template, cfg_dir,
force, certs)
def _config_cluster_kubernetes(cluster, cluster_template, cfg_dir,
force=False, certs=None, use_keystone=False):
"""Return and write configuration for the given kubernetes cluster."""
cfg_file = "%s/config" % cfg_dir
if cluster_template.tls_disabled or certs is None:
cfg = ("apiVersion: v1\n"
"clusters:\n"
"- cluster:\n"
" server: %(api_address)s\n"
" name: %(name)s\n"
"contexts:\n"
"- context:\n"
" cluster: %(name)s\n"
" user: %(name)s\n"
" name: %(name)s\n"
"current-context: %(name)s\n"
"kind: Config\n"
"preferences: {}\n"
"users:\n"
"- name: %(name)s'\n"
% {'name': cluster.name, 'api_address': cluster.api_address})
else:
if not use_keystone:
cfg = ("apiVersion: v1\n"
"clusters:\n"
"- cluster:\n"
" certificate-authority-data: %(ca)s\n"
" server: %(api_address)s\n"
" name: %(name)s\n"
"contexts:\n"
"- context:\n"
" cluster: %(name)s\n"
" user: admin\n"
" name: default\n"
"current-context: default\n"
"kind: Config\n"
"preferences: {}\n"
"users:\n"
"- name: admin\n"
" user:\n"
" client-certificate-data: %(cert)s\n"
" client-key-data: %(key)s\n"
% {'name': cluster.name,
'api_address': cluster.api_address,
'key': base64.encode_as_text(certs['key']),
'cert': base64.encode_as_text(certs['cert']),
'ca': base64.encode_as_text(certs['ca'])})
else:
cfg = ("apiVersion: v1\n"
"clusters:\n"
"- cluster:\n"
" certificate-authority-data: %(ca)s\n"
" server: %(api_address)s\n"
" name: %(name)s\n"
"contexts:\n"
"- context:\n"
" cluster: %(name)s\n"
" user: openstackuser\n"
" name: openstackuser@kubernetes\n"
"current-context: openstackuser@kubernetes\n"
"kind: Config\n"
"preferences: {}\n"
"users:\n"
"- name: openstackuser\n"
" user:\n"
" exec:\n"
" command: /bin/bash\n"
" apiVersion: client.authentication.k8s.io/v1alpha1\n"
" args:\n"
" - -c\n"
" - >\n"
" if [ -z ${OS_TOKEN} ]; then\n"
" echo 'Error: Missing OpenStack credential from environment variable $OS_TOKEN' > /dev/stderr\n" # noqa
" exit 1\n"
" else\n"
" echo '{ \"apiVersion\": \"client.authentication.k8s.io/v1alpha1\", \"kind\": \"ExecCredential\", \"status\": { \"token\": \"'\"${OS_TOKEN}\"'\"}}'\n" # noqa
" fi\n"
% {'name': cluster.name,
'api_address': cluster.api_address,
'ca': base64.encode_as_text(certs['ca'])})
if os.path.exists(cfg_file) and not force:
raise exc.CommandError("File %s exists, aborting." % cfg_file)
else:
f = open(cfg_file, "w")
f.write(cfg)
f.close()
if 'csh' in os.environ['SHELL']:
return "setenv KUBECONFIG %s\n" % cfg_file
else:
return "export KUBECONFIG=%s\n" % cfg_file
def _config_cluster_swarm(cluster, cluster_template, cfg_dir,
force=False, certs=None):
"""Return and write configuration for the given swarm cluster."""
tls = "" if cluster_template.tls_disabled else True
if 'csh' in os.environ['SHELL']:
result = ("setenv DOCKER_HOST %(docker_host)s\n"
"setenv DOCKER_CERT_PATH %(cfg_dir)s\n"
"setenv DOCKER_TLS_VERIFY %(tls)s\n"
% {'docker_host': cluster.api_address,
'cfg_dir': cfg_dir,
'tls': tls}
)
else:
result = ("export DOCKER_HOST=%(docker_host)s\n"
"export DOCKER_CERT_PATH=%(cfg_dir)s\n"
"export DOCKER_TLS_VERIFY=%(tls)s\n"
% {'docker_host': cluster.api_address,
'cfg_dir': cfg_dir,
'tls': tls}
)
return result
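# Illustrative output (editor's sketch; the addresses are hypothetical): for
# a bash-like shell the returned snippet looks like
#
#     export DOCKER_HOST=tcp://172.24.4.5:2376
#     export DOCKER_CERT_PATH=/home/user/swarm-config
#     export DOCKER_TLS_VERIFY=True
#
# and the caller prints it so the user can eval it into their environment.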
def generate_csr_and_key():
"""Return a dict with a new csr and key."""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
csr = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"admin"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"system:masters")
])).sign(key, hashes.SHA256(), default_backend())
result = {
'csr': csr.public_bytes(
encoding=serialization.Encoding.PEM).decode("utf-8"),
'key': key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()).decode("utf-8"),
}
return result
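# Illustrative usage (editor's sketch, not part of the original module):
#
#     bundle = generate_csr_and_key()
#     bundle['csr']  # PEM text: '-----BEGIN CERTIFICATE REQUEST-----...'
#     bundle['key']  # PEM text: '-----BEGIN RSA PRIVATE KEY-----...'
#
# The CSR is submitted for signing (e.g. to the Magnum certificate API),
# while the private key never leaves the client.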
| apache-2.0 | 7,353,442,693,384,339,000 | 36.306709 | 189 | 0.525649 | false |
bgxavier/nova | nova/objects/compute_node.py | 8 | 15378 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import utils
# TODO(berrange): Remove NovaObjectDictCompat
class ComputeNode(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added get_by_service_id()
# Version 1.2: String attributes updated to support unicode
# Version 1.3: Added stats field
# Version 1.4: Added host ip field
# Version 1.5: Added numa_topology field
# Version 1.6: Added supported_hv_specs
# Version 1.7: Added host field
# Version 1.8: Added get_by_host_and_nodename()
# Version 1.9: Added pci_device_pools
# Version 1.10: Added get_first_node_by_host_for_old_compat()
# Version 1.11: PciDevicePoolList version 1.1
VERSION = '1.11'
fields = {
'id': fields.IntegerField(read_only=True),
'service_id': fields.IntegerField(),
'host': fields.StringField(nullable=True),
'vcpus': fields.IntegerField(),
'memory_mb': fields.IntegerField(),
'local_gb': fields.IntegerField(),
'vcpus_used': fields.IntegerField(),
'memory_mb_used': fields.IntegerField(),
'local_gb_used': fields.IntegerField(),
'hypervisor_type': fields.StringField(),
'hypervisor_version': fields.IntegerField(),
'hypervisor_hostname': fields.StringField(nullable=True),
'free_ram_mb': fields.IntegerField(nullable=True),
'free_disk_gb': fields.IntegerField(nullable=True),
'current_workload': fields.IntegerField(nullable=True),
'running_vms': fields.IntegerField(nullable=True),
'cpu_info': fields.StringField(nullable=True),
'disk_available_least': fields.IntegerField(nullable=True),
'metrics': fields.StringField(nullable=True),
'stats': fields.DictOfNullableStringsField(nullable=True),
'host_ip': fields.IPAddressField(nullable=True),
'numa_topology': fields.StringField(nullable=True),
# NOTE(pmurray): the supported_hv_specs field maps to the
# supported_instances field in the database
'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
# NOTE(pmurray): the pci_device_pools field maps to the
# pci_stats field in the database
'pci_device_pools': fields.ObjectField('PciDevicePoolList',
nullable=True),
}
obj_relationships = {
'pci_device_pools': [('1.9', '1.0'), ('1.11', '1.1')],
'supported_hv_specs': [('1.6', '1.0')],
}
def obj_make_compatible(self, primitive, target_version):
super(ComputeNode, self).obj_make_compatible(primitive, target_version)
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 7) and 'host' in primitive:
del primitive['host']
if target_version < (1, 5) and 'numa_topology' in primitive:
del primitive['numa_topology']
if target_version < (1, 4) and 'host_ip' in primitive:
del primitive['host_ip']
if target_version < (1, 3) and 'stats' in primitive:
# pre 1.3 version does not have a stats field
del primitive['stats']
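    # Illustrative behaviour (editor's sketch; 'node' is a hypothetical
    # ComputeNode instance): downgrading for an old RPC peer simply drops
    # the fields the older schema does not know about, e.g.
    #
    #     primitive = {'host': 'compute1', 'numa_topology': None, 'stats': {}}
    #     node.obj_make_compatible(primitive, '1.2')
    #     # primitive no longer contains 'host', 'numa_topology' or 'stats'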
@staticmethod
def _host_from_db_object(compute, db_compute):
if (('host' not in db_compute or db_compute['host'] is None)
and 'service_id' in db_compute
and db_compute['service_id'] is not None):
# FIXME(sbauza) : Unconverted compute record, provide compatibility
# This has to stay until we can be sure that any/all compute nodes
# in the database have been converted to use the host field
# Service field of ComputeNode could be deprecated in a next patch,
# so let's use directly the Service object
try:
service = objects.Service.get_by_id(
compute._context, db_compute['service_id'])
except exception.ServiceNotFound:
compute['host'] = None
return
try:
compute['host'] = service.host
except (AttributeError, exception.OrphanedObjectError):
# Host can be nullable in Service
compute['host'] = None
elif 'host' in db_compute and db_compute['host'] is not None:
# New-style DB having host as a field
compute['host'] = db_compute['host']
else:
# We assume it should not happen but in case, let's set it to None
compute['host'] = None
@staticmethod
def _from_db_object(context, compute, db_compute):
special_cases = set([
'stats',
'supported_hv_specs',
'host',
'pci_device_pools',
])
fields = set(compute.fields) - special_cases
for key in fields:
compute[key] = db_compute[key]
stats = db_compute['stats']
if stats:
compute['stats'] = jsonutils.loads(stats)
sup_insts = db_compute.get('supported_instances')
if sup_insts:
hv_specs = jsonutils.loads(sup_insts)
hv_specs = [objects.HVSpec.from_list(hv_spec)
for hv_spec in hv_specs]
compute['supported_hv_specs'] = hv_specs
pci_stats = db_compute.get('pci_stats')
compute.pci_device_pools = pci_device_pool.from_pci_stats(pci_stats)
compute._context = context
# Make sure that we correctly set the host field depending on either
# host column is present in the table or not
compute._host_from_db_object(compute, db_compute)
compute.obj_reset_changes()
return compute
@base.remotable_classmethod
def get_by_id(cls, context, compute_id):
db_compute = db.compute_node_get(context, compute_id)
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_by_service_id(cls, context, service_id):
db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): The old version returned a single item; we need to
        # keep this behaviour for backwards compatibility.
db_compute = db_computes[0]
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_by_host_and_nodename(cls, context, host, nodename):
try:
db_compute = db.compute_node_get_by_host_and_nodename(
context, host, nodename)
except exception.ComputeHostNotFound:
# FIXME(sbauza): Some old computes can still have no host record
# We need to provide compatibility by using the old service_id
# record.
# We assume the compatibility as an extra penalty of one more DB
# call but that's necessary until all nodes are upgraded.
try:
service = objects.Service.get_by_compute_host(context, host)
db_computes = db.compute_nodes_get_by_service_id(
context, service.id)
except exception.ServiceNotFound:
# We need to provide the same exception upstream
raise exception.ComputeHostNotFound(host=host)
db_compute = None
for compute in db_computes:
if compute['hypervisor_hostname'] == nodename:
db_compute = compute
# We can avoid an extra call to Service object in
# _from_db_object
db_compute['host'] = service.host
break
if not db_compute:
raise exception.ComputeHostNotFound(host=host)
return cls._from_db_object(context, cls(), db_compute)
@base.remotable_classmethod
def get_first_node_by_host_for_old_compat(cls, context, host,
use_slave=False):
computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
# FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple
# nodes per host, we should return all the nodes and modify the callers
# instead.
# Arbitrarily returning the first node.
return computes[0]
@staticmethod
def _convert_stats_to_db_format(updates):
stats = updates.pop('stats', None)
if stats is not None:
updates['stats'] = jsonutils.dumps(stats)
@staticmethod
def _convert_host_ip_to_db_format(updates):
host_ip = updates.pop('host_ip', None)
if host_ip:
updates['host_ip'] = str(host_ip)
@staticmethod
def _convert_supported_instances_to_db_format(updates):
hv_specs = updates.pop('supported_hv_specs', None)
if hv_specs is not None:
hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
updates['supported_instances'] = jsonutils.dumps(hv_specs)
@staticmethod
def _convert_pci_stats_to_db_format(updates):
pools = updates.pop('pci_device_pools', None)
if pools:
updates['pci_stats'] = jsonutils.dumps(pools.obj_to_primitive())
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
db_compute = db.compute_node_create(self._context, updates)
self._from_db_object(self._context, self, db_compute)
@base.remotable
def save(self, prune_stats=False):
# NOTE(belliott) ignore prune_stats param, no longer relevant
updates = self.obj_get_changes()
updates.pop('id', None)
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
db_compute = db.compute_node_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_compute)
@base.remotable
def destroy(self):
db.compute_node_delete(self._context, self.id)
@property
def service(self):
if not hasattr(self, '_cached_service'):
self._cached_service = objects.Service.get_by_id(self._context,
self.service_id)
return self._cached_service
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# ComputeNode <= version 1.2
# Version 1.1 ComputeNode version 1.3
# Version 1.2 Add get_by_service()
# Version 1.3 ComputeNode version 1.4
# Version 1.4 ComputeNode version 1.5
# Version 1.5 Add use_slave to get_by_service
# Version 1.6 ComputeNode version 1.6
# Version 1.7 ComputeNode version 1.7
# Version 1.8 ComputeNode version 1.8 + add get_all_by_host()
# Version 1.9 ComputeNode version 1.9
# Version 1.10 ComputeNode version 1.10
# Version 1.11 ComputeNode version 1.11
VERSION = '1.11'
fields = {
'objects': fields.ListOfObjectsField('ComputeNode'),
}
child_versions = {
'1.0': '1.2',
# NOTE(danms): ComputeNode was at 1.2 before we added this
'1.1': '1.3',
'1.2': '1.3',
'1.3': '1.4',
'1.4': '1.5',
'1.5': '1.5',
'1.6': '1.6',
'1.7': '1.7',
'1.8': '1.8',
'1.9': '1.9',
'1.10': '1.10',
'1.11': '1.11',
}
@base.remotable_classmethod
def get_all(cls, context):
db_computes = db.compute_node_get_all(context)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@base.remotable_classmethod
def get_by_hypervisor(cls, context, hypervisor_match):
db_computes = db.compute_node_search_by_hypervisor(context,
hypervisor_match)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@base.remotable_classmethod
def _get_by_service(cls, context, service_id, use_slave=False):
try:
db_computes = db.compute_nodes_get_by_service_id(
context, service_id)
except exception.ServiceNotFound:
# NOTE(sbauza): Previous behaviour was returning an empty list
# if the service was created with no computes, we need to keep it.
db_computes = []
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
@classmethod
def get_by_service(cls, context, service, use_slave=False):
return cls._get_by_service(context, service.id, use_slave=use_slave)
@base.remotable_classmethod
def get_all_by_host(cls, context, host, use_slave=False):
try:
db_computes = db.compute_node_get_all_by_host(context, host,
use_slave)
except exception.ComputeHostNotFound:
# FIXME(sbauza): Some old computes can still have no host record
# We need to provide compatibility by using the old service_id
# record.
# We assume the compatibility as an extra penalty of one more DB
# call but that's necessary until all nodes are upgraded.
try:
service = objects.Service.get_by_compute_host(context, host,
use_slave)
db_computes = db.compute_nodes_get_by_service_id(
context, service.id)
except exception.ServiceNotFound:
# We need to provide the same exception upstream
raise exception.ComputeHostNotFound(host=host)
# We can avoid an extra call to Service object in _from_db_object
for db_compute in db_computes:
db_compute['host'] = service.host
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
| apache-2.0 | 1,963,370,426,602,458,400 | 41.716667 | 79 | 0.600533 | false |