# Dataset dump: one source file per record. Each record carries the metadata
# columns repo_name, path, copies, size, content, license, hash, line_mean,
# line_max, alpha_frac and autogenerated; the viewer's schema table has been
# collapsed into the per-file headers below (autogenerated is false for every
# record shown, and the hash/line-statistics columns are omitted).

# ============================================================================
# File: github/PaginatedList.py
# Repo: travisreed-wf/PyGithub | copies: 23 | size: 7707 | license: gpl-3.0
# ============================================================================
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Bill Mill <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 davidbrai <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PaginatedListBase:
def __init__(self):
self.__elements = list()
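    # Indexing is lazy: __fetchToIndex pulls pages from the API until the
    # requested element is available locally; slices defer to _Slice below.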
def __getitem__(self, index):
        assert isinstance(index, (int, long, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __iter__(self):
for element in self.__elements:
yield element
while self._couldGrow():
newElements = self._grow()
for element in newElements:
yield element
def _isBiggerThan(self, index):
return len(self.__elements) > index or self._couldGrow()
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self._couldGrow():
self._grow()
def _grow(self):
newElements = self._fetchNextPage()
self.__elements += newElements
return newElements
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list[index]
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
class PaginatedList(PaginatedListBase):
"""
This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
You can simply enumerate through instances of this class::
for repo in user.get_repos():
print repo.name
You can also index them or take slices::
second_repo = user.get_repos()[1]
first_repos = user.get_repos()[:10]
If you want to iterate in reversed order, just do::
for repo in user.get_repos().reversed:
print repo.name
    And if you really need it, you can explicitly access a specific page::
some_repos = user.get_repos().get_page(0)
some_other_repos = user.get_repos().get_page(3)
"""
def __init__(self, contentClass, requester, firstUrl, firstParams):
PaginatedListBase.__init__(self)
self.__requester = requester
self.__contentClass = contentClass
self.__firstUrl = firstUrl
self.__firstParams = firstParams or ()
self.__nextUrl = firstUrl
self.__nextParams = firstParams or {}
if self.__requester.per_page != 30:
self.__nextParams["per_page"] = self.__requester.per_page
self._reversed = False
self.__totalCount = None
@property
def totalCount(self):
        if self.__totalCount is None:  # a count of 0 must not trigger a re-fetch
self._grow()
return self.__totalCount
def _getLastPageUrl(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=self.__nextParams
)
links = self.__parseLinkHeader(headers)
lastUrl = links.get("last")
return lastUrl
@property
def reversed(self):
r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
r.__reverse()
return r
def __reverse(self):
self._reversed = True
lastUrl = self._getLastPageUrl()
if lastUrl:
self.__nextUrl = lastUrl
def _couldGrow(self):
return self.__nextUrl is not None
def _fetchNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__nextUrl,
parameters=self.__nextParams
)
data = data if data else []
self.__nextUrl = None
if len(data) > 0:
links = self.__parseLinkHeader(headers)
if self._reversed:
if "prev" in links:
self.__nextUrl = links["prev"]
elif "next" in links:
self.__nextUrl = links["next"]
self.__nextParams = None
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
content = [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data if element is not None
]
if self._reversed:
return content[::-1]
return content
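    # GitHub paginates via the HTTP "Link" response header, e.g.:
    #   Link: <https://api.github.com/user/repos?page=2>; rel="next",
    #         <https://api.github.com/user/repos?page=34>; rel="last"
    # url[1:-1] strips the angle brackets and rel[5:-1] strips the enclosing
    # 'rel="..."', yielding a dict such as {"next": ..., "last": ...}.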
def __parseLinkHeader(self, headers):
links = {}
if "link" in headers:
linkHeaders = headers["link"].split(", ")
for linkHeader in linkHeaders:
(url, rel) = linkHeader.split("; ")
url = url[1:-1]
rel = rel[5:-1]
links[rel] = url
return links
def get_page(self, page):
params = dict(self.__firstParams)
if page != 0:
params["page"] = page + 1
if self.__requester.per_page != 30:
params["per_page"] = self.__requester.per_page
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=params
)
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
return [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data
]
# ============================================================================
# File: python/MooseDocs/extensions/MooseCSS.py
# Repo: katyhuff/moose | copies: 1 | size: 2028 | license: lgpl-2.1
# ============================================================================
from markdown.blockprocessors import BlockProcessor
from MooseCommonExtension import MooseCommonExtension
import re
from markdown.util import etree
class MooseCSS(BlockProcessor, MooseCommonExtension):
"""
    Markdown extension for applying CSS styles to a paragraph.
    The markdown syntax is:
!css <options>
Paragraph text here
Where <options> are key=value pairs.
"""
RE = re.compile(r'^!\ ?css(.*)')
# If there are multiple css blocks on the same page then
# they need to have different ids
MATCHES_FOUND = 0
def __init__(self, parser, root=None, **kwargs):
MooseCommonExtension.__init__(self)
BlockProcessor.__init__(self, parser, **kwargs)
def test(self, parent, block):
"""
Test to see if we should process this block of markdown.
Inherited from BlockProcessor.
"""
return self.RE.search(block)
def run(self, parent, blocks):
"""
Called when it is determined that we can process this block.
This will convert the markdown into HTML
"""
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
# Parse out the options on the css line
options, styles = self.getSettings(m.group(1))
block = block[m.end() + 1:] # removes the css line
block, paragraph = self.detab(block)
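        # If the regex matched, wrap the detabbed paragraph in a fresh <div>;
        # otherwise fall back to appending the chunk to the previous sibling.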
if m:
top_div = etree.SubElement(parent, 'div')
self.createCSS(top_div, styles, paragraph)
else:
top_div = sibling
self.parser.parseChunk(top_div, block)
def createCSS(self, top_div, styles, paragraph):
"""
Creates the actual HTML required for the CSS paragraph to work.
Input:
top_div: div element that will contain the paragraph element
styles[dict]: The CSS style attributes
paragraph: the actual text within the <p></p> element
"""
p_el = self.addStyle(etree.SubElement(top_div, 'p'), **styles)
p_el.text = paragraph
# ============================================================================
# File: ete3/test/test_treeview/item_faces.py
# Repo: karrtikr/ete | copies: 1 | size: 4140 | license: gpl-3.0
# ============================================================================
# We will need to create Qt4 items
from PyQt4 import QtCore
from PyQt4.QtGui import QGraphicsRectItem, QGraphicsSimpleTextItem, \
QGraphicsEllipseItem, QColor, QPen, QBrush
from ... import Tree, faces, TreeStyle, NodeStyle
# To play with random colors
import colorsys
import random
class InteractiveItem(QGraphicsRectItem):
def __init__(self, *arg, **karg):
QGraphicsRectItem.__init__(self, *arg, **karg)
self.node = None
self.label = None
self.setCursor(QtCore.Qt.PointingHandCursor)
self.setAcceptsHoverEvents(True)
def hoverEnterEvent (self, e):
# There are many ways of adding interactive elements. With the
# following code, I show/hide a text item over my custom
# DynamicItemFace
if not self.label:
self.label = QGraphicsRectItem()
self.label.setParentItem(self)
# This is to ensure that the label is rendered over the
# rest of item children (default ZValue for items is 0)
self.label.setZValue(1)
self.label.setBrush(QBrush(QColor("white")))
self.label.text = QGraphicsSimpleTextItem()
self.label.text.setParentItem(self.label)
self.label.text.setText(self.node.name)
self.label.setRect(self.label.text.boundingRect())
self.label.setVisible(True)
def hoverLeaveEvent(self, e):
if self.label:
self.label.setVisible(False)
def random_color(h=None):
"""Generates a random color in RGB format."""
if not h:
h = random.random()
s = 0.5
l = 0.5
return _hls2hex(h, l, s)
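# Convert HLS floats in [0, 1] into an '#rrggbb' hex string (via colorsys).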
def _hls2hex(h, l, s):
return '#%02x%02x%02x' %tuple(map(lambda x: int(x*255),
colorsys.hls_to_rgb(h, l, s)))
def ugly_name_face(node, *args, **kargs):
""" This is my item generator. It must receive a node object, and
returns a Qt4 graphics item that can be used as a node face.
"""
# receive an arbitrary number of arguments, in this case width and
# height of the faces
width = args[0][0]
height = args[0][1]
## Creates a main master Item that will contain all other elements
## Items can be standard QGraphicsItem
# masterItem = QGraphicsRectItem(0, 0, width, height)
# Or your custom Items, in which you can re-implement interactive
# functions, etc. Check QGraphicsItem doc for details.
masterItem = InteractiveItem(0, 0, width, height)
# Keep a link within the item to access node info
masterItem.node = node
    # I don't want a border around the masterItem
masterItem.setPen(QPen(QtCore.Qt.NoPen))
# Add ellipse around text
ellipse = QGraphicsEllipseItem(masterItem.rect())
ellipse.setParentItem(masterItem)
# Change ellipse color
ellipse.setBrush(QBrush(QColor( random_color())))
# Add node name within the ellipse
text = QGraphicsSimpleTextItem(node.name)
text.setParentItem(ellipse)
text.setPen(QPen(QPen(QColor("white"))))
# Center text according to masterItem size
tw = text.boundingRect().width()
th = text.boundingRect().height()
center = masterItem.boundingRect().center()
text.setPos(center.x()-tw/2, center.y()-th/2)
return masterItem
def master_ly(node):
if node.is_leaf():
        # Create an ItemFace. First argument must be the pointer to
# the constructor function that returns a QGraphicsItem. It
# will be used to draw the Face. Next arguments are arbitrary,
# and they will be forwarded to the constructor Face function.
F = faces.DynamicItemFace(ugly_name_face, 100, 50)
faces.add_face_to_node(F, node, 0, position="aligned")
def get_example_tree():
t = Tree()
t.populate(8, reuse_names=False)
ts = TreeStyle()
ts.layout_fn = master_ly
ts.title.add_face(faces.TextFace("Drawing your own Qt Faces", fsize=15), 0)
return t, ts
if __name__ == "__main__":
t, ts = get_example_tree()
#t.render("item_faces.png", h=400, tree_style=ts)
# The interactive features are only available using the GUI
t.show(tree_style=ts)
# ============================================================================
# File: neutron/db/api.py
# Repo: alexandrucoman/vbox-neutron-agent | copies: 6 | size: 1693 | license: apache-2.0
# ============================================================================
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_db.sqlalchemy import session
from sqlalchemy import exc
_FACADE = None
MAX_RETRIES = 10
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
return _FACADE
def get_engine():
"""Helper method to grab engine."""
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session."""
facade = _create_facade_lazily()
return facade.get_session(autocommit=autocommit,
expire_on_commit=expire_on_commit)
@contextlib.contextmanager
def autonested_transaction(sess):
"""This is a convenience method to not bother with 'nested' parameter."""
try:
session_context = sess.begin_nested()
except exc.InvalidRequestError:
session_context = sess.begin(subtransactions=True)
finally:
with session_context as tx:
yield tx
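# Minimal usage sketch (the `session` variable is hypothetical, not part of
# this module):
#   session = get_session()
#   with autonested_transaction(session) as tx:
#       ...  # ORM work; the transaction commits on success, rolls back on error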
# ============================================================================
# File: python-modules/twisted/twisted/python/dispatch.py
# Repo: MrLoick/python-for-android | copies: 64 | size: 1187 | license: apache-2.0
# ============================================================================
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import warnings
warnings.warn(
"Create your own event dispatching mechanism, "
"twisted.python.dispatch will soon be no more.",
DeprecationWarning, 2)
class EventDispatcher:
"""
A global event dispatcher for events.
I'm used for any events that need to span disparate objects in the client.
I should only be used when one object needs to signal an object that it's
not got a direct reference to (unless you really want to pass it through
here, in which case I won't mind).
I'm mainly useful for complex GUIs.
"""
def __init__(self, prefix="event_"):
self.prefix = prefix
self.callbacks = {}
def registerHandler(self, name, meth):
self.callbacks.setdefault(name, []).append(meth)
def autoRegister(self, obj):
from twisted.python import reflect
d = {}
reflect.accumulateMethods(obj, d, self.prefix)
for k,v in d.items():
self.registerHandler(k, v)
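    # Note: publishing an event name with no registered handlers raises KeyError.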
def publishEvent(self, name, *args, **kwargs):
for cb in self.callbacks[name]:
cb(*args, **kwargs)
# ============================================================================
# File: pyOCD/test/test_utility/test_cmdline.py
# Repo: 0xc0170/pyOCD | copies: 11 | size: 1600 | license: apache-2.0
# ============================================================================
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.utility.cmdline import split_command_line
class TestSplitCommandLine:
def test_split(self):
assert split_command_line('foo') == ['foo']
assert split_command_line(['foo']) == ['foo']
assert split_command_line('foo bar') == ['foo', 'bar']
assert split_command_line(['foo bar']) == ['foo', 'bar']
def test_split_strings(self):
assert split_command_line('"foo"') == ['foo']
assert split_command_line('"foo bar"') == ['foo bar']
assert split_command_line(['"foo"']) == ['foo']
assert split_command_line('a "b c" d') == ['a', "b c", 'd']
assert split_command_line("'foo bar'") == ['foo bar']
def test_split_whitespace(self):
assert split_command_line('a b') == ['a', 'b']
assert split_command_line('a\tb') == ['a', 'b']
assert split_command_line('a\rb') == ['a', 'b']
assert split_command_line('a\nb') == ['a', 'b']
assert split_command_line('a \tb') == ['a', 'b']
# ============================================================================
# File: samples/sigsmacross/sigsmacross.py
# Repo: mementum/backtrader | copies: 1 | size: 3835 | license: gpl-3.0
# ============================================================================
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import backtrader as bt
class SmaCross(bt.SignalStrategy):
params = dict(sma1=10, sma2=20)
def notify_order(self, order):
if not order.alive():
print('{} {} {}@{}'.format(
bt.num2date(order.executed.dt),
'buy' if order.isbuy() else 'sell',
order.executed.size,
order.executed.price)
)
def notify_trade(self, trade):
if trade.isclosed:
print('profit {}'.format(trade.pnlcomm))
def __init__(self):
sma1 = bt.ind.SMA(period=self.params.sma1)
sma2 = bt.ind.SMA(period=self.params.sma2)
crossover = bt.ind.CrossOver(sma1, sma2)
self.signal_add(bt.SIGNAL_LONG, crossover)
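        # With SIGNAL_LONG, a positive crossover (fast SMA crossing above the
        # slow SMA) signals entering/holding a long position and a negative
        # crossover signals closing it -- a sketch of backtrader's default
        # long-signal semantics; see the backtrader signal docs for details.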
def runstrat(pargs=None):
args = parse_args(pargs)
cerebro = bt.Cerebro()
cerebro.broker.set_cash(args.cash)
data0 = bt.feeds.YahooFinanceData(
dataname=args.data,
fromdate=datetime.datetime.strptime(args.fromdate, '%Y-%m-%d'),
todate=datetime.datetime.strptime(args.todate, '%Y-%m-%d'))
cerebro.adddata(data0)
cerebro.addstrategy(SmaCross, **(eval('dict(' + args.strat + ')')))
cerebro.addsizer(bt.sizers.FixedSize, stake=args.stake)
cerebro.run()
if args.plot:
cerebro.plot(**(eval('dict(' + args.plot + ')')))
def parse_args(pargs=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='sigsmacross')
parser.add_argument('--data', required=False, default='YHOO',
help='Yahoo Ticker')
parser.add_argument('--fromdate', required=False, default='2011-01-01',
help='Ending date in YYYY-MM-DD format')
parser.add_argument('--todate', required=False, default='2012-12-31',
help='Ending date in YYYY-MM-DD format')
parser.add_argument('--cash', required=False, action='store', type=float,
default=10000, help=('Starting cash'))
parser.add_argument('--stake', required=False, action='store', type=int,
default=1, help=('Stake to apply'))
parser.add_argument('--strat', required=False, action='store', default='',
help=('Arguments for the strategy'))
parser.add_argument('--plot', '-p', nargs='?', required=False,
metavar='kwargs', const='{}',
help=('Plot the read data applying any kwargs passed\n'
'\n'
'For example:\n'
'\n'
' --plot style="candle" (to plot candles)\n'))
return parser.parse_args(pargs)
if __name__ == '__main__':
runstrat()
# ============================================================================
# File: tests/common/failure_injector.py
# Repo: cchanning/Impala | copies: 16 | size: 3806 | license: apache-2.0
# ============================================================================
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Failure injection module for the Impala service. There are two main ways this module
# can be used - the first is to initialize the failure injector and then call start()
# which will kick off a timer that chooses a random impalad/state store process
# to fail each time the timer fires.
# The second way is to initialize it and call the actions directly
# (e.g. kill_random_impalad()), which provides a bit more control over exactly
# when a failure will happen and is useful for targeted test scenarios.
import logging
import os
import sys
import time
from tests.common.impala_cluster import *
from random import choice
from threading import Timer
logging.basicConfig(level=logging.INFO, format='%(threadName)s: %(message)s')
LOG = logging.getLogger('failure-injector')
# This class is used for injecting failures for the Impala service.
class FailureInjector(object):
def __init__(self, impala_cluster, failure_frequency, impalad_exclude_list=None):
"""
Initializes the FailureInjector object.
impala_cluster - An ImpalaCluster object (see the impala_cluster module)
failure_frequency - Interval to fire timer (in seconds)
    impalad_exclude_list - A list of impalad host:port names to not inject failures
on. Useful to filter out the coordinator.
"""
self.cluster = impala_cluster
self.cluster.get_impala_service().set_process_auto_restart_config(value=True)
# TODO: Do we need to restart the impala service to apply this?
# self.cluster.get_impala_service().restart()
self.failure_frequency = failure_frequency
num_impalad_procs = len(self.cluster.get_impala_service().get_all_impalad_processes())
self.impalad_exclude_list = impalad_exclude_list
# Build a weighted list of possible actions. This is done using a trivial approach
# where we just add the item multiple times (weight value) into the action list.
# TODO: Provide a way to dynamically configure the weights
actions_with_weights = {self.kill_random_impalad: num_impalad_procs * 2,
self.kill_state_store: 1}
self.possible_actions = list()
for key, value in actions_with_weights.items():
self.possible_actions.extend([key] * value)
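    # e.g. with 3 impalads this yields [kill_random_impalad] * 6 +
    # [kill_state_store] * 1, so an impalad kill is six times as likely.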
def start(self):
""" Starts the timer, triggering failures for the specified interval """
self.__start_timer()
def cancel(self):
""" Stops the timer, canceling any additional failures from occurring """
if self.__timer is not None:
self.__timer.cancel()
def kill_random_impalad(self):
""" Kills a randomly selected impalad instance not in the exlude list """
filtered_impalad = \
filter(lambda impalad: '%s:%d' % (impalad.hostname, impalad.be_port)\
not in self.impalad_exclude_list,
self.cluster.get_impala_service().get_all_impalad_processes())
self.kill_impalad(choice(filtered_impalad))
def kill_impalad(self, impalad):
""" Kills the specified impalad instance """
LOG.info('Chose impalad on "%s" to kill' % impalad.hostname)
impalad.kill()
def kill_state_store(self):
""" Kills the statestore process """
state_store = self.cluster.get_impala_service().get_state_store_process()
LOG.info('Chose statestore on "%s" to kill' % state_store.hostname)
state_store.kill()
def __start_timer(self):
""" Starts a new timer, cancelling the previous timer if it is running """
self.cancel()
self.__timer = Timer(self.failure_frequency, self.__choose_action)
self.__timer.start()
def __choose_action(self):
""" Chooses a failure action to perform """
action = choice(self.possible_actions)
LOG.info('Executing action: %s' % action)
action()
self.__start_timer()
# ============================================================================
# File: google/datalab/bigquery/_query.py
# Repo: parthea/pydatalab | copies: 4 | size: 13135 | license: apache-2.0
# ============================================================================
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Implements Query BigQuery API."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
import google.datalab
import google.datalab.data
import google.datalab.utils
from ._query_output import QueryOutput
from . import _api
from . import _query_job
from . import _udf
from . import _utils
from . import _external_data_source
class Query(object):
"""Represents a Query object that encapsulates a BigQuery SQL query.
This object can be used to execute SQL queries and retrieve results.
"""
def __init__(self, sql, env=None, udfs=None, data_sources=None, subqueries=None):
"""Initializes an instance of a Query object.
Args:
sql: the BigQuery SQL query string to execute
env: a dictionary containing objects from the query execution context, used to get references
to UDFs, subqueries, and external data sources referenced by the query
udfs: list of UDFs names referenced in the SQL, or dictionary of names and UDF objects
data_sources: list of external data sources names referenced in the SQL, or dictionary of
names and data source objects
subqueries: list of subqueries names referenced in the SQL, or dictionary of names and
Query objects
Raises:
Exception if expansion of any variables failed.
"""
self._sql = sql
self._udfs = []
self._subqueries = []
self._data_sources = []
self._env = env or {}
# Validate given list or dictionary of objects that they are of correct type
# and add them to the target dictionary
def _expand_objects(obj_container, obj_type, target_list):
for item in obj_container:
# for a list of objects, we should find these objects in the given environment
if isinstance(obj_container, list):
value = self._env.get(item)
if value is None:
raise Exception('Cannot find object %s' % item)
# for a dictionary of objects, each pair must be a string and object of the expected type
elif isinstance(obj_container, dict):
value = obj_container[item]
if not isinstance(value, obj_type):
raise Exception('Expected type: %s, found: %s.' % (obj_type, type(value)))
else:
raise Exception('Unexpected container for type %s. Expected a list or dictionary'
% obj_type)
target_list.append((item, value))
if subqueries:
_expand_objects(subqueries, Query, self._subqueries)
if udfs:
_expand_objects(udfs, _udf.UDF, self._udfs)
if data_sources:
_expand_objects(data_sources, _external_data_source.ExternalDataSource, self._data_sources)
if len(self._data_sources) > 1:
raise Exception('Only one temporary external datasource is supported in queries.')
@staticmethod
def from_view(view):
""" Return a Query for the given View object
Args:
view: the View object to construct a Query out of
Returns:
A Query object with the same sql as the given View object
"""
return Query('SELECT * FROM %s' % view._repr_sql_())
@staticmethod
def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_()))
def _expanded_sql(self, sampling=None):
"""Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources
Returns:
The expanded SQL string of this object
"""
# use lists to preserve the order of subqueries, bigquery will not like listing subqueries
# out of order if they depend on each other. for example. the following will be rejected:
# WITH q2 as (SELECT * FROM q1),
# q1 as (SELECT * FROM mytable),
# SELECT * FROM q2
# so when we're getting the dependencies, use recursion into a list to maintain the order
udfs = []
subqueries = []
expanded_sql = ''
def _recurse_subqueries(query):
"""Recursively scan subqueries and add their pieces to global scope udfs and subqueries
"""
if query._subqueries:
for subquery in query._subqueries:
_recurse_subqueries(subquery[1])
subqueries.extend([s for s in query._subqueries if s not in subqueries])
if query._udfs:
# query._udfs is a list of (name, UDF) tuples; we just want the UDF.
udfs.extend([u[1] for u in query._udfs if u[1] not in udfs])
_recurse_subqueries(self)
if udfs:
expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs])
expanded_sql += '\n'
def _indent_query(subquery):
return ' ' + subquery._sql.replace('\n', '\n ')
if subqueries:
expanded_sql += 'WITH ' + \
'\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1]))
for sq in subqueries])
expanded_sql += '\n)\n\n'
expanded_sql += sampling(self._sql) if sampling else self._sql
return expanded_sql
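  # Shape of the expanded SQL, with hypothetical names (UDF definitions come
  # first, then the ordered WITH subqueries, then the main query):
  #   WITH q1 AS (
  #     SELECT * FROM mytable
  #   ),
  #   q2 AS (
  #     SELECT * FROM q1
  #   )
  #   SELECT * FROM q2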
def _repr_sql_(self):
"""Creates a SQL representation of this object.
Returns:
The SQL representation to use when embedding this object into other SQL.
"""
return '(%s)' % self.sql
def __repr__(self):
"""Creates a friendly representation of this object.
Returns:
The friendly representation of this object (the unmodified SQL).
"""
return 'BigQuery Query - %s' % self._sql
@property
def sql(self):
""" Get the SQL for the query. """
return self._expanded_sql()
@property
def udfs(self):
""" Get a dictionary of UDFs referenced by the query."""
return dict(self._udfs)
@property
def subqueries(self):
""" Get a dictionary of subqueries referenced by the query."""
return dict(self._subqueries)
@property
def data_sources(self):
""" Get a dictionary of external data sources referenced by the query."""
return dict(self._data_sources)
def dry_run(self, context=None, query_params=None):
"""Dry run a query, to check the validity of the query and return some useful statistics.
Args:
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A dict with 'cacheHit' and 'totalBytesProcessed' fields.
Raises:
An exception if the query was malformed.
"""
context = context or google.datalab.Context.default()
api = _api.Api(context)
try:
query_result = api.jobs_insert_query(self.sql, dry_run=True,
table_definitions=self.data_sources,
query_params=query_params)
except Exception as e:
raise e
return query_result['statistics']['query']
def execute_async(self, output_options=None, sampling=None, context=None, query_params=None):
""" Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
query_params: a dictionary containing query parameter types and values, passed to BigQuery.
Returns:
A Job object that can wait on creating a table or exporting to a file
If the output is a table, the Job object additionally has run statistics
and query results
Raises:
Exception if query could not be executed.
"""
# Default behavior is to execute to a table
if output_options is None:
output_options = QueryOutput.table()
# First, execute the query into a table, using a temporary one if no name is specified
batch = output_options.priority == 'low'
append = output_options.table_mode == 'append'
overwrite = output_options.table_mode == 'overwrite'
table_name = output_options.table_name
context = context or google.datalab.Context.default()
api = _api.Api(context)
if table_name is not None:
table_name = _utils.parse_table_name(table_name, api.project_id)
sql = self._expanded_sql(sampling)
try:
query_result = api.jobs_insert_query(sql, table_name=table_name,
append=append, overwrite=overwrite, batch=batch,
use_cache=output_options.use_cache,
allow_large_results=output_options.allow_large_results,
table_definitions=self.data_sources,
query_params=query_params)
except Exception as e:
raise e
if 'jobReference' not in query_result:
raise Exception('Unexpected response from server')
job_id = query_result['jobReference']['jobId']
if not table_name:
try:
destination = query_result['configuration']['query']['destinationTable']
table_name = (destination['projectId'], destination['datasetId'], destination['tableId'])
except KeyError:
# The query was in error
raise Exception(_utils.format_query_errors(query_result['status']['errors']))
execute_job = _query_job.QueryJob(job_id, table_name, sql, context=context)
# If all we need is to execute the query to a table, we're done
if output_options.type == 'table':
return execute_job
# Otherwise, build an async Job that waits on the query execution then carries out
# the specific export operation
else:
export_args = export_kwargs = None
if output_options.type == 'file':
if output_options.file_path.startswith('gs://'):
export_func = execute_job.result().extract
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header,
'compress': output_options.compress_file
}
else:
export_func = execute_job.result().to_file
export_args = [output_options.file_path]
export_kwargs = {
'format': output_options.file_format,
'csv_delimiter': output_options.csv_delimiter,
'csv_header': output_options.csv_header
}
elif output_options.type == 'dataframe':
export_func = execute_job.result().to_dataframe
export_args = []
export_kwargs = {
'start_row': output_options.dataframe_start_row,
'max_rows': output_options.dataframe_max_rows
}
# Perform the export operation with the specified parameters
export_func = google.datalab.utils.async_function(export_func)
return export_func(*export_args, **export_kwargs)
def execute(self, output_options=None, sampling=None, context=None, query_params=None):
""" Initiate the query and return a QueryJob.
Args:
output_options: a QueryOutput object describing how to execute the query
sampling: sampling function to use. No sampling is done if None. See bigquery.Sampling
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
Returns:
A Job object that can be used to get the query results, or export to a file or dataframe
Raises:
Exception if query could not be executed.
"""
return self.execute_async(output_options, sampling=sampling, context=context,
query_params=query_params).wait()
# ============================================================================
# File: sympy/assumptions/tests/test_context.py
# Repo: kmacinnis/sympy | copies: 126 | size: 1153 | license: bsd-3-clause
# ============================================================================
from sympy.assumptions import ask, Q
from sympy.assumptions.assume import assuming, global_assumptions
from sympy.abc import x, y
def test_assuming():
with assuming(Q.integer(x)):
assert ask(Q.integer(x))
assert not ask(Q.integer(x))
def test_assuming_nested():
assert not ask(Q.integer(x))
assert not ask(Q.integer(y))
with assuming(Q.integer(x)):
assert ask(Q.integer(x))
assert not ask(Q.integer(y))
with assuming(Q.integer(y)):
assert ask(Q.integer(x))
assert ask(Q.integer(y))
assert ask(Q.integer(x))
assert not ask(Q.integer(y))
assert not ask(Q.integer(x))
assert not ask(Q.integer(y))
def test_finally():
try:
with assuming(Q.integer(x)):
1/0
except ZeroDivisionError:
pass
assert not ask(Q.integer(x))
def test_remove_safe():
global_assumptions.add(Q.integer(x))
with assuming():
assert ask(Q.integer(x))
global_assumptions.remove(Q.integer(x))
assert not ask(Q.integer(x))
assert ask(Q.integer(x))
global_assumptions.clear() # for the benefit of other tests
# ============================================================================
# File: src/face_recognition/youtube_dl/extractor/ina.py
# Repo: lodemo/CATANA | copies: 55 | size: 1062 | license: mit
# ============================================================================
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class InaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)'
_TEST = {
'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
'md5': 'a667021bf2b41f8dc6049479d9bb38a3',
'info_dict': {
'id': 'I12055569',
'ext': 'mp4',
'title': 'François Hollande "Je crois que c\'est clair"',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
info_doc = self._download_xml(mrss_url, video_id)
self.report_extraction(video_id)
video_url = info_doc.find('.//{http://search.yahoo.com/mrss/}player').attrib['url']
return {
'id': video_id,
'url': video_url,
'title': info_doc.find('.//title').text,
}
# ============================================================================
# File: keras/preprocessing/sequence.py
# Repo: cmyr/keras | copies: 4 | size: 4010 | license: mit
# ============================================================================
from __future__ import absolute_import
# -*- coding: utf-8 -*-
import numpy as np
import random
from six.moves import range
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre'):
"""
Pad each sequence to the same length:
    the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Support post-padding and pre-padding (default).
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
x = np.zeros((nb_samples, maxlen)).astype(dtype)
for idx, s in enumerate(sequences):
if padding == 'post':
x[idx, :lengths[idx]] = s[:maxlen]
else:
x[idx, -min(maxlen, lengths[idx]):] = s[:maxlen]
return x
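# Example: pad_sequences([[1, 2], [3, 4, 5]]) -> array([[0, 1, 2], [3, 4, 5]])
# (pre-padding with zeros up to the longest length, dtype int32 by default).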
def make_sampling_table(size, sampling_factor=1e-5):
'''
This generates an array where the ith element
is the probability that a word of rank i would be sampled,
according to the sampling distribution used in word2vec.
The word2vec formula is:
p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))
We assume that the word frequencies follow Zipf's law (s=1) to derive
a numerical approximation of frequency(rank):
frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
where gamma is the Euler-Mascheroni constant.
'''
gamma = 0.577
rank = np.array(list(range(size)))
rank[0] = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1./(12.*rank)
f = sampling_factor * inv_fq
return np.minimum(1., f / np.sqrt(f))
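# The table is indexed by word rank (0 = most frequent). Pass it as the
# sampling_table= argument of skipgrams() below to subsample frequent words.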
def skipgrams(sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
categorical=False, sampling_table=None):
'''
Take a sequence (list of indexes of words),
returns couples of [word_index, other_word index] and labels (1s or 0s),
where label = 1 if 'other_word' belongs to the context of 'word',
    and label=0 if 'other_word' is randomly sampled
@param vocabulary_size: int. maximum possible word index + 1
@param window_size: int. actually half-window. The window of a word wi will be [i-window_size, i+window_size+1]
@param negative_samples: float >= 0. 0 for no negative (=random) samples. 1 for same number as positive samples. etc.
@param categorical: bool. if False, labels will be integers (eg. [0, 1, 1 .. ]),
if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]
Note: by convention, index 0 in the vocabulary is a non-word and will be skipped.
'''
couples = []
labels = []
for i, wi in enumerate(sequence):
if not wi:
continue
if sampling_table is not None:
if sampling_table[wi] < random.random():
continue
window_start = max(0, i-window_size)
window_end = min(len(sequence), i+window_size+1)
for j in range(window_start, window_end):
if j != i:
wj = sequence[j]
if not wj:
continue
couples.append([wi, wj])
if categorical:
labels.append([0,1])
else:
labels.append(1)
if negative_samples > 0:
nb_negative_samples = int(len(labels) * negative_samples)
words = [c[0] for c in couples]
random.shuffle(words)
couples += [[words[i%len(words)], random.randint(1, vocabulary_size-1)] for i in range(nb_negative_samples)]
if categorical:
labels += [[1,0]]*nb_negative_samples
else:
labels += [0]*nb_negative_samples
if shuffle:
seed = random.randint(0,10e6)
random.seed(seed)
random.shuffle(couples)
random.seed(seed)
random.shuffle(labels)
return couples, labels
# ============================================================================
# File: pygmt/tests/test_helpers.py
# Repo: GenericMappingTools/gmt-python | copies: 1 | size: 3568 | license: bsd-3-clause
# ============================================================================
"""
Tests the helper functions/classes/etc used in wrapping GMT.
"""
import os
import numpy as np
import pytest
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
args_in_kwargs,
data_kind,
kwargs_to_strings,
unique_name,
)
@pytest.mark.parametrize(
"data,x,y",
[
(None, None, None),
("data.txt", np.array([1, 2]), np.array([4, 5])),
("data.txt", np.array([1, 2]), None),
("data.txt", None, np.array([4, 5])),
(None, np.array([1, 2]), None),
(None, None, np.array([4, 5])),
],
)
def test_data_kind_fails(data, x, y):
"""
Make sure data_kind raises exceptions when it should.
"""
with pytest.raises(GMTInvalidInput):
data_kind(data=data, x=x, y=y)
def test_unique_name():
"""
Make sure the names are really unique.
"""
names = [unique_name() for i in range(100)]
assert len(names) == len(set(names))
def test_kwargs_to_strings_fails():
"""
Make sure it fails for invalid conversion types.
"""
with pytest.raises(GMTInvalidInput):
kwargs_to_strings(bla="blablabla")
def test_gmttempfile():
"""
Check that file is really created and deleted.
"""
with GMTTempFile() as tmpfile:
assert os.path.exists(tmpfile.name)
# File should be deleted when leaving the with block
assert not os.path.exists(tmpfile.name)
def test_gmttempfile_unique():
"""
Check that generating multiple files creates unique names.
"""
with GMTTempFile() as tmp1:
with GMTTempFile() as tmp2:
with GMTTempFile() as tmp3:
assert tmp1.name != tmp2.name != tmp3.name
def test_gmttempfile_prefix_suffix():
"""
Make sure the prefix and suffix of temporary files are user specifiable.
"""
with GMTTempFile() as tmpfile:
assert os.path.basename(tmpfile.name).startswith("pygmt-")
assert os.path.basename(tmpfile.name).endswith(".txt")
with GMTTempFile(prefix="user-prefix-") as tmpfile:
assert os.path.basename(tmpfile.name).startswith("user-prefix-")
assert os.path.basename(tmpfile.name).endswith(".txt")
with GMTTempFile(suffix=".log") as tmpfile:
assert os.path.basename(tmpfile.name).startswith("pygmt-")
assert os.path.basename(tmpfile.name).endswith(".log")
with GMTTempFile(prefix="user-prefix-", suffix=".log") as tmpfile:
assert os.path.basename(tmpfile.name).startswith("user-prefix-")
assert os.path.basename(tmpfile.name).endswith(".log")
def test_gmttempfile_read():
"""
Make sure GMTTempFile.read() works.
"""
with GMTTempFile() as tmpfile:
with open(tmpfile.name, "w") as ftmp:
ftmp.write("in.dat: N = 2\t<1/3>\t<2/4>\n")
assert tmpfile.read() == "in.dat: N = 2 <1/3> <2/4>\n"
assert tmpfile.read(keep_tabs=True) == "in.dat: N = 2\t<1/3>\t<2/4>\n"
def test_args_in_kwargs():
"""
Test that args_in_kwargs function returns correct Boolean responses.
"""
kwargs = {"A": 1, "B": 2, "C": 3}
# Passing list of arguments with passing values in the beginning
passing_args_1 = ["B", "C", "D"]
assert args_in_kwargs(args=passing_args_1, kwargs=kwargs)
# Passing list of arguments that starts with failing arguments
passing_args_2 = ["D", "X", "C"]
assert args_in_kwargs(args=passing_args_2, kwargs=kwargs)
# Failing list of arguments
failing_args = ["D", "E", "F"]
assert not args_in_kwargs(args=failing_args, kwargs=kwargs)
# ============================================================================
# File: toontown/dna/DNALandmarkBuilding.py
# Repo: linktlh/Toontown-journey | copies: 3 | size: 2519 | license: apache-2.0
# ============================================================================
from panda3d.core import LVector4f
import DNANode
import DNAUtil
import DNAError
class DNALandmarkBuilding(DNANode.DNANode):
COMPONENT_CODE = 13
def __init__(self, name):
DNANode.DNANode.__init__(self, name)
self.code = ''
self.wallColor = LVector4f(1, 1, 1, 1)
self.title = ''
self.article = ''
self.buildingType = ''
self.door = None
def setArticle(self, article):
self.article = article
def getArticle(self):
return self.article
def setBuildingType(self, buildingType):
self.buildingType = buildingType
def getBuildingType(self):
return self.buildingType
def setTitle(self, title):
self.title = title
def getTitle(self):
return self.title
def getCode(self):
return self.code
def setCode(self, code):
self.code = code
def setWallColor(self, color):
self.wallColor = color
def getWallColor(self):
return self.wallColor
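    # For toon-building blocks named 'tb<NN>:...', create (or rename) a
    # matching 'sb<NN>:...' suit-building origin node carrying this
    # landmark's transform, so a suit building can later replace it.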
def setupSuitBuildingOrigin(self, nodePathA, nodePathB):
if (self.getName()[:2] == 'tb') and (self.getName()[3].isdigit()) and (self.getName().find(':') != -1):
name = self.getName()
name = 's' + name[1:]
node = nodePathB.find('**/*suit_building_origin')
if node.isEmpty():
node = nodePathA.attachNewNode(name)
node.setPosHprScale(self.getPos(), self.getHpr(), self.getScale())
else:
node.wrtReparentTo(nodePathA, 0)
node.setName(name)
def makeFromDGI(self, dgi):
DNANode.DNANode.makeFromDGI(self, dgi)
self.code = DNAUtil.dgiExtractString8(dgi)
self.wallColor = DNAUtil.dgiExtractColor(dgi)
self.title = DNAUtil.dgiExtractString8(dgi)
self.article = DNAUtil.dgiExtractString8(dgi)
self.buildingType = DNAUtil.dgiExtractString8(dgi)
def traverse(self, nodePath, dnaStorage):
node = dnaStorage.findNode(self.code)
if node is None:
raise DNAError.DNAError('DNALandmarkBuilding code ' + self.code + ' not found in DNAStorage')
npA = nodePath
nodePath = node.copyTo(nodePath, 0)
nodePath.setName(self.getName())
nodePath.setPosHprScale(self.getPos(), self.getHpr(), self.getScale())
self.setupSuitBuildingOrigin(npA, nodePath)
for child in self.children:
child.traverse(nodePath, dnaStorage)
        nodePath.flattenStrong()

# ============================================================================
# File: gunicorn/selectors.py
# Repo: mvaled/gunicorn | copies: 107 | size: 18997 | license: mit
# ============================================================================
"""Selectors module.
This module allows high-level and efficient I/O multiplexing, built upon the
`select` module primitives.
The following code adapted from trollius.selectors.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple, Mapping
import math
import select
import sys
from gunicorn._compat import wrap_error, InterruptedError
from gunicorn import six
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, six.integer_types):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
"{0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
"""Object used to associate a file object to its backing file descriptor,
selected event mask and attached data."""
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(six.with_metaclass(ABCMeta)):
"""Selector abstract base class.
A selector supports registering file objects to be monitored for specific
I/O events.
A file object is a file descriptor or any object with a `fileno()` method.
An arbitrary object can be attached to the file object, which can be used
for example to store context information, a callback, etc.
A selector can use various implementations (select(), poll(), epoll()...)
depending on the platform. The default `Selector` class uses the most
efficient implementation on the current platform.
"""
@abstractmethod
def register(self, fileobj, events, data=None):
"""Register a file object.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
ValueError if events is invalid
KeyError if fileobj is already registered
OSError if fileobj is closed or otherwise is unacceptable to
the underlying system call (if a system call is made)
Note:
OSError may or may not be raised
"""
raise NotImplementedError
@abstractmethod
def unregister(self, fileobj):
"""Unregister a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
SelectorKey instance
Raises:
KeyError if fileobj is not registered
Note:
If fileobj is registered but has since been closed this does
*not* raise OSError (even if the wrapped syscall does)
"""
raise NotImplementedError
def modify(self, fileobj, events, data=None):
"""Change a registered file object monitored events or attached data.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
Anything that unregister() or register() raises
"""
self.unregister(fileobj)
return self.register(fileobj, events, data)
@abstractmethod
def select(self, timeout=None):
"""Perform the actual selection, until some monitored file objects are
ready or a timeout expires.
Parameters:
timeout -- if timeout > 0, this specifies the maximum wait time, in
seconds
if timeout <= 0, the select() call won't block, and will
report the currently ready file objects
if timeout is None, select() will block until a monitored
file object becomes ready
Returns:
list of (key, events) for ready file objects
`events` is a bitwise mask of EVENT_READ|EVENT_WRITE
"""
raise NotImplementedError
def close(self):
"""Close the selector.
This must be called to make sure that any underlying resource is freed.
"""
pass
def get_key(self, fileobj):
"""Return the key associated to a registered file object.
Returns:
SelectorKey for this file object
"""
mapping = self.get_map()
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
@abstractmethod
def get_map(self):
"""Return a mapping of file objects to selector keys."""
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
class _BaseSelectorImpl(BaseSelector):
"""Base selector implementation."""
def __init__(self):
# this maps file descriptors to keys
self._fd_to_key = {}
# read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
"""Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Do an exhaustive search.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
# TODO: Subclasses can probably optimize this even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def close(self):
self._fd_to_key.clear()
def get_map(self):
return self._map
def _key_from_fd(self, fd):
"""Return the key associated to a given file descriptor.
Parameters:
fd -- file descriptor
Returns:
corresponding key, or None if not found
"""
try:
return self._fd_to_key[fd]
except KeyError:
return None
class SelectSelector(_BaseSelectorImpl):
"""Select-based selector."""
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
if sys.platform == 'win32':
def _select(self, r, w, _, timeout=None):
r, w, x = select.select(r, w, w, timeout)
return r, w + x, []
else:
_select = select.select
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
ready = []
try:
r, w, _ = wrap_error(self._select,
self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'poll'):
class PollSelector(_BaseSelectorImpl):
"""Poll-based selector."""
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._poll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = int(math.ceil(timeout * 1e3))
ready = []
try:
fd_event_list = wrap_error(self._poll.poll, timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'epoll'):
class EpollSelector(_BaseSelectorImpl):
"""Epoll-based selector."""
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
epoll_events = 0
if events & EVENT_READ:
epoll_events |= select.EPOLLIN
if events & EVENT_WRITE:
epoll_events |= select.EPOLLOUT
self._epoll.register(key.fd, epoll_events)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
self._epoll.unregister(key.fd)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
return key
def select(self, timeout=None):
if timeout is None:
timeout = -1
elif timeout <= 0:
timeout = 0
else:
# epoll_wait() has a resolution of 1 millisecond, round away
# from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3) * 1e-3
max_ev = len(self._fd_to_key)
ready = []
try:
fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.EPOLLIN:
events |= EVENT_WRITE
if event & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
if hasattr(select, 'devpoll'):
class DevpollSelector(_BaseSelectorImpl):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# devpoll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
                timeout = int(math.ceil(timeout * 1e3))
            ready = []
            try:
                fd_event_list = wrap_error(self._devpoll.poll, timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
if hasattr(select, 'kqueue'):
class KqueueSelector(_BaseSelectorImpl):
"""Kqueue-based selector."""
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
if events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
if key.events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# See comment above.
pass
return key
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
max_ev = len(self._fd_to_key)
ready = []
try:
kev_list = wrap_error(self._kqueue.control,
None, max_ev, timeout)
except InterruptedError:
return ready
for kev in kev_list:
fd = kev.ident
flag = kev.filter
events = 0
if flag == select.KQ_FILTER_READ:
events |= EVENT_READ
if flag == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():
DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals():
DefaultSelector = EpollSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'PollSelector' in globals():
DefaultSelector = PollSelector
else:
DefaultSelector = SelectSelector
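# Illustrative usage sketch (not part of the original module). Only the
# selector API calls below are canonical; the socket handling is an
# assumption made for demonstration.
def _example_read_loop(sock, handler):
    """Poll *sock* for readability and feed incoming chunks to *handler*."""
    sel = DefaultSelector()
    sel.register(sock, EVENT_READ)
    try:
        while True:
            for key, events in sel.select(timeout=1.0):
                data = key.fileobj.recv(4096)  # fd is ready; will not block
                if not data:  # peer closed the connection
                    return
                handler(data)
    finally:
        sel.close()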
| mit | -8,908,488,979,811,775,000 | 31.089527 | 80 | 0.549403 | false |
xiangel/hue | desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/setup.py | 42 | 1045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: setup.py 114 2007-06-14 21:17:14Z palgarvio $
# =============================================================================
# $URL: http://svn.edgewall.org/repos/babel/tags/0.9.6/babel/messages/tests/data/setup.py $
# $LastChangedDate: 2007-06-14 23:17:14 +0200 (do, 14 jun 2007) $
# $Rev: 114 $
# $LastChangedBy: palgarvio $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - Pedro Algarvio <[email protected]>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
# THIS IS A BOGUS PROJECT
from setuptools import setup, find_packages
setup(
name = 'TestProject',
version = '0.1',
license = 'BSD',
author = 'Foo Bar',
author_email = '[email protected]',
packages = find_packages(),
)
| apache-2.0 | 36,494,104,289,918,264 | 35.321429 | 91 | 0.442105 | false |
rabitt/mir_eval | evaluators/beat_eval.py | 4 | 1856 | #!/usr/bin/env python
'''
CREATED:2014-01-24 12:42:43 by Brian McFee <[email protected]>
Compute beat evaluation metrics
Usage:
./beat_eval.py REFERENCE.TXT ESTIMATED.TXT
'''
from __future__ import print_function
import argparse
import sys
import os
import eval_utilities
import mir_eval
def process_arguments():
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='mir_eval beat detection '
'evaluation')
parser.add_argument('-o',
dest='output_file',
default=None,
type=str,
action='store',
help='Store results in json format')
parser.add_argument('reference_file',
action='store',
help='path to the reference annotation file')
parser.add_argument('estimated_file',
action='store',
help='path to the estimated annotation file')
return vars(parser.parse_args(sys.argv[1:]))
if __name__ == '__main__':
# Get the parameters
parameters = process_arguments()
# Load in data
reference_beats = mir_eval.io.load_events(parameters['reference_file'])
estimated_beats = mir_eval.io.load_events(parameters['estimated_file'])
# Compute all the scores
scores = mir_eval.beat.evaluate(reference_beats, estimated_beats)
print("{} vs. {}".format(os.path.basename(parameters['reference_file']),
os.path.basename(parameters['estimated_file'])))
eval_utilities.print_evaluation(scores)
if parameters['output_file']:
print('Saving results to: ', parameters['output_file'])
eval_utilities.save_results(scores, parameters['output_file'])
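# Programmatic use (illustrative sketch; the file names are assumptions):
# the CLI above is a thin wrapper around mir_eval's library API.
#
#   import mir_eval
#   reference_beats = mir_eval.io.load_events('reference.txt')
#   estimated_beats = mir_eval.io.load_events('estimated.txt')
#   scores = mir_eval.beat.evaluate(reference_beats, estimated_beats)
#   # 'scores' maps metric names to values, as printed by this script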
| mit | -5,275,290,441,729,628,000 | 29.933333 | 77 | 0.597522 | false |
LukeM12/samba | source4/scripting/devel/speedtest.py | 31 | 8527 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# This speed test aims to show the difference in execution time for bulk
# creation of user objects. This will help us compare
# Samba4 vs MS Active Directory performance.
# Copyright (C) Zahari Zahariev <[email protected]> 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import optparse
import sys
import time
import base64
from decimal import Decimal
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
samba.ensure_external_module("subunit", "subunit/python")
import samba.getopt as options
from ldb import (
SCOPE_BASE, SCOPE_SUBTREE, LdbError, ERR_NO_SUCH_OBJECT,
ERR_UNWILLING_TO_PERFORM, ERR_INSUFFICIENT_ACCESS_RIGHTS)
from samba.ndr import ndr_pack, ndr_unpack
from samba.dcerpc import security
from samba.auth import system_session
from samba import gensec, sd_utils
from samba.samdb import SamDB
from samba.credentials import Credentials
import samba.tests
from samba.tests import delete_force
from subunit.run import SubunitTestRunner
import unittest
parser = optparse.OptionParser("speedtest.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
#
# Tests start here
#
class SpeedTest(samba.tests.TestCase):
def find_domain_sid(self, ldb):
res = ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_BASE)
return ndr_unpack(security.dom_sid,res[0]["objectSid"][0])
def setUp(self):
super(SpeedTest, self).setUp()
self.ldb_admin = ldb
self.base_dn = ldb.domain_dn()
self.domain_sid = security.dom_sid(ldb.get_domain_sid())
self.user_pass = "samba123@"
print "baseDN: %s" % self.base_dn
def create_user(self, user_dn):
ldif = """
dn: """ + user_dn + """
sAMAccountName: """ + user_dn.split(",")[0][3:] + """
objectClass: user
unicodePwd:: """ + base64.b64encode(("\"%s\"" % self.user_pass).encode('utf-16-le')) + """
url: www.example.com
"""
self.ldb_admin.add_ldif(ldif)
def create_group(self, group_dn, desc=None):
ldif = """
dn: """ + group_dn + """
objectClass: group
sAMAccountName: """ + group_dn.split(",")[0][3:] + """
groupType: 4
url: www.example.com
"""
self.ldb_admin.add_ldif(ldif)
def create_bundle(self, count):
for i in range(count):
self.create_user("cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn))
def remove_bundle(self, count):
for i in range(count):
delete_force(self.ldb_admin, "cn=speedtestuser%d,cn=Users,%s" % (i+1, self.base_dn))
def remove_test_users(self):
res = ldb.search(base="cn=Users,%s" % self.base_dn, expression="(objectClass=user)", scope=SCOPE_SUBTREE)
dn_list = [item.dn for item in res if "speedtestuser" in str(item.dn)]
for dn in dn_list:
delete_force(self.ldb_admin, dn)
class SpeedTestAddDel(SpeedTest):
def setUp(self):
super(SpeedTestAddDel, self).setUp()
def run_bundle(self, num):
print "\n=== Test ADD/DEL %s user objects ===\n" % num
avg_add = Decimal("0.0")
avg_del = Decimal("0.0")
for x in [1, 2, 3]:
start = time.time()
self.create_bundle(num)
res_add = Decimal( str(time.time() - start) )
avg_add += res_add
print " Attempt %s ADD: %.3fs" % ( x, float(res_add) )
#
start = time.time()
self.remove_bundle(num)
res_del = Decimal( str(time.time() - start) )
avg_del += res_del
print " Attempt %s DEL: %.3fs" % ( x, float(res_del) )
print "Average ADD: %.3fs" % float( Decimal(avg_add) / Decimal("3.0") )
print "Average DEL: %.3fs" % float( Decimal(avg_del) / Decimal("3.0") )
print ""
def test_00000(self):
""" Remove possibly undeleted test users from previous test
"""
self.remove_test_users()
def test_00010(self):
self.run_bundle(10)
def test_00100(self):
self.run_bundle(100)
def test_01000(self):
self.run_bundle(1000)
def _test_10000(self):
""" This test should be enabled preferably against MS Active Directory.
It takes quite the time against Samba4 (1-2 days).
"""
self.run_bundle(10000)
class AclSearchSpeedTest(SpeedTest):
def setUp(self):
super(AclSearchSpeedTest, self).setUp()
self.ldb_admin.newuser("acltestuser", "samba123@")
self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
self.ldb_user = self.get_ldb_connection("acltestuser", "samba123@")
self.user_sid = self.sd_utils.get_object_sid(self.get_user_dn("acltestuser"))
def tearDown(self):
super(AclSearchSpeedTest, self).tearDown()
delete_force(self.ldb_admin, self.get_user_dn("acltestuser"))
def run_search_bundle(self, num, _ldb):
print "\n=== Creating %s user objects ===\n" % num
self.create_bundle(num)
mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
for i in range(num):
self.sd_utils.dacl_add_ace("cn=speedtestuser%d,cn=Users,%s" %
(i+1, self.base_dn), mod)
print "\n=== %s user objects created ===\n" % num
print "\n=== Test search on %s user objects ===\n" % num
avg_search = Decimal("0.0")
for x in [1, 2, 3]:
start = time.time()
res = _ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_SUBTREE)
res_search = Decimal( str(time.time() - start) )
avg_search += res_search
print " Attempt %s SEARCH: %.3fs" % ( x, float(res_search) )
print "Average Search: %.3fs" % float( Decimal(avg_search) / Decimal("3.0") )
self.remove_bundle(num)
def get_user_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
def get_ldb_connection(self, target_username, target_password):
creds_tmp = Credentials()
creds_tmp.set_username(target_username)
creds_tmp.set_password(target_password)
creds_tmp.set_domain(creds.get_domain())
creds_tmp.set_realm(creds.get_realm())
creds_tmp.set_workstation(creds.get_workstation())
creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
| gensec.FEATURE_SEAL)
ldb_target = SamDB(url=host, credentials=creds_tmp, lp=lp)
return ldb_target
def test_search_01000(self):
self.run_search_bundle(1000, self.ldb_admin)
def test_search2_01000(self):
# allow the user to see objects but not attributes, all attributes will be filtered out
mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
self.sd_utils.dacl_add_ace("CN=Users,%s" % self.base_dn, mod)
self.run_search_bundle(1000, self.ldb_user)
# Important unit running information
if not "://" in host:
host = "ldap://%s" % host
ldb_options = ["modules:paged_searches"]
ldb = SamDB(host, credentials=creds, session_info=system_session(), lp=lp, options=ldb_options)
runner = SubunitTestRunner()
rc = 0
if not runner.run(unittest.makeSuite(SpeedTestAddDel)).wasSuccessful():
rc = 1
if not runner.run(unittest.makeSuite(AclSearchSpeedTest)).wasSuccessful():
rc = 1
sys.exit(rc)
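# Invocation sketch (illustrative; -U is the standard samba credentials
# option parsed by CredentialsOptions above):
#   python speedtest.py -U 'Administrator%secret' ldap://dc.example.com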
| gpl-3.0 | -389,703,416,252,953,300 | 34.381743 | 113 | 0.634338 | false |
jakar/odoo-bank-statement-reconcile | __unported__/account_statement_completion_label/partner.py | 18 | 1385 | # -*- coding: utf-8 -*-
###############################################################################
#
# account_statement_completion_label for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com). All Rights Reserved
# @author Benoît GUILLOT <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp.osv import fields, orm
class res_partner(orm.Model):
_inherit = "res.partner"
_columns = {
'bank_statement_label': fields.one2many('account.statement.label',
'partner_id',
'Bank Statement Label'),
}
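    # Note (sketch): the one2many above presumes a matching many2one on the
    # label model, e.g. in account.statement.label's definition:
    #     'partner_id': fields.many2one('res.partner', 'Partner'),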
| agpl-3.0 | -8,480,900,677,500,525,000 | 40.939394 | 79 | 0.585983 | false |
kgullikson88/IGRINS_Scripts | Search_Fast.py | 1 | 2751 | import sys
import os
import GenericSearch
import pandas
# Define regions contaminated by telluric residuals or other defects. We will not use those regions in the cross-correlation
badregions = [[0, 1510],     # Blue end of H band (lots of water absorption)
              # [1561, 1615],  # CO2 band that is often poorly corrected (for now at least...)
              [1740, 2090],  # In between H and K bands (lots of water absorption)
              [2348, 2500],  # Red end of K band (lots of water absorption)
              [1510, 1520],  # Temporary...
              [1688, 1740],
              [2313, 2350]]
if "darwin" in sys.platform:
modeldir = "/Volumes/DATADRIVE/Stellar_Models/Sorted/Stellar/NearIR/"
elif "linux" in sys.platform:
modeldir = "/media/FreeAgent_Drive/SyntheticSpectra/Sorted/Stellar/NearIR/"
else:
modeldir = raw_input("sys.platform not recognized. Please enter model directory below: ")
if not modeldir.endswith("/"):
modeldir = modeldir + "/"
def add_oh_lines(oh_file, badregions=[], minstrength=1.0, tol=0.05):
    oh_data = pandas.read_csv(oh_file, header=None, sep=" ", skipinitialspace=True, names=['wave', 'strength'])
oh = oh_data[oh_data['strength'] > minstrength]
n = 1.0 + 2.735182e-4 + 131.4182 / oh['wave'] ** 2 + 2.76249e8 / oh['wave'] ** 4
oh['wave'] = oh['wave'] / (n * 10.0)
for wave in oh['wave'].values:
badregions.append([wave - tol, wave + tol])
return badregions
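# Usage sketch (illustrative): mask +/-0.05 nm around every OH line whose
# catalog strength exceeds 1.0; the helper converts the catalog's vacuum
# Angstroms to air-wavelength nm before building the exclusion windows.
#   regions = add_oh_lines('ohlines.dat', badregions=[], minstrength=1.0)
#   # each entry is a [wave - tol, wave + tol] pair excluded from the search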
if __name__ == "__main__":
#Parse command line arguments:
fileList = []
interp_regions = []
extensions = True
tellurics = False
trimsize = 100
    for arg in sys.argv[1:]:
        if "-e" in arg:
            extensions = False
        elif "-t" in arg:
            tellurics = True  # telluric lines modeled but not removed
        else:
            fileList.append(arg)
# Add strong oh lines to interp_regions
oh_file = "{}/School/Research/IGRINS_data/plp/master_calib/ohlines.dat".format(os.environ['HOME'])
interp_regions = add_oh_lines(oh_file, badregions=interp_regions)
GenericSearch.CompanionSearch(fileList,
extensions=extensions,
resolution=45000.0,
trimsize=trimsize,
vsini_values=[1.0, 10.0, 20.0, 30.0, 40.0],
observatory="McDonald",
vbary_correct=True,
debug=False,
badregions=badregions,
interp_regions=interp_regions,
modeldir=modeldir,
addmode="weighted")
| mit | -3,009,733,239,299,444,000 | 38.3 | 124 | 0.556525 | false |
RubenKelevra/rethinkdb | external/v8_3.30.33.16/build/gyp/PRESUBMIT.py | 496 | 3373 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
# Needs style fix.
'pylib/gyp/generator/xcode.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
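# Convention note (sketch): depot_tools runs CheckChangeOnUpload for
# 'git cl upload' and CheckChangeOnCommit when the change is landed, which
# is why only the cheap checks run above and pylint/tree-status run below.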
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac', 'gyp-android']
| agpl-3.0 | 4,849,723,541,785,945,000 | 27.108333 | 77 | 0.628521 | false |
jarvys/django-1.7-jdb | django/middleware/common.py | 52 | 7351 | import hashlib
import logging
import re
import warnings
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
logger.warning('Forbidden (User agent): %s', request.path,
extra={
'status_code': 403,
'request': request
}
)
return http.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH
# and settings.PREPEND_WWW
host = request.get_host()
old_url = [host, request.path]
new_url = old_url[:]
if (settings.PREPEND_WWW and old_url[0] and
not old_url[0].startswith('www.')):
new_url[0] = 'www.' + old_url[0]
# Append a slash if APPEND_SLASH is set and the URL doesn't have a
# trailing slash and there is no pattern for the current path
if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
urlconf = getattr(request, 'urlconf', None)
if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
raise RuntimeError((""
"You called this URL via POST, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining POST data. "
"Change your form to point to %s%s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django "
"settings.") % (new_url[0], new_url[1]))
if new_url == old_url:
# No redirects required.
return
if new_url[0]:
newurl = "%s://%s%s" % (
request.scheme,
new_url[0], urlquote(new_url[1]))
else:
newurl = urlquote(new_url[1])
if request.META.get('QUERY_STRING', ''):
if six.PY3:
newurl += '?' + request.META['QUERY_STRING']
else:
# `query_string` is a bytestring. Appending it to the unicode
# string `newurl` will fail if it isn't ASCII-only. This isn't
# allowed; only broken software generates such query strings.
# Better drop the invalid query string than crash (#15152).
try:
newurl += '?' + request.META['QUERY_STRING'].decode()
except UnicodeDecodeError:
pass
return http.HttpResponsePermanentRedirect(newurl)
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
"""
if settings.SEND_BROKEN_LINK_EMAILS:
warnings.warn("SEND_BROKEN_LINK_EMAILS is deprecated. "
"Use BrokenLinkEmailsMiddleware instead.",
RemovedInDjango18Warning, stacklevel=2)
BrokenLinkEmailsMiddleware().process_response(request, response)
if settings.USE_ETAGS:
if response.has_header('ETag'):
etag = response['ETag']
elif response.streaming:
etag = None
else:
etag = '"%s"' % hashlib.md5(response.content).hexdigest()
if etag is not None:
if (200 <= response.status_code < 300
and request.META.get('HTTP_IF_NONE_MATCH') == etag):
cookies = response.cookies
response = http.HttpResponseNotModified()
response.cookies = cookies
else:
response['ETag'] = etag
return response
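# Settings sketch (illustrative values; the names are the standard Django
# settings consulted above):
#   APPEND_SLASH = True   # redirect /foo -> /foo/ when only /foo/ resolves
#   PREPEND_WWW = False   # when True, redirect example.com -> www.example.com
#   USE_ETAGS = True      # hash bodies and answer If-None-Match with a 304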
class BrokenLinkEmailsMiddleware(object):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
# '?' in referer is identified as search engine source
if (not referer or
(not self.is_internal_request(domain, referer) and '?' in referer)):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
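# Example IGNORABLE_404_URLS setting (illustrative patterns only):
#   import re
#   IGNORABLE_404_URLS = (
#       re.compile(r'^/favicon\.ico$'),
#       re.compile(r'^/robots\.txt$'),
#   )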
| bsd-3-clause | -9,207,017,432,528,226,000 | 41.005714 | 91 | 0.566726 | false |
waseem18/oh-mainline | mysite/search/migrations/0041_add_created_and_modified_timestamps_to_all_models.py | 17 | 14730 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'BugAnswer.created_date'
db.add_column('search_buganswer', 'created_date', orm['search.buganswer:created_date'])
# Adding field 'ProjectInvolvementQuestion.created_date'
db.add_column('search_projectinvolvementquestion', 'created_date', orm['search.projectinvolvementquestion:created_date'])
# Adding field 'Bug.modified_date'
db.add_column('search_bug', 'modified_date', orm['search.bug:modified_date'])
# Adding field 'HitCountCache.created_date'
db.add_column('search_hitcountcache', 'created_date', orm['search.hitcountcache:created_date'])
# Adding field 'HitCountCache.modified_date'
db.add_column('search_hitcountcache', 'modified_date', orm['search.hitcountcache:modified_date'])
# Adding field 'Answer.modified_date'
db.add_column('search_answer', 'modified_date', orm['search.answer:modified_date'])
# Adding field 'Answer.created_date'
db.add_column('search_answer', 'created_date', orm['search.answer:created_date'])
# Adding field 'Bug.created_date'
db.add_column('search_bug', 'created_date', orm['search.bug:created_date'])
# Adding field 'ProjectInvolvementQuestion.modified_date'
db.add_column('search_projectinvolvementquestion', 'modified_date', orm['search.projectinvolvementquestion:modified_date'])
# Adding field 'Project.created_date'
db.add_column('search_project', 'created_date', orm['search.project:created_date'])
# Adding field 'Project.modified_date'
db.add_column('search_project', 'modified_date', orm['search.project:modified_date'])
# Adding field 'BugAnswer.modified_date'
db.add_column('search_buganswer', 'modified_date', orm['search.buganswer:modified_date'])
# Adding field 'Bug.as_appears_in_distribution'
#db.add_column('search_bug', 'as_appears_in_distribution', orm['search.bug:as_appears_in_distribution'])
# Changing field 'Bug.last_polled'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0)))
db.alter_column('search_bug', 'last_polled', orm['search.bug:last_polled'])
def backwards(self, orm):
# Deleting field 'BugAnswer.created_date'
db.delete_column('search_buganswer', 'created_date')
# Deleting field 'ProjectInvolvementQuestion.created_date'
db.delete_column('search_projectinvolvementquestion', 'created_date')
# Deleting field 'Bug.modified_date'
db.delete_column('search_bug', 'modified_date')
# Deleting field 'HitCountCache.created_date'
db.delete_column('search_hitcountcache', 'created_date')
# Deleting field 'HitCountCache.modified_date'
db.delete_column('search_hitcountcache', 'modified_date')
# Deleting field 'Answer.modified_date'
db.delete_column('search_answer', 'modified_date')
# Deleting field 'Answer.created_date'
db.delete_column('search_answer', 'created_date')
# Deleting field 'Bug.created_date'
db.delete_column('search_bug', 'created_date')
# Deleting field 'ProjectInvolvementQuestion.modified_date'
db.delete_column('search_projectinvolvementquestion', 'modified_date')
# Deleting field 'Project.created_date'
db.delete_column('search_project', 'created_date')
# Deleting field 'Project.modified_date'
db.delete_column('search_project', 'modified_date')
# Deleting field 'BugAnswer.modified_date'
db.delete_column('search_buganswer', 'modified_date')
# Deleting field 'Bug.as_appears_in_distribution'
db.delete_column('search_bug', 'as_appears_in_distribution')
# Changing field 'Bug.last_polled'
# (to signature: django.db.models.fields.DateTimeField())
db.alter_column('search_bug', 'last_polled', orm['search.bug:last_polled'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.answer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'search.bug': {
'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.buganswer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'details': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']", 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bug_answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'search.hitcountcache': {
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'search.projectinvolvementquestion': {
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True', 'null': 'True'}),
'key_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['search']
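# Applied like any other South migration (sketch):
#   ./manage.py migrate search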
| agpl-3.0 | 8,717,758,586,210,936,000 | 64.466667 | 160 | 0.5852 | false |
fbossy/SickRage | lib/guessit/plugins/transformers.py | 33 | 9580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from pkg_resources import EntryPoint
from guessit.options import reload as reload_options
from stevedore import ExtensionManager
from stevedore.extension import Extension
log = getLogger(__name__)
class Transformer(object): # pragma: no cover
def __init__(self, priority=0):
self.priority = priority
self.log = getLogger(self.name)
@property
def name(self):
return self.__class__.__name__
def supported_properties(self):
return {}
def second_pass_options(self, mtree, options=None):
return None
def should_process(self, mtree, options=None):
return True
def process(self, mtree, options=None):
pass
def post_process(self, mtree, options=None):
pass
def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
pass
def rate_quality(self, guess, *props):
return 0
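# Minimal subclass sketch (illustrative; 'GuessFoo' and its property are
# invented names, not a real guessit transformer):
#
#   class GuessFoo(Transformer):
#       def __init__(self):
#           Transformer.__init__(self, priority=100)
#       def supported_properties(self):
#           return {'foo': ['bar']}
#       def process(self, mtree, options=None):
#           pass  # inspect and annotate the match tree here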
class CustomTransformerExtensionManager(ExtensionManager):
def __init__(self, namespace='guessit.transformer', invoke_on_load=True,
invoke_args=(), invoke_kwds={}, propagate_map_exceptions=True, on_load_failure_callback=None,
verify_requirements=False):
super(CustomTransformerExtensionManager, self).__init__(namespace=namespace,
invoke_on_load=invoke_on_load,
invoke_args=invoke_args,
invoke_kwds=invoke_kwds,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback,
verify_requirements=verify_requirements)
@staticmethod
def order_extensions(extensions):
"""Order the loaded transformers
It should follow those rules
- website before language (eg: tvu.org.ru vs russian)
- language before episodes_rexps
- properties before language (eg: he-aac vs hebrew)
- release_group before properties (eg: XviD-?? vs xvid)
"""
extensions.sort(key=lambda ext: -ext.obj.priority)
return extensions
@staticmethod
def _load_one_plugin(ep, invoke_on_load, invoke_args, invoke_kwds, verify_requirements=True):
if not ep.dist:
# `require` argument of ep.load() is deprecated in newer versions of setuptools
if hasattr(ep, 'resolve'):
plugin = ep.resolve()
elif hasattr(ep, '_load'):
plugin = ep._load()
else:
plugin = ep.load(require=False)
else:
plugin = ep.load()
if invoke_on_load:
obj = plugin(*invoke_args, **invoke_kwds)
else:
obj = None
return Extension(ep.name, ep, plugin, obj)
def _load_plugins(self, invoke_on_load, invoke_args, invoke_kwds, verify_requirements):
return self.order_extensions(super(CustomTransformerExtensionManager, self)._load_plugins(invoke_on_load, invoke_args, invoke_kwds, verify_requirements))
def objects(self):
return self.map(self._get_obj)
@staticmethod
def _get_obj(ext):
return ext.obj
def object(self, name):
try:
return self[name].obj
except KeyError:
return None
def register_module(self, name=None, module_name=None, attrs=(), entry_point=None):
if entry_point:
ep = EntryPoint.parse(entry_point)
else:
ep = EntryPoint(name, module_name, attrs)
loaded = self._load_one_plugin(ep, invoke_on_load=True, invoke_args=(), invoke_kwds={})
if loaded:
self.extensions.append(loaded)
self.extensions = self.order_extensions(self.extensions)
self._extensions_by_name = None
class DefaultTransformerExtensionManager(CustomTransformerExtensionManager):
@property
def _internal_entry_points(self):
return ['split_path_components = guessit.transfo.split_path_components:SplitPathComponents',
'guess_filetype = guessit.transfo.guess_filetype:GuessFiletype',
'split_explicit_groups = guessit.transfo.split_explicit_groups:SplitExplicitGroups',
'guess_date = guessit.transfo.guess_date:GuessDate',
'guess_website = guessit.transfo.guess_website:GuessWebsite',
'guess_release_group = guessit.transfo.guess_release_group:GuessReleaseGroup',
'guess_properties = guessit.transfo.guess_properties:GuessProperties',
'guess_language = guessit.transfo.guess_language:GuessLanguage',
'guess_video_rexps = guessit.transfo.guess_video_rexps:GuessVideoRexps',
'guess_episodes_rexps = guessit.transfo.guess_episodes_rexps:GuessEpisodesRexps',
'guess_weak_episodes_rexps = guessit.transfo.guess_weak_episodes_rexps:GuessWeakEpisodesRexps',
'guess_bonus_features = guessit.transfo.guess_bonus_features:GuessBonusFeatures',
'guess_year = guessit.transfo.guess_year:GuessYear',
'guess_country = guessit.transfo.guess_country:GuessCountry',
'guess_idnumber = guessit.transfo.guess_idnumber:GuessIdnumber',
'split_on_dash = guessit.transfo.split_on_dash:SplitOnDash',
'guess_episode_info_from_position = guessit.transfo.guess_episode_info_from_position:GuessEpisodeInfoFromPosition',
'guess_movie_title_from_position = guessit.transfo.guess_movie_title_from_position:GuessMovieTitleFromPosition',
'guess_episode_details = guessit.transfo.guess_episode_details:GuessEpisodeDetails',
'expected_series = guessit.transfo.expected_series:ExpectedSeries',
                'expected_title = guessit.transfo.expected_title:ExpectedTitle']
def _find_entry_points(self, namespace):
entry_points = {}
# Internal entry points
if namespace == self.namespace:
for internal_entry_point_str in self._internal_entry_points:
internal_entry_point = EntryPoint.parse(internal_entry_point_str)
entry_points[internal_entry_point.name] = internal_entry_point
# Package entry points
setuptools_entrypoints = super(DefaultTransformerExtensionManager, self)._find_entry_points(namespace)
for setuptools_entrypoint in setuptools_entrypoints:
entry_points[setuptools_entrypoint.name] = setuptools_entrypoint
return list(entry_points.values())
_extensions = None
def all_transformers():
return _extensions.objects()
def get_transformer(name):
return _extensions.object(name)
def add_transformer(name, module_name, class_name):
    """
    Add a transformer from its parts
    :param name: the name of the transformer. ie: 'guess_regexp_id'
    :param module_name: the module name. ie: 'flexget.utils.parsers.transformers.guess_regexp_id'
    :param class_name: the class name. ie: 'GuessRegexpId'
    """
    _extensions.register_module(name, module_name, (class_name,))
def add_transformer_from_entry_point(entry_point):
    """
    Add a transformer from an entry point specification
    :param entry_point: entry point spec format. ie: 'guess_regexp_id = flexget.utils.parsers.transformers.guess_regexp_id:GuessRegexpId'
    """
    _extensions.register_module(entry_point=entry_point)
def reload(custom=False):
"""
Reload extension manager with default or custom one.
:param custom: if True, custom manager will be used, else default one.
Default manager will load default extensions from guessit and setuptools packaging extensions
Custom manager will not load default extensions from guessit, using only setuptools packaging extensions.
:type custom: boolean
"""
global _extensions
if custom:
_extensions = CustomTransformerExtensionManager()
else:
_extensions = DefaultTransformerExtensionManager()
reload_options(all_transformers())
reload()
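# Registration sketch (illustrative; the module path and class name are
# invented for demonstration):
#   add_transformer_from_entry_point(
#       'guess_regexp_id = myplugins.guess_regexp_id:GuessRegexpId')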
| gpl-3.0 | -433,597,681,300,607,170 | 42.153153 | 161 | 0.621399 | false |
jedi22/osquery | tools/tests/test_osqueryi.py | 5 | 9121 | #!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the Apache 2.0 license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pyexpect.replwrap will not work with unicode_literals
# from __future__ import unicode_literals
import os
import random
import sys
import unittest
import utils
# osquery-specific testing utils
import test_base
SHELL_TIMEOUT = 10
EXIT_CATASTROPHIC = 78
class OsqueryiTest(unittest.TestCase):
def setUp(self):
self.binary = test_base.getLatestOsqueryBinary('osqueryi')
self.osqueryi = test_base.OsqueryWrapper(command=self.binary)
self.dbpath = "%s%s" % (
test_base.CONFIG["options"]["database_path"],
str(random.randint(1000, 9999)))
@unittest.skipIf(os.name == "nt", "stderr tests not supported on Windows.")
def test_error(self):
        '''Test that we throw an error on a bad query'''
self.osqueryi.run_command(' ')
self.assertRaises(test_base.OsqueryException,
self.osqueryi.run_query, 'foo')
def test_config_check_success(self):
'''Test that a 0-config passes'''
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--config_path=%s/test.config" % test_base.SCRIPT_DIR,
"--extensions_autoload=",
"--verbose",
],
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
def test_config_dump(self):
'''Test that config raw output is dumped when requested'''
config = os.path.join(test_base.SCRIPT_DIR, "test_noninline_packs.conf")
proc = test_base.TimeoutRunner([
self.binary,
"--config_dump",
"--config_path=%s" % config,
"--extensions_autoload=",
"--verbose",
],
SHELL_TIMEOUT)
content = ""
with open(config, 'r') as fh:
content = fh.read()
actual = proc.stdout
if os.name == "nt":
actual = actual.replace('\r', '')
self.assertEqual(actual, '{"%s": %s}\n' % (config, content))
print (proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
@test_base.flaky
def test_config_check_failure_invalid_path(self):
'''Test that a missing config fails'''
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--disable_extensions",
"--verbose",
"--config_path=/this/path/does/not/exist"
],
SHELL_TIMEOUT)
self.assertNotEqual(proc.stderr, "")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 1)
def test_config_check_failure_valid_path(self):
# Now with a valid path, but invalid content.
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--extensions_autoload=",
"--verbose",
"--database_path=%s" % (self.dbpath),
"--config_path=%s" % os.path.join(test_base.SCRIPT_DIR, "test.badconfig")
],
SHELL_TIMEOUT)
self.assertEqual(proc.proc.poll(), 1)
self.assertNotEqual(proc.stderr, "")
def test_config_check_failure_missing_plugin(self):
# Finally with a missing config plugin
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--database_path=%s" % (self.dbpath),
"--extensions_autoload=",
"--verbose",
"--config_plugin=does_not_exist"
],
SHELL_TIMEOUT)
self.assertNotEqual(proc.stderr, "")
self.assertNotEqual(proc.proc.poll(), 0)
# Also do not accept a SIGSEG
self.assertEqual(proc.proc.poll(), EXIT_CATASTROPHIC)
def test_config_check_example(self):
'''Test that the example config passes'''
example_path = os.path.join("deployment", "osquery.example.conf")
proc = test_base.TimeoutRunner([
self.binary,
"--config_check",
"--config_path=%s" % os.path.join(test_base.SCRIPT_DIR, "..", example_path),
"--extensions_autoload=",
"--verbose",
],
SHELL_TIMEOUT)
self.assertEqual(proc.stdout, "")
print (proc.stdout)
print (proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
def test_meta_commands(self):
'''Test the supported meta shell/help/info commands'''
commands = [
'.help',
'.all',
'.all osquery_info',
'.all this_table_does_not_exist',
'.echo',
'.echo on',
'.echo off',
'.header',
'.header off',
'.header on',
'.mode',
'.mode csv',
'.mode column',
'.mode line',
'.mode list',
'.mode pretty',
'.mode this_mode_does_not_exists',
'.nullvalue',
'.nullvalue ""',
'.print',
'.print hello',
'.schema osquery_info',
'.schema this_table_does_not_exist',
'.schema',
'.separator',
'.separator ,',
'.show',
'.tables osquery',
'.tables osquery_info',
'.tables this_table_does_not_exist',
'.tables',
'.trace',
'.width',
'.width 80',
'.timer',
'.timer on',
'.timer off'
]
        for command in commands:
            self.osqueryi.run_command(command)
def test_json_output(self):
'''Test that the output of --json is valid json'''
proc = test_base.TimeoutRunner([
self.binary,
"select 0",
"--disable_extensions",
"--json",
],
SHELL_TIMEOUT
)
if os.name == "nt":
self.assertEqual(proc.stdout, "[\r\n {\"0\":\"0\"}\r\n]\r\n")
else:
self.assertEqual(proc.stdout, "[\n {\"0\":\"0\"}\n]\n")
print(proc.stdout)
print(proc.stderr)
self.assertEqual(proc.proc.poll(), 0)
@test_base.flaky
def test_time(self):
'''Demonstrating basic usage of OsqueryWrapper with the time table'''
self.osqueryi.run_command(' ') # flush error output
result = self.osqueryi.run_query(
'SELECT hour, minutes, seconds FROM time;')
self.assertEqual(len(result), 1)
row = result[0]
self.assertTrue(0 <= int(row['hour']) <= 24)
self.assertTrue(0 <= int(row['minutes']) <= 60)
self.assertTrue(0 <= int(row['seconds']) <= 60)
# TODO: Running foreign table tests as non-priv user fails
@test_base.flaky
@unittest.skipIf(os.name == "nt", "foreign table tests not supported on Windows.")
def test_foreign_tables(self):
'''Requires the --enable_foreign flag to add at least one table.'''
self.osqueryi.run_command(' ')
query = 'SELECT count(1) c FROM osquery_registry;'
result = self.osqueryi.run_query(query)
before = int(result[0]['c'])
osqueryi2 = test_base.OsqueryWrapper(self.binary,
args={"enable_foreign": True})
osqueryi2.run_command(' ')
# This execution fails if the user is not Administrator on Windows
result = osqueryi2.run_query(query)
after = int(result[0]['c'])
self.assertGreater(after, before)
@test_base.flaky
def test_time_using_all(self):
self.osqueryi.run_command(' ')
result = self.osqueryi.run_command('.all time')
self.assertNotEqual(result.rstrip(), "Error querying table: time")
@test_base.flaky
def test_config_bad_json(self):
self.osqueryi = test_base.OsqueryWrapper(self.binary,
args={"config_path": "/"})
result = self.osqueryi.run_query('SELECT * FROM time;')
self.assertEqual(len(result), 1)
@test_base.flaky
def test_atc(self):
local_osquery_instance = test_base.OsqueryWrapper(self.binary,
args={"config_path": "test.config"})
result = local_osquery_instance.run_query('SELECT a_number FROM test_atc')
self.assertEqual(result, [{'a_number':'314159'}])
if __name__ == '__main__':
test_base.Tester().run()
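# Run directly from a source checkout (sketch; test_base resolves the most
# recently built osqueryi binary):
#   python tools/tests/test_osqueryi.py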
| bsd-3-clause | 663,276,022,926,242,600 | 33.94636 | 92 | 0.544129 | false |
sonuyos/couchpotato | libs/html5lib/treebuilders/etree.py | 721 | 12609 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
# Use the superclass constructor to set all properties on the
# wrapper element
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
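    # Hedged illustration (assumed input): parsing "<html><body>" and feeding the
    # resulting tree through testSerializer yields lines shaped like
    #   #document
    #   |  <html>
    #   |    <head>
    #   |    <body>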
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
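# Hedged usage sketch (html5lib's public API, defined outside this module):
#   import html5lib
#   doc = html5lib.parse("<p>hi</p>", treebuilder="etree")
# which internally resolves to getETreeModule(ElementTree).TreeBuilder.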
| gpl-3.0 | -1,413,215,153,787,059,500 | 36.41543 | 85 | 0.509557 | false |
bq/bitbloq-offline | app/res/web2board/linux/res/Scons/sconsFiles/SCons/Tool/rpmutils.py | 6 | 17072 | """SCons.Tool.rpmutils.py
RPM specific helper routines for general usage in the test framework
and SCons core modules.
Since we check for the RPM package target name in several places,
we have to know which machine/system name RPM will use for the current
hardware setup. The following dictionaries and functions try to
mimic the exact naming rules of the RPM source code.
They were directly derived from the file "rpmrc.in" of the version
rpm-4.9.1.3. For updating to a more recent version of RPM, this Python
script can be used standalone. The usage() function below shows the
exact syntax.
"""
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/rpmutils.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import platform
import subprocess
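# Standalone update sketch (mirrors usage() below; rpmrc.in must come from an
# RPM source tree):
#   python rpmutils.py rpmrc.in rpmutils.py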
# Start of rpmrc dictionaries (Marker, don't change or remove!)
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine(use_rpm_default=True):
""" Return the canonicalized machine name. """
if use_rpm_default:
try:
# This should be the most reliable way to get the default arch
rmachine = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False).rstrip()
except Exception as e:
# Something went wrong, try again by looking up platform.machine()
return defaultMachine(False)
else:
rmachine = platform.machine()
# Try to lookup the string in the canon table
if rmachine in arch_canon:
rmachine = arch_canon[rmachine][0]
return rmachine
def defaultSystem():
""" Return the canonicalized system name. """
rsystem = platform.system()
# Try to lookup the string in the canon tables
if rsystem in os_canon:
rsystem = os_canon[rsystem][0]
return rsystem
def defaultNames():
""" Return the canonicalized machine and system name. """
return defaultMachine(), defaultSystem()
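# Hedged example: on a typical 64-bit Linux host defaultNames() returns
# ('x86_64', 'Linux'), assuming rpm evaluates %_target_cpu to x86_64 before
# the canon tables above are applied.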
def updateRpmDicts(rpmrc, pyfile):
""" Read the given rpmrc file with RPM definitions and update the
info dictionaries in the file pyfile with it.
The arguments will usually be 'rpmrc.in' from a recent RPM source
tree, and 'rpmutils.py' referring to this script itself.
See also usage() below.
"""
try:
# Read old rpmutils.py file
oldpy = open(pyfile,"r").readlines()
# Read current rpmrc.in file
rpm = open(rpmrc,"r").readlines()
# Parse for data
data = {}
# Allowed section names that get parsed
sections = ['optflags',
'arch_canon',
'os_canon',
'buildarchtranslate',
'arch_compat',
'os_compat',
'buildarch_compat']
for l in rpm:
l = l.rstrip('\n').replace(':',' ')
# Skip comments
if l.lstrip().startswith('#'):
continue
tokens = l.strip().split()
if len(tokens):
key = tokens[0]
if key in sections:
# Have we met this section before?
                    if key not in data:
# No, so insert it
data[key] = {}
# Insert data
data[key][tokens[1]] = tokens[2:]
# Write new rpmutils.py file
out = open(pyfile,"w")
pm = 0
for l in oldpy:
if pm:
if l.startswith('# End of rpmrc dictionaries'):
pm = 0
out.write(l)
else:
out.write(l)
if l.startswith('# Start of rpmrc dictionaries'):
pm = 1
# Write data sections to single dictionaries
for key, entries in data.iteritems():
out.write("%s = {\n" % key)
for arch in sorted(entries.keys()):
out.write(" '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
out.write("}\n\n")
out.close()
except:
pass
def usage():
print "rpmutils.py rpmrc.in rpmutils.py"
def main():
import sys
if len(sys.argv) < 3:
usage()
sys.exit(0)
updateRpmDicts(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| gpl-3.0 | -5,800,444,543,613,495,000 | 30.440147 | 107 | 0.519154 | false |
Etxea/gestion_eide_web | grupos/migrations/0003_auto__add_field_alumno_activo.py | 1 | 2078 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Alumno.activo'
db.add_column(u'alumnos_alumno', 'activo',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Alumno.activo'
db.delete_column(u'alumnos_alumno', 'activo')
models = {
u'alumnos.alumno': {
'Meta': {'object_name': 'Alumno'},
'activo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'apellido1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'apellido2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cp': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5'}),
'cuenta_bancaria': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25'}),
'dni': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'localidad': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25'}),
'nombre': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '25'}),
'telefono1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9'}),
'telefono2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '9', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['alumnos'] | mit | 7,626,090,300,032,314,000 | 50.975 | 140 | 0.547642 | false |
Dhivyap/ansible | lib/ansible/module_utils/network/eos/providers/module.py | 20 | 2106 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.eos.providers import providers
from ansible.module_utils._text import to_text
class NetworkModule(AnsibleModule):
fail_on_missing_provider = True
def __init__(self, connection=None, *args, **kwargs):
super(NetworkModule, self).__init__(*args, **kwargs)
if connection is None:
connection = Connection(self._socket_path)
self.connection = connection
@property
def provider(self):
if not hasattr(self, '_provider'):
capabilities = self.from_json(self.connection.get_capabilities())
network_os = capabilities['device_info']['network_os']
network_api = capabilities['network_api']
if network_api == 'cliconf':
connection_type = 'network_cli'
cls = providers.get(network_os, self._name.split('.')[-1], connection_type)
if not cls:
msg = 'unable to find suitable provider for network os %s' % network_os
if self.fail_on_missing_provider:
self.fail_json(msg=msg)
else:
self.warn(msg)
obj = cls(self.params, self.connection, self.check_mode)
setattr(self, '_provider', obj)
return getattr(self, '_provider')
def get_facts(self, subset=None):
try:
self.provider.get_facts(subset)
except Exception as exc:
self.fail_json(msg=to_text(exc))
def edit_config(self, config_filter=None):
current_config = self.connection.get_config(flags=config_filter)
try:
commands = self.provider.edit_config(current_config)
changed = bool(commands)
return {'commands': commands, 'changed': changed}
except Exception as exc:
self.fail_json(msg=to_text(exc))
| gpl-3.0 | -4,444,893,630,275,901,000 | 32.967742 | 92 | 0.612061 | false |
ArneBachmann/configr | configr/test.py | 1 | 3339 | import doctest
import json
import logging
import os
import unittest
import sys
sys.path.insert(0, "..")
import configr
class Tests(unittest.TestCase):
''' Test suite. '''
def tests_metadata(_):
_.assertTrue(hasattr(configr, "version"))
_.assertTrue(hasattr(configr.version, "__version__"))
_.assertTrue(hasattr(configr.version, "__version_info__"))
def test_details(_):
try:
for file in (f for f in os.listdir() if f.endswith(configr.EXTENSION + ".bak")):
try: os.unlink(file)
except: pass
except: pass
c = configr.Configr("myapp", data = {"d": 2}, defaults = {"e": 1})
_.assertEqual("myapp", c.__name)
_.assertEqual("myapp", c["__name"])
    _.assertRaises(Exception, lambda: c["c"])  # missing key via dictionary access must raise
    _.assertRaises(Exception, lambda: c.c)  # missing key via attribute access must raise
_.assertEqual(2, c.d) # pre-defined data case
_.assertEqual(1, c["e"]) # default case
# Create some contents
c.a = "a"
c["b"] = "b"
_.assertEqual("a", c["a"])
_.assertEqual("b", c.b)
# Save to file
value = c.saveSettings(location = os.getcwd(), keys = ["a", "b"], clientCodeLocation = __file__) # CWD should be "tests" folder
_.assertIsNotNone(value.path)
_.assertIsNone(value.error)
_.assertEqual(value, c.__savedTo)
_.assertEqual(os.getcwd(), os.path.dirname(c.__savedTo.path))
_.assertEqual("a", c["a"])
_.assertEqual("b", c.b)
name = c.__savedTo.path
with open(name, "r") as fd: contents = json.loads(fd.read())
_.assertEqual({"a": "a", "b": "b"}, contents)
# Now load and see if all is correct
c = configr.Configr("myapp")
value = c.loadSettings(location = os.getcwd(), data = {"c": 33}, clientCodeLocation = __file__)
_.assertEqual(name, c.__loadedFrom.path)
_.assertIsNotNone(value.path)
_.assertIsNone(value.error)
_.assertEqual(value, c.__loadedFrom)
_.assertEqual(c.a, "a")
_.assertEqual(c["b"], "b")
_.assertEqual(c.c, 33)
os.unlink(value.path)
value = c.loadSettings(location = "bla", clientCodeLocation = __file__) # provoke error
_.assertIsNone(value.path)
_.assertIsNotNone(value.error)
# Now test removal
del c["b"]
del c.a
_.assertEqual(1, len(c.keys()))
_.assertIn("c", c.keys())
# Now stringify
_.assertEqual("Configr(c: 33)", str(c))
_.assertEqual("Configr(c: 33)", repr(c))
# Testing map functions: already done in doctest
# TODO test ignores option for saveSettings
def testNested(_):
c = configr.Configr(data = {"a": "a"}, defaults = configr.Configr(data = {"b": "b"}, defaults = configr.Configr(data = {"c": "c"})))
_.assertEqual("a", c.a)
_.assertEqual("b", c["b"])
_.assertEqual("c", c.c)
_.assertTrue("a" in c)
_.assertTrue("b" in c)
_.assertTrue("c" in c)
_.assertFalse("d" in c)
def load_tests(loader, tests, ignore):
''' The function name suffix "_tests" tells the unittest module about a test case. '''
tests.addTests(doctest.DocTestSuite(configr))
return tests
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG, stream = sys.stderr, format = "%(asctime)-25s %(levelname)-8s %(name)-12s | %(message)s")
print(unittest.main())
| mit | 5,298,692,201,717,053,000 | 34.147368 | 136 | 0.616652 | false |
fnugrahendi/petuk.corp | installer/pysource/source/installer.py | 1 | 6796 | import os,sys
from PyQt4 import QtCore
from PyQt4 import QtGui
import functools
import itertools
import re
from subprocess import Popen
import pythoncom #-- shortcut
from win32com.shell import shell, shellcon
from installer_ui import Ui_MainWindow
class MainGUI(QtGui.QMainWindow,Ui_MainWindow):
def __init__(self,parent=None):
super(MainGUI,self).__init__(parent)
self.setupUi(self)
self.show()
#-- path
self.Path = str(__file__).replace("installer.py","").replace("\\","/")
print self.Path
self.BasePath = self.Path+"../"
try:open(self.BasePath+"archive/eula.txt","r").close()
except Exception,e:
print str(e)
self.BasePath = self.Path
print ("base path is now",self.BasePath)
#-- icon
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(self.BasePath+"archive/Garvin.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
		#-- detect 64-bit or 32-bit Windows
self.arc = 32
		if ("PROGRAMFILES(X86)" in os.environ):  #-- if 64-bit
self.arc = 64
self.PageL = ["INSTALL FOLDER","INSTALL BIN","QUIT"]
dataf = open(self.BasePath+"archive/eula.txt","r")
data = dataf.read()
dataf.close()
self.te_Lisensi.setText(data)
self.te_Lisensi.hide()
self.tb_Lisensi.clicked.connect(self.TampilLisensi)
self.InstallDir()
def TampilLisensi(self):
self.te_Lisensi.show()
def Goto(self,name):
self.stackedWidget.setCurrentIndex(self.PageL.index(name.upper()))
def InstallDir(self):
self.Goto("Install Folder")
self.GarvinDisconnect(self.tb_Browse.clicked)
self.GarvinDisconnect(self.tb_Install.clicked)
self.GarvinDisconnect(self.tb_Quit.clicked)
self.tb_Browse.clicked.connect(self.Browse)
self.tb_Install.clicked.connect(self.InstallBin)
self.tb_Quit.clicked.connect(self.Quit)
def Browse(self):
dialog = QtGui.QFileDialog(self)
dialog.setFileMode(QtGui.QFileDialog.Directory)
namafolder = str(dialog.getExistingDirectory(self, ("Pilih folder instalasi"),"",QtGui.QFileDialog.ShowDirsOnly| QtGui.QFileDialog.DontResolveSymlinks))
if not ("garvin" in namafolder.lower()):
if namafolder[-1]=="\\":
namafolder=namafolder[:-1]
namafolder=namafolder+"\Garvin"
self.le_InstallDir.setText(namafolder)
def InstallBin_Act(self):
self.aatime.stop()
archiveBin = self.BasePath+"archive/bin.grvz"
installpath = str(self.le_InstallDir.text())
if not os.path.exists(installpath): os.makedirs(installpath)
os.system(self.BasePath+"7z.exe -y x "+archiveBin+" -o"+installpath+" -pnyungsep")
#~ self.tb_InstallBin_Next.show()
self.InstallMysql()
os.makedirs(installpath+"\data")
def InstallBin(self):
if str(self.le_InstallDir.text())[-1]=='\\':
self.le_InstallDir.setText(str(self.le_InstallDir.text())[:-1]) #-- strip \ dibelakang
self.Goto("Install Bin")
self.lb_InstallBin_Judul.setText("Menginstall Garvin Accounting...")
self.tb_InstallBin_Next.hide()
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallBin_Act)
self.aatime.start(100)
def InstallMysql_Act(self):
self.aatime.stop()
archiveBin = self.BasePath+"archive/mysql32.grvz"
if self.arc==64:
archiveBin = self.BasePath+"archive/mysql64.grvz"
installpath = str(self.le_InstallDir.text())
if not os.path.exists(installpath): os.makedirs(installpath)
os.system(self.BasePath+"7z.exe -y x "+archiveBin+" -o"+installpath+" -pnyungsep")
self.InstallConfig()
def InstallMysql(self):
self.Goto("Install Bin")
if self.arc==32:self.lb_InstallBin_Judul.setText("Menginstall MySQL database server (32 bit)...")
else:self.lb_InstallBin_Judul.setText("Menginstall MySQL database server (64 bit)...")
self.tb_InstallBin_Next.hide()
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallMysql_Act)
self.aatime.start(100)
def InstallConfig_Act(self):
self.aatime.stop()
		print "running", str(self.le_InstallDir.text())+"\\mysql\\bin\\mysqld --port=44559"
self.childproses = Popen(str(self.le_InstallDir.text())+"\\mysql\\bin\\mysqld --port=44559")
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallConfig_MysqlUser)
self.aatime.start(10000)
def InstallConfig_MysqlUser(self):
self.aatime.stop()
querytambahuser = """ CREATE USER 'gd_user_akunting'@'localhost' IDENTIFIED BY 'nyungsep';
GRANT ALL PRIVILEGES ON *.* TO 'gd_user_akunting'@'localhost' IDENTIFIED BY 'nyungsep' WITH GRANT OPTION MAX_QUERIES_PER_HOUR 0 MAX_CONNECTIONS_PER_HOUR 0 MAX_UPDATES_PER_HOUR 0 MAX_USER_CONNECTIONS 0;
"""
f = open("querytambahuser.md","w")
f.write(querytambahuser)
f.close()
		print "running",(str(self.le_InstallDir.text())+"\\mysql\\bin\\mysql --port=44559 -u root test < querytambahuser.md")
os.system(str(self.le_InstallDir.text())+"\\mysql\\bin\\mysql --port=44559 -u root test < querytambahuser.md")
self.Install_StartMenu()
def InstallConfig(self):
self.Goto("Install Bin")
self.lb_InstallBin_Judul.setText("Melakukan configurasi program...")
self.tb_InstallBin_Next.hide()
self.aatime = QtCore.QTimer(self)
self.aatime.timeout.connect(self.InstallConfig_Act)
self.aatime.start(100)
def Install_StartMenu(self):
#--- install start menu
self.Goto("Install Bin")
self.lb_InstallBin_Judul.setText("Memasang start menu...")
self.tb_InstallBin_Next.hide()
startmenudir = os.environ["PROGRAMDATA"]+"\\Microsoft\\Windows\\Start Menu\\Garvin Accounting"
installpath = str(self.le_InstallDir.text())
if not os.path.exists(startmenudir): os.makedirs(startmenudir)
startmenulink = startmenudir + "\\Garvin.lnk"
shortcut = pythoncom.CoCreateInstance (shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
shortcut.SetPath(installpath+"\\bin\\Garvin.exe")
shortcut.SetDescription ("Garvin Accounting")
shortcut.SetIconLocation(installpath+"\\bin\\Garvin.exe",0)
shortcut.SetWorkingDirectory(installpath+"\\bin\\")
persist_file = shortcut.QueryInterface (pythoncom.IID_IPersistFile)
persist_file.Save(startmenulink,0)
self.Selesai()
def Selesai(self):
		#--- TODO: add source file info
self.lb_InstallBin_Judul.setText("Instalasi sukses")
self.tb_InstallBin_Next.show()
self.tb_InstallBin_Next.setText("Finish")
self.tb_InstallBin_Next.clicked.connect(self.Quit)
def Quit(self):
		#-- kill the mysqld subprocess first
try:self.childproses.kill()
except:pass
sys.exit (0)
	def GarvinDisconnect(self,stuff):
		"Convenience helper to disconnect a signal; call like self.GarvinDisconnect(self.tbl_BukuBesar_DaftarTransaksiJurnal_Tambah_List.cellDoubleClicked)"
try:
stuff.disconnect()
return True
except:
return False
if __name__=="__main__":
app = QtGui.QApplication(sys.argv)
w = MainGUI()
sys.exit(app.exec_())
| gpl-2.0 | 7,519,898,417,814,834,000 | 33.323232 | 209 | 0.719982 | false |
cristianquaglio/odoo | addons/hr_attendance/report/attendance_errors.py | 377 | 3669 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.report import report_sxw
class attendance_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(attendance_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'lst': self._lst,
'total': self._lst_total,
'get_employees':self._get_employees,
})
def _get_employees(self, emp_ids):
emp_obj_list = self.pool.get('hr.employee').browse(self.cr, self.uid, emp_ids)
return emp_obj_list
def _lst(self, employee_id, dt_from, dt_to, max, *args):
self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out'))
res = self.cr.dictfetchall()
for r in res:
if r['action'] == 'sign_out':
r['delay'] = -r['delay']
temp = r['delay'].seconds
r['delay'] = str(r['delay']).split('.')[0]
if abs(temp) < max*60:
r['delay2'] = r['delay']
else:
r['delay2'] = '/'
return res
def _lst_total(self, employee_id, dt_from, dt_to, max, *args):
self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out'))
res = self.cr.dictfetchall()
if not res:
return ('/','/')
total2 = datetime.timedelta(seconds = 0, minutes = 0, hours = 0)
total = datetime.timedelta(seconds = 0, minutes = 0, hours = 0)
for r in res:
if r['action'] == 'sign_out':
r['delay'] = -r['delay']
total += r['delay']
if abs(r['delay'].seconds) < max*60:
total2 += r['delay']
result_dict = {
'total': total and str(total).split('.')[0],
'total2': total2 and str(total2).split('.')[0]
}
return [result_dict]
class report_hr_attendanceerrors(osv.AbstractModel):
_name = 'report.hr_attendance.report_attendanceerrors'
_inherit = 'report.abstract_report'
_template = 'hr_attendance.report_attendanceerrors'
_wrapped_report_class = attendance_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 | -2,122,865,311,469,535,700 | 42.164706 | 294 | 0.576724 | false |
enigmampc/catalyst | catalyst/support/issue_227.py | 1 | 1521 | import pytz
from datetime import datetime
from catalyst.api import symbol
from catalyst.utils.run_algo import run_algorithm
coin = 'btc'
quote_currency = 'usd'
n_candles = 5
def initialize(context):
context.symbol = symbol('%s_%s' % (coin, quote_currency))
def handle_data_polo_partial_candles(context, data):
history = data.history(symbol('btc_usdt'), ['volume'],
bar_count=10,
frequency='4H')
print('\nnow: %s\n%s' % (data.current_dt, history))
if not hasattr(context, 'i'):
context.i = 0
context.i += 1
if context.i > 5:
raise Exception('stop')
live = False
if live:
run_algorithm(initialize=lambda ctx: True,
handle_data=handle_data_polo_partial_candles,
exchange_name='poloniex',
quote_currency='usdt',
algo_namespace='ns',
live=True,
data_frequency='minute',
capital_base=3000)
else:
run_algorithm(initialize=lambda ctx: True,
handle_data=handle_data_polo_partial_candles,
exchange_name='poloniex',
quote_currency='usdt',
algo_namespace='ns',
live=False,
data_frequency='minute',
capital_base=3000,
start=datetime(2018, 2, 2, 0, 0, 0, 0, pytz.utc),
end=datetime(2018, 2, 20, 0, 0, 0, 0, pytz.utc)
)
| apache-2.0 | 7,911,985,052,871,495,000 | 30.040816 | 67 | 0.530572 | false |
atmark-techno/atmark-dist | user/python/Lib/lib-tk/turtle.py | 4 | 10917 | # LogoMation-like turtle graphics
from math import * # Also for export
import Tkinter
class Error(Exception):
pass
class RawPen:
def __init__(self, canvas):
self._canvas = canvas
self._items = []
self._tracing = 1
self.degrees()
self.reset()
def degrees(self, fullcircle=360.0):
self._fullcircle = fullcircle
self._invradian = pi / (fullcircle * 0.5)
def radians(self):
self.degrees(2.0*pi)
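    # Example: after radians(), a quarter turn is left(pi/2); after the default
    # degrees(), the same turn is left(90).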
def reset(self):
canvas = self._canvas
width = canvas.winfo_width()
height = canvas.winfo_height()
if width <= 1:
width = canvas['width']
if height <= 1:
height = canvas['height']
self._origin = float(width)/2.0, float(height)/2.0
self._position = self._origin
self._angle = 0.0
self._drawing = 1
self._width = 1
self._color = "black"
self._filling = 0
self._path = []
self._tofill = []
self.clear()
canvas._root().tkraise()
def clear(self):
self.fill(0)
canvas = self._canvas
items = self._items
self._items = []
for item in items:
canvas.delete(item)
def tracer(self, flag):
self._tracing = flag
def forward(self, distance):
x0, y0 = start = self._position
x1 = x0 + distance * cos(self._angle*self._invradian)
y1 = y0 - distance * sin(self._angle*self._invradian)
self._goto(x1, y1)
def backward(self, distance):
self.forward(-distance)
def left(self, angle):
self._angle = (self._angle + angle) % self._fullcircle
def right(self, angle):
self.left(-angle)
def up(self):
self._drawing = 0
def down(self):
self._drawing = 1
def width(self, width):
self._width = float(width)
def color(self, *args):
if not args:
raise Error, "no color arguments"
if len(args) == 1:
color = args[0]
if type(color) == type(""):
# Test the color first
try:
id = self._canvas.create_line(0, 0, 0, 0, fill=color)
except Tkinter.TclError:
raise Error, "bad color string: %s" % `color`
self._color = color
return
try:
r, g, b = color
except:
raise Error, "bad color sequence: %s" % `color`
else:
try:
r, g, b = args
except:
raise Error, "bad color arguments: %s" % `args`
assert 0 <= r <= 1
assert 0 <= g <= 1
assert 0 <= b <= 1
x = 255.0
y = 0.5
self._color = "#%02x%02x%02x" % (int(r*x+y), int(g*x+y), int(b*x+y))
def write(self, arg, move=0):
x, y = start = self._position
x = x-1 # correction -- calibrated for Windows
item = self._canvas.create_text(x, y,
text=str(arg), anchor="sw",
fill=self._color)
self._items.append(item)
if move:
x0, y0, x1, y1 = self._canvas.bbox(item)
self._goto(x1, y1)
def fill(self, flag):
if self._filling:
path = tuple(self._path)
smooth = self._filling < 0
if len(path) > 2:
item = self._canvas._create('polygon', path,
{'fill': self._color,
'smooth': smooth})
self._items.append(item)
self._canvas.lower(item)
if self._tofill:
for item in self._tofill:
self._canvas.itemconfigure(item, fill=self._color)
self._items.append(item)
self._path = []
self._tofill = []
self._filling = flag
if flag:
self._path.append(self._position)
def circle(self, radius, extent=None):
if extent is None:
extent = self._fullcircle
x0, y0 = self._position
xc = x0 - radius * sin(self._angle * self._invradian)
yc = y0 - radius * cos(self._angle * self._invradian)
if radius >= 0.0:
start = self._angle - 90.0
else:
start = self._angle + 90.0
extent = -extent
if self._filling:
if abs(extent) >= self._fullcircle:
item = self._canvas.create_oval(xc-radius, yc-radius,
xc+radius, yc+radius,
width=self._width,
outline="")
self._tofill.append(item)
item = self._canvas.create_arc(xc-radius, yc-radius,
xc+radius, yc+radius,
style="chord",
start=start,
extent=extent,
width=self._width,
outline="")
self._tofill.append(item)
if self._drawing:
if abs(extent) >= self._fullcircle:
item = self._canvas.create_oval(xc-radius, yc-radius,
xc+radius, yc+radius,
width=self._width,
outline=self._color)
self._items.append(item)
item = self._canvas.create_arc(xc-radius, yc-radius,
xc+radius, yc+radius,
style="arc",
start=start,
extent=extent,
width=self._width,
outline=self._color)
self._items.append(item)
angle = start + extent
x1 = xc + abs(radius) * cos(angle * self._invradian)
y1 = yc - abs(radius) * sin(angle * self._invradian)
self._angle = (self._angle + extent) % self._fullcircle
self._position = x1, y1
if self._filling:
self._path.append(self._position)
def goto(self, *args):
if len(args) == 1:
try:
x, y = args[0]
except:
raise Error, "bad point argument: %s" % `args[0]`
else:
try:
x, y = args
except:
raise Error, "bad coordinates: %s" % `args[0]`
x0, y0 = self._origin
self._goto(x0+x, y0-y)
def _goto(self, x1, y1):
x0, y0 = start = self._position
self._position = map(float, (x1, y1))
if self._filling:
self._path.append(self._position)
if self._drawing:
if self._tracing:
dx = float(x1 - x0)
dy = float(y1 - y0)
distance = hypot(dx, dy)
nhops = int(distance)
item = self._canvas.create_line(x0, y0, x0, y0,
width=self._width,
arrow="last",
capstyle="round",
fill=self._color)
try:
for i in range(1, 1+nhops):
x, y = x0 + dx*i/nhops, y0 + dy*i/nhops
self._canvas.coords(item, x0, y0, x, y)
self._canvas.update()
self._canvas.after(10)
self._canvas.itemconfigure(item, arrow="none")
except Tkinter.TclError:
# Probably the window was closed!
return
else:
item = self._canvas.create_line(x0, y0, x1, y1,
width=self._width,
capstyle="round",
fill=self._color)
self._items.append(item)
_root = None
_canvas = None
_pen = None
class Pen(RawPen):
def __init__(self):
global _root, _canvas
if _root is None:
_root = Tkinter.Tk()
_root.wm_protocol("WM_DELETE_WINDOW", self._destroy)
if _canvas is None:
# XXX Should have scroll bars
_canvas = Tkinter.Canvas(_root, background="white")
_canvas.pack(expand=1, fill="both")
RawPen.__init__(self, _canvas)
def _destroy(self):
global _root, _canvas, _pen
root = self._canvas._root()
if root is _root:
_pen = None
_root = None
_canvas = None
root.destroy()
def _getpen():
global _pen
pen = _pen
if not pen:
_pen = pen = Pen()
return pen
def degrees(): _getpen().degrees()
def radians(): _getpen().radians()
def reset(): _getpen().reset()
def clear(): _getpen().clear()
def tracer(flag): _getpen().tracer(flag)
def forward(distance): _getpen().forward(distance)
def backward(distance): _getpen().backward(distance)
def left(angle): _getpen().left(angle)
def right(angle): _getpen().right(angle)
def up(): _getpen().up()
def down(): _getpen().down()
def width(width): _getpen().width(width)
def color(*args): apply(_getpen().color, args)
def write(arg, move=0): _getpen().write(arg, move)
def fill(flag): _getpen().fill(flag)
def circle(radius, extent=None): _getpen().circle(radius, extent)
def goto(*args): apply(_getpen().goto, args)
def demo():
reset()
tracer(1)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
fill(1)
for j in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
fill(0)
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(0)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
fill(1)
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
fill(0)
# more text
write("end")
if __name__ == '__main__':
_root.mainloop()
if __name__ == '__main__':
demo()
| gpl-2.0 | 438,159,064,982,537,540 | 30.827988 | 76 | 0.446276 | false |
jbobron/node-workshop | challenge6/start/node_modules/browserify/node_modules/syntax-error/node_modules/esprima-six/tools/generate-unicode-regex.py | 341 | 5096 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <[email protected]>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
def __init__(self, detector):
self.detector = detector
def generate_identifier_start(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_identifier_part(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_start(self):
r = [ ch for ch in xrange(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_part(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_separator_space(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
return self._generate_range(r)
def _generate_range(self, r):
if len(r) == 0:
return '[]'
buf = []
start = r[0]
end = r[0]
predict = start + 1
r = r[1:]
for code in r:
if predict == code:
end = code
predict = code + 1
continue
else:
if start == end:
buf.append("\\u%04X" % start)
elif end == start + 1:
buf.append("\\u%04X\\u%04X" % (start, end))
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
start = code
end = code
predict = code + 1
if start == end:
buf.append("\\u%04X" % start)
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
return '[' + ''.join(buf) + ']'
class Detector(object):
def __init__(self, data):
self.data = data
def is_ascii(self, ch):
return ch < 0x80
def is_ascii_alpha(self, ch):
v = ch | 0x20
return v >= ord('a') and v <= ord('z')
def is_decimal_digit(self, ch):
return ch >= ord('0') and ch <= ord('9')
def is_octal_digit(self, ch):
return ch >= ord('0') and ch <= ord('7')
def is_hex_digit(self, ch):
v = ch | 0x20
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))
def is_digit(self, ch):
return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'
def is_ascii_alphanumeric(self, ch):
return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)
def _is_non_ascii_identifier_start(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'
def _is_non_ascii_identifier_part(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D
def is_separator_space(self, ch):
return self.data[ch] == 'Zs'
def is_white_space(self, ch):
return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)
def is_line_terminator(self, ch):
return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)
def is_line_or_paragraph_terminator(self, ch):
return ch == 0x2028 or ch == 0x2029
def is_identifier_start(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
return self._is_non_ascii_identifier_start(ch)
def is_identifier_part(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
return self._is_non_ascii_identifier_part(ch)
def analyze(source):
data = []
dictionary = {}
with open(source) as uni:
flag = False
first = 0
for line in uni:
d = string.split(line.strip(), ";")
val = int(d[0], 16)
if flag:
if re.compile("<.+, Last>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = False
for t in range(first, val+1):
dictionary[t] = str(d[2])
                else:
                    raise Exception("Database Exception")
else:
if re.compile("<.+, First>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = True
first = val
else:
dictionary[val] = str(d[2])
for i in range(0xFFFF + 1):
if dictionary.get(i) == None:
data.append("Un")
else:
data.append(dictionary[i])
return RegExpGenerator(Detector(data))
def main(source):
generator = analyze(source)
print generator.generate_non_ascii_identifier_start()
print generator.generate_non_ascii_identifier_part()
print generator.generate_non_ascii_separator_space()
if __name__ == '__main__':
main(sys.argv[1])
| mit | 646,803,075,168,037,400 | 29.073171 | 169 | 0.560636 | false |
bfurtaw/thrust | performance/report.py | 12 | 1519 | from build import plot_results, print_results
#valid formats are png, pdf, ps, eps and svg
#if format=None the plot will be displayed
format = 'png'
#output = print_results
output = plot_results
for function in ['fill', 'reduce', 'inner_product', 'gather', 'merge']:
output(function + '.xml', 'InputType', 'InputSize', 'Bandwidth', format=format)
for function in ['inclusive_scan', 'inclusive_segmented_scan', 'unique']:
output(function + '.xml', 'InputType', 'InputSize', 'Throughput', format=format)
for method in ['indirect_sort']:
output(method + '.xml', 'Sort', 'VectorLength', 'Time', plot='semilogx', title='Indirect Sorting', format=format)
for method in ['sort', 'merge_sort', 'radix_sort']:
output(method + '.xml', 'KeyType', 'InputSize', 'Sorting', title='thrust::' + method, format=format)
output(method + '_by_key.xml', 'KeyType', 'InputSize', 'Sorting', title='thrust::' + method + '_by_key', format=format)
output('stl_sort.xml', 'KeyType', 'InputSize', 'Sorting', title='std::sort', format=format)
for method in ['radix_sort']:
output(method + '_bits.xml', 'KeyType', 'KeyBits', 'Sorting', title='thrust::' + method, plot='plot', dpi=72, format=format)
for format in ['png', 'pdf']:
output('reduce_float.xml', 'InputType', 'InputSize', 'Bandwidth', dpi=120, plot='semilogx', title='thrust::reduce<float>()', format=format)
output('sort_large.xml', 'KeyType', 'InputSize', 'Sorting', dpi=120, plot='semilogx', title='thrust::sort<T>()', format=format)
| apache-2.0 | -5,358,173,115,082,442,000 | 49.633333 | 143 | 0.663594 | false |
IT-Department-Projects/OOAD-Project | Flask_App/oakcrest/lib/python2.7/site-packages/requests/auth.py | 68 | 9541 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
import warnings
from base64 import b64encode
from .compat import urlparse, str, basestring
from .cookies import extract_cookies_to_jar
from ._internal_utils import to_native_string
from .utils import parse_dict_header
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({0!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({0!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(password),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
authstr = 'Basic ' + to_native_string(
b64encode(b':'.join((username, password))).strip()
)
return authstr
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
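    # Worked sketch of the qop='auth' branch above (editor's note, following
    # RFC 2617 section 3.2.2.1): with HA1 = H(user:realm:password) and
    # HA2 = H(method:path), the response digest is
    #   KD(HA1, nonce ":" ncvalue ":" cnonce ":auth:" HA2)
    # i.e. H(HA1 ":" nonce ":" ncvalue ":" cnonce ":auth:" HA2).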
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
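# Usage sketch (editor's note; the endpoint is a placeholder):
# requests.get('https://example.com/protected',
#              auth=HTTPDigestAuth('user', 'pass'))
# The first 401 response carries the digest challenge; handle_401 retries
# the request with the Authorization header built by build_digest_header.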
| mit | -6,903,700,700,795,054,000 | 32.128472 | 88 | 0.570066 | false |
obeattie/sqlalchemy | lib/sqlalchemy/sql/functions.py | 19 | 3067 | from sqlalchemy import types as sqltypes
from sqlalchemy.sql.expression import (
ClauseList, Function, _literal_as_binds, text, _type_from_args
)
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import VisitableType
class _GenericMeta(VisitableType):
def __call__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
return type.__call__(self, *args, **kwargs)
class GenericFunction(Function):
__metaclass__ = _GenericMeta
def __init__(self, type_=None, args=(), **kwargs):
self.packagenames = []
self.name = self.__class__.__name__
self._bind = kwargs.get('bind', None)
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *args).self_group()
self.type = sqltypes.to_instance(
type_ or getattr(self, '__return_type__', None))
class AnsiFunction(GenericFunction):
def __init__(self, **kwargs):
GenericFunction.__init__(self, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', _type_from_args(args))
GenericFunction.__init__(self, args=args, **kwargs)
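# Editor's sketch of how these generics are reached (assuming the usual
# ``sqlalchemy.func`` entry point): ``func.coalesce(tbl.c.x, 0)`` resolves to
# the ``coalesce`` class below, so the SQL expression's return type is
# inferred from its arguments via _type_from_args.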
class coalesce(ReturnTypeFromArgs):
pass
class max(ReturnTypeFromArgs):
pass
class min(ReturnTypeFromArgs):
pass
class sum(ReturnTypeFromArgs):
pass
class now(GenericFunction):
__return_type__ = sqltypes.DateTime
class concat(GenericFunction):
__return_type__ = sqltypes.String
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, args=args, **kwargs)
class char_length(GenericFunction):
__return_type__ = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, args=[arg], **kwargs)
class random(GenericFunction):
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', None)
GenericFunction.__init__(self, args=args, **kwargs)
class count(GenericFunction):
"""The ANSI COUNT aggregate function. With no arguments, emits COUNT \*."""
__return_type__ = sqltypes.Integer
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = text('*')
GenericFunction.__init__(self, args=(expression,), **kwargs)
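# Editor's sketch: ``select([func.count()])`` renders ``SELECT count(*)``
# because the expression argument defaults to text('*') above.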
class current_date(AnsiFunction):
__return_type__ = sqltypes.Date
class current_time(AnsiFunction):
__return_type__ = sqltypes.Time
class current_timestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class current_user(AnsiFunction):
__return_type__ = sqltypes.String
class localtime(AnsiFunction):
__return_type__ = sqltypes.DateTime
class localtimestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class session_user(AnsiFunction):
__return_type__ = sqltypes.String
class sysdate(AnsiFunction):
__return_type__ = sqltypes.DateTime
class user(AnsiFunction):
__return_type__ = sqltypes.String
| mit | -3,386,888,585,508,713,000 | 28.490385 | 80 | 0.656668 | false |
calebfoss/tensorflow | tensorflow/contrib/metrics/python/metrics/classification.py | 23 | 2583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not bool, integer, or string.
"""
if not (labels.dtype.is_integer or
labels.dtype in (dtypes.bool, dtypes.string)):
raise ValueError(
'Labels should have bool, integer, or string dtype, not %r' %
labels.dtype)
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
with ops.name_scope('accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.multiply(is_correct, weights)
num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
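# Illustrative usage (editor's sketch; assumes a TF1-style graph/session,
# values invented):
#   predictions = tf.constant([1, 0, 1, 1], dtype=tf.int64)
#   labels = tf.constant([1, 0, 0, 1], dtype=tf.int64)
#   acc = accuracy(predictions, labels)   # sess.run(acc) -> 0.75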
| apache-2.0 | 1,662,373,576,708,966,100 | 40 | 80 | 0.678281 | false |
elhuhdron/knossos | python/user/custom_graphics_view.py | 2 | 2659 | #!/home/amos/anaconda/bin/python
from PythonQt.Qt import *
from PythonQt.QtGui import *
class CGScene(QGraphicsScene):
def __init__(self, parent = None):
super(CGScene, self).__init__(self)
class Texture(QGraphicsItem):
def __init__(self, pos, tex):
super(Texture, self).__init__(self)
self.tex = tex
self.setPos(pos)
def paint(self, painter, item, widget):
painter.drawPixmap(self.x(), self.y(), self.tex)
    def mousePressEvent(self, event):
        QGraphicsItem.mousePressEvent(self, event)
        self.update()
class CGView(QGraphicsView):
def __init__(self, scene):
super(CGView, self).__init__(scene)
def wheelEvent(self, event):
if event.delta() > 0:
self.zoom(1.2)
else:
self.zoom(1 / 1.2)
def zoom(self, factor):
scaling = self.transform().scale(factor, factor).mapRect(QRectF(0, 0, 1, 1)).width()
if scaling < 0.07 or scaling > 100:
return
self.scale(factor, factor)
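# Editor's note: each wheel notch scales the view by 1.2 (or 1/1.2), and
# zoom() measures how a unit rect would map under the scaled transform so
# the cumulative scale factor stays clamped to roughly [0.07, 100].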
scene = CGScene()
view = CGView(scene)
view.setInteractive(True)
view.setBackgroundBrush(QBrush(QColor(0, 0, 0), QPixmap("python/user/images/ni.jpg")))
view.setSceneRect(-768, -512, 2000, 2000)
view.setOptimizationFlags(QGraphicsView.DontClipPainter | QGraphicsView.DontAdjustForAntialiasing);
view.setDragMode(QGraphicsView.ScrollHandDrag)
view.setCacheMode(QGraphicsView.CacheBackground);
view.setViewportUpdateMode(QGraphicsView.FullViewportUpdate);
view.setTransformationAnchor(QGraphicsView.AnchorUnderMouse);
view.setRenderHints(QPainter.SmoothPixmapTransform)
widget = QWidget()
widget.setWindowTitle("CGExample")
widget.setGeometry(100, 100, 800, 800)
splitter = QSplitter(widget)
toolbox = QToolBox()
splitter.addWidget(toolbox)
splitter.addWidget(view)
tex = Texture(QPointF(0, 0), QPixmap("python/user/images/e1088_xy.png"))
tex.setFlags(QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemSendsScenePositionChanges)
tex.setAcceptedMouseButtons(Qt.LeftButton | Qt.RightButton);
tex.setAcceptHoverEvents(True);
scene.addItem(tex)
label = QLabel()
label.setPixmap(QPixmap(":/images/splash.png"))
proxy = scene.addWidget(label)
proxy.setScale(0.5)
group_box = QGroupBox()
layout = QHBoxLayout(group_box)
group_proxy = scene.addWidget(group_box)
group_proxy.setPos(0, 0)
rect = QRectF(-64, -64, 64, 128)
pen = QColor(1, 1, 1)
brush = QBrush(QColor(0, 0, 0))
widget.show()
"""
class Watcher(QtCore.QRunnable):
def __init__(self):
super(Watcher, self).__init__()
print "init"
def run(self):
pass
#view.rotate(5)
watcher = Watcher()
timer = QTimer()
timer.setInterval(100)
timer.timeout.connect(watcher.run)
timer.start(100)
pool = QThreadPool()
"""
| gpl-2.0 | 9,022,214,861,748,472,000 | 22.530973 | 99 | 0.719819 | false |
b3c/VTK-5.8 | Wrapping/Python/vtk/util/misc.py | 9 | 1984 | """Miscellaneous functions and classes that dont fit into specific
categories."""
import sys, os, vtk
#----------------------------------------------------------------------
# the following functions are for the vtk regression testing and examples
def vtkGetDataRoot():
"""vtkGetDataRoot() -- return vtk example data directory
"""
    dataIndex = -1
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-D' and i < len(sys.argv)-1:
dataIndex = i+1
if dataIndex != -1:
dataRoot = sys.argv[dataIndex]
else:
try:
dataRoot = os.environ['VTK_DATA_ROOT']
except KeyError:
dataRoot = '../../../../VTKData'
return dataRoot
def vtkRegressionTestImage( renWin ):
"""vtkRegressionTestImage(renWin) -- produce regression image for window
This function writes out a regression .png file for a vtkWindow.
Does anyone involved in testing care to elaborate?
"""
imageIndex=-1;
for i in range(0, len(sys.argv)):
if sys.argv[i] == '-V' and i < len(sys.argv)-1:
imageIndex = i+1
if imageIndex != -1:
fname = os.path.join(vtkGetDataRoot(), sys.argv[imageIndex])
rt_w2if = vtk.vtkWindowToImageFilter()
rt_w2if.SetInput(renWin)
if os.path.isfile(fname):
pass
else:
rt_pngw = vtk.vtkPNGWriter()
rt_pngw.SetFileName(fname)
rt_pngw.SetInput(rt_w2if.GetOutput())
rt_pngw.Write()
rt_pngw = None
rt_png = vtk.vtkPNGReader()
rt_png.SetFileName(fname)
rt_id = vtk.vtkImageDifference()
rt_id.SetInput(rt_w2if.GetOutput())
rt_id.SetImage(rt_png.GetOutput())
rt_id.Update()
if rt_id.GetThresholdedError() <= 10:
return 1
else:
sys.stderr.write('Failed image test: %f\n'
% rt_id.GetThresholdedError())
return 0
return 2
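# Editor's note on return codes: 1 = the rendered image matched the '-V'
# baseline within the error threshold, 0 = it differed, 2 = no '-V'
# argument was given, so no comparison was performed.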
| bsd-3-clause | -7,155,833,687,008,587,000 | 28.176471 | 76 | 0.555948 | false |
ActiveState/code | recipes/Python/577760_Change_a_Functions_Closure/recipe-577760.py | 1 | 1282 | """inject_closure module"""
INJECTEDKEY = "injected_{}"
OUTERLINE = " outer_{0} = injected_{0}"
INNERLINE = " inner_{0} = outer_{0}"
SOURCE= ("def not_important():",
" def also_not_important():",
" return also_not_important")
def inject_closure(f, *args):
"""Return a copy of f, with a new closure.
The new closure will be derived from args, in the same
order. This requires that the caller have knowledge
of the existing closure.
"""
# build the source to exec
injected = {}
source = list(SOURCE)
for i in range(len(args)):
source.insert(1, OUTERLINE.format(i))
source.insert(-1, INNERLINE.format(i))
injected[INJECTEDKEY.format(i)] = args[i]
# exec the source and pull the new closure
exec("\n".join(source), injected, injected)
closure = injected["not_important"]().__closure__
# build the new function object
func = type(f)(f.__code__, f.__globals__, f.__name__,
f.__defaults__, closure)
func.__annotations__ = f.__annotations__
func.__doc__ = f.__doc__
func.__kwdefaults__ = f.__kwdefaults__
func.__module__ = f.__module__
return func
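# --- Illustrative demo (editor's sketch, not part of the original recipe).
# make_adder/add5/add7 are hypothetical names; this shows inject_closure
# replacing the single closure cell of a function.
if __name__ == "__main__":
    def make_adder(n):
        def add(x):
            return x + n
        return add

    add5 = make_adder(5)
    add7 = inject_closure(add5, 7)  # the cell that held 5 now holds 7
    assert add5(1) == 6
    assert add7(1) == 8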
| mit | 7,580,353,531,385,490,000 | 31.871795 | 60 | 0.552262 | false |
HackSoftware/hackconf.bg | home/website/migrations/0002_auto_20160903_1043.py | 1 | 12244 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-03 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0013_make_rendition_upload_callable'),
('wagtailcore', '0029_unicode_slugfield_dj19'),
('wagtailredirects', '0005_capitalizeverbose'),
('wagtailforms', '0003_capitalizeverbose'),
('website', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BranchPartners',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('webpage', models.URLField(blank=True, max_length=255, null=True)),
('description', wagtail.wagtailcore.fields.RichTextField()),
],
),
migrations.CreateModel(
name='GeneralPartners',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='MediaPartners',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('video_url', models.URLField(blank=True, max_length=255, null=True)),
('description', wagtail.wagtailcore.fields.RichTextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='PastEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Event')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.RemoveField(
model_name='howweworkpage',
name='header_image',
),
migrations.RemoveField(
model_name='howweworkpage',
name='page_ptr',
),
migrations.RemoveField(
model_name='howweworkpage',
name='what_we_do_image',
),
migrations.RemoveField(
model_name='ourteampage',
name='header_image',
),
migrations.RemoveField(
model_name='ourteampage',
name='page_ptr',
),
migrations.RemoveField(
model_name='project',
name='background_image',
),
migrations.RemoveField(
model_name='project',
name='logo',
),
migrations.RemoveField(
model_name='projectsplacement',
name='page',
),
migrations.RemoveField(
model_name='projectsplacement',
name='project',
),
migrations.RemoveField(
model_name='teammate',
name='initial_photo',
),
migrations.RemoveField(
model_name='teammate',
name='secondary_photo',
),
migrations.RemoveField(
model_name='teammatepageplacement',
name='page',
),
migrations.RemoveField(
model_name='teammatepageplacement',
name='teammate',
),
migrations.RemoveField(
model_name='teammateplacement',
name='page',
),
migrations.RemoveField(
model_name='teammateplacement',
name='teammate',
),
migrations.RemoveField(
model_name='technologiespageplacement',
name='page',
),
migrations.RemoveField(
model_name='technologiespageplacement',
name='technology',
),
migrations.RemoveField(
model_name='technologiesplacement',
name='page',
),
migrations.RemoveField(
model_name='technologiesplacement',
name='technology',
),
migrations.RemoveField(
model_name='technologiesweusepage',
name='header_image',
),
migrations.RemoveField(
model_name='technologiesweusepage',
name='page_ptr',
),
migrations.RemoveField(
model_name='technology',
name='logo',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_center',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_left',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_right',
),
migrations.RemoveField(
model_name='homepage',
name='how_we_work_title',
),
migrations.RemoveField(
model_name='homepage',
name='intro_h1',
),
migrations.RemoveField(
model_name='homepage',
name='intro_h2',
),
migrations.RemoveField(
model_name='homepage',
name='intro_image',
),
migrations.RemoveField(
model_name='homepage',
name='our_team_center',
),
migrations.RemoveField(
model_name='homepage',
name='our_team_title',
),
migrations.RemoveField(
model_name='homepage',
name='portfolio_center',
),
migrations.RemoveField(
model_name='homepage',
name='portfolio_image',
),
migrations.RemoveField(
model_name='homepage',
name='portfolio_title',
),
migrations.RemoveField(
model_name='homepage',
name='technologies_we_use_center',
),
migrations.RemoveField(
model_name='homepage',
name='technologies_we_use_image',
),
migrations.RemoveField(
model_name='homepage',
name='technologies_we_use_title',
),
migrations.AddField(
model_name='homepage',
name='about_text',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='call_for_speakers_form_url',
field=models.URLField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_image_logo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='homepage',
name='header_text',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='location_description',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='speakers_title',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='tickets_description',
field=wagtail.wagtailcore.fields.RichTextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='tickets_title',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='tickets_widget_code',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='video_url',
field=models.URLField(blank=True, max_length=255, null=True),
),
migrations.DeleteModel(
name='HowWeWorkPage',
),
migrations.DeleteModel(
name='OurTeamPage',
),
migrations.DeleteModel(
name='Project',
),
migrations.DeleteModel(
name='ProjectsPlacement',
),
migrations.DeleteModel(
name='Teammate',
),
migrations.DeleteModel(
name='TeammatePagePlacement',
),
migrations.DeleteModel(
name='TeammatePlacement',
),
migrations.DeleteModel(
name='TechnologiesPagePlacement',
),
migrations.DeleteModel(
name='TechnologiesPlacement',
),
migrations.DeleteModel(
name='TechnologiesWeUsePage',
),
migrations.DeleteModel(
name='Technology',
),
migrations.AddField(
model_name='pastevents',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='past_events', to='website.HomePage'),
),
migrations.AddField(
model_name='mediapartners',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='media_partners', to='website.HomePage'),
),
migrations.AddField(
model_name='mediapartners',
name='partner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Partner'),
),
migrations.AddField(
model_name='generalpartners',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='general_partners', to='website.HomePage'),
),
migrations.AddField(
model_name='generalpartners',
name='partner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Partner'),
),
migrations.AddField(
model_name='branchpartners',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='branch_partners', to='website.HomePage'),
),
migrations.AddField(
model_name='branchpartners',
name='partner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='website.Partner'),
),
]
| mit | 5,730,577,551,946,824,000 | 33.883191 | 151 | 0.537406 | false |
duhzecca/cinder | cinder/tests/unit/fake_service.py | 13 | 1787 | # Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from oslo_versionedobjects import fields
from cinder import objects
def fake_db_service(**updates):
NOW = timeutils.utcnow().replace(microsecond=0)
db_service = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'host': 'fake-host',
'binary': 'fake-service',
'topic': 'fake-service-topic',
'report_count': 1,
'disabled': False,
'disabled_reason': None,
'modified_at': NOW,
}
for name, field in objects.Service.fields.items():
if name in db_service:
continue
if field.nullable:
db_service[name] = None
elif field.default != fields.UnspecifiedDefault:
db_service[name] = field.default
else:
raise Exception('fake_db_service needs help with %s.' % name)
if updates:
db_service.update(updates)
return db_service
def fake_service_obj(context, **updates):
return objects.Service._from_db_object(context, objects.Service(),
fake_db_service(**updates))
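# Usage sketch (editor's note; field values are placeholders):
# svc = fake_service_obj(context, host='other-host', binary='cinder-volume')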
| apache-2.0 | 948,339,716,318,436,500 | 30.910714 | 78 | 0.623951 | false |
beiko-lab/gengis | bin/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/flashwin.py | 6 | 8028 | #----------------------------------------------------------------------
# Name: wx.lib.flashwin
# Purpose: A class that allows the use of the Shockwave Flash
# ActiveX control
#
# Author: Robin Dunn
#
# Created: 22-March-2004
# RCS-ID: $Id: flashwin.py 54040 2008-06-08 23:03:22Z RD $
# Copyright: (c) 2008 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
import wx
import wx.lib.activex
import comtypes.client as cc
import sys
if not hasattr(sys, 'frozen'):
cc.GetModule( ('{D27CDB6B-AE6D-11CF-96B8-444553540000}', 1, 0) )
from comtypes.gen import ShockwaveFlashObjects
clsID = '{D27CDB6E-AE6D-11CF-96B8-444553540000}'
progID = 'ShockwaveFlash.ShockwaveFlash.1'
class FlashWindow(wx.lib.activex.ActiveXCtrl):
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, name='FlashWindow'):
wx.lib.activex.ActiveXCtrl.__init__(self, parent, progID,
id, pos, size, style, name)
def SetZoomRect(self, left, top, right, bottom):
return self.ctrl.SetZoomRect(left, top, right, bottom)
def Zoom(self, factor):
return self.ctrl.Zoom(factor)
def Pan(self, x, y, mode):
return self.ctrl.Pan(x, y, mode)
def Play(self):
return self.ctrl.Play()
def Stop(self):
return self.ctrl.Stop()
def Back(self):
return self.ctrl.Back()
def Forward(self):
return self.ctrl.Forward()
def Rewind(self):
return self.ctrl.Rewind()
def StopPlay(self):
return self.ctrl.StopPlay()
def GotoFrame(self, FrameNum):
return self.ctrl.GotoFrame(FrameNum)
def CurrentFrame(self):
return self.ctrl.CurrentFrame()
def IsPlaying(self):
return self.ctrl.IsPlaying()
def PercentLoaded(self):
return self.ctrl.PercentLoaded()
def FrameLoaded(self, FrameNum):
return self.ctrl.FrameLoaded(FrameNum)
def FlashVersion(self):
return self.ctrl.FlashVersion()
def LoadMovie(self, layer, url):
return self.ctrl.LoadMovie(layer, url)
def TGotoFrame(self, target, FrameNum):
return self.ctrl.TGotoFrame(target, FrameNum)
def TGotoLabel(self, target, label):
return self.ctrl.TGotoLabel(target, label)
def TCurrentFrame(self, target):
return self.ctrl.TCurrentFrame(target)
def TCurrentLabel(self, target):
return self.ctrl.TCurrentLabel(target)
def TPlay(self, target):
return self.ctrl.TPlay(target)
def TStopPlay(self, target):
return self.ctrl.TStopPlay(target)
def SetVariable(self, name, value):
return self.ctrl.SetVariable(name, value)
def GetVariable(self, name):
return self.ctrl.GetVariable(name)
def TSetProperty(self, target, property, value):
return self.ctrl.TSetProperty(target, property, value)
def TGetProperty(self, target, property):
return self.ctrl.TGetProperty(target, property)
def TCallFrame(self, target, FrameNum):
return self.ctrl.TCallFrame(target, FrameNum)
def TCallLabel(self, target, label):
return self.ctrl.TCallLabel(target, label)
def TSetPropertyNum(self, target, property, value):
return self.ctrl.TSetPropertyNum(target, property, value)
def TGetPropertyNum(self, target, property):
return self.ctrl.TGetPropertyNum(target, property)
def TGetPropertyAsNumber(self, target, property):
return self.ctrl.TGetPropertyAsNumber(target, property)
# Getters, Setters and properties
def _get_ReadyState(self):
return self.ctrl.ReadyState
readystate = property(_get_ReadyState, None)
def _get_TotalFrames(self):
return self.ctrl.TotalFrames
totalframes = property(_get_TotalFrames, None)
def _get_Playing(self):
return self.ctrl.Playing
def _set_Playing(self, Playing):
self.ctrl.Playing = Playing
playing = property(_get_Playing, _set_Playing)
def _get_Quality(self):
return self.ctrl.Quality
def _set_Quality(self, Quality):
self.ctrl.Quality = Quality
quality = property(_get_Quality, _set_Quality)
def _get_ScaleMode(self):
return self.ctrl.ScaleMode
def _set_ScaleMode(self, ScaleMode):
self.ctrl.ScaleMode = ScaleMode
scalemode = property(_get_ScaleMode, _set_ScaleMode)
def _get_AlignMode(self):
return self.ctrl.AlignMode
def _set_AlignMode(self, AlignMode):
self.ctrl.AlignMode = AlignMode
alignmode = property(_get_AlignMode, _set_AlignMode)
def _get_BackgroundColor(self):
return self.ctrl.BackgroundColor
def _set_BackgroundColor(self, BackgroundColor):
self.ctrl.BackgroundColor = BackgroundColor
backgroundcolor = property(_get_BackgroundColor, _set_BackgroundColor)
def _get_Loop(self):
return self.ctrl.Loop
def _set_Loop(self, Loop):
self.ctrl.Loop = Loop
loop = property(_get_Loop, _set_Loop)
def _get_Movie(self):
return self.ctrl.Movie
def _set_Movie(self, Movie):
self.ctrl.Movie = Movie
movie = property(_get_Movie, _set_Movie)
def _get_FrameNum(self):
return self.ctrl.FrameNum
def _set_FrameNum(self, FrameNum):
self.ctrl.FrameNum = FrameNum
framenum = property(_get_FrameNum, _set_FrameNum)
def _get_WMode(self):
return self.ctrl.WMode
def _set_WMode(self, WMode):
self.ctrl.WMode = WMode
wmode = property(_get_WMode, _set_WMode)
def _get_SAlign(self):
return self.ctrl.SAlign
def _set_SAlign(self, SAlign):
self.ctrl.SAlign = SAlign
salign = property(_get_SAlign, _set_SAlign)
def _get_Menu(self):
return self.ctrl.Menu
def _set_Menu(self, Menu):
self.ctrl.Menu = Menu
menu = property(_get_Menu, _set_Menu)
def _get_Base(self):
return self.ctrl.Base
def _set_Base(self, Base):
self.ctrl.Base = Base
base = property(_get_Base, _set_Base)
def _get_Scale(self):
return self.ctrl.Scale
def _set_Scale(self, Scale):
self.ctrl.Scale = Scale
scale = property(_get_Scale, _set_Scale)
def _get_DeviceFont(self):
return self.ctrl.DeviceFont
def _set_DeviceFont(self, DeviceFont):
self.ctrl.DeviceFont = DeviceFont
devicefont = property(_get_DeviceFont, _set_DeviceFont)
def _get_EmbedMovie(self):
return self.ctrl.EmbedMovie
def _set_EmbedMovie(self, EmbedMovie):
self.ctrl.EmbedMovie = EmbedMovie
embedmovie = property(_get_EmbedMovie, _set_EmbedMovie)
def _get_BGColor(self):
return self.ctrl.BGColor
def _set_BGColor(self, BGColor):
self.ctrl.BGColor = BGColor
bgcolor = property(_get_BGColor, _set_BGColor)
def _get_Quality2(self):
return self.ctrl.Quality2
def _set_Quality2(self, Quality2):
self.ctrl.Quality2 = Quality2
quality2 = property(_get_Quality2, _set_Quality2)
def _get_SWRemote(self):
return self.ctrl.SWRemote
def _set_SWRemote(self, SWRemote):
self.ctrl.SWRemote = SWRemote
swremote = property(_get_SWRemote, _set_SWRemote)
def _get_FlashVars(self):
return self.ctrl.FlashVars
def _set_FlashVars(self, FlashVars):
self.ctrl.FlashVars = FlashVars
flashvars = property(_get_FlashVars, _set_FlashVars)
def _get_AllowScriptAccess(self):
return self.ctrl.AllowScriptAccess
def _set_AllowScriptAccess(self, AllowScriptAccess):
self.ctrl.AllowScriptAccess = AllowScriptAccess
allowscriptaccess = property(_get_AllowScriptAccess, _set_AllowScriptAccess)
def _get_MovieData(self):
return self.ctrl.MovieData
def _set_MovieData(self, MovieData):
self.ctrl.MovieData = MovieData
moviedata = property(_get_MovieData, _set_MovieData)
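# Usage sketch (editor's note, hedged): embedding the control in a frame.
# The .swf path is a placeholder; requires the Flash ActiveX control.
#
# app = wx.App(False)
# frame = wx.Frame(None, title="Flash demo")
# flash = FlashWindow(frame)
# flash.movie = "file:///c:/path/to/movie.swf"
# frame.Show()
# app.MainLoop()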
| gpl-3.0 | 4,649,919,121,262,041,000 | 29.641221 | 80 | 0.645615 | false |
sencha/chromium-spacewalk | third_party/libxml/src/check-xsddata-test-suite.py | 343 | 10682 | #!/usr/bin/python
import sys
import time
import os
import string
import StringIO
sys.path.insert(0, "python")
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
debug = 0
verbose = 0
quiet = 1
#
# the testsuite description
#
CONF=os.path.join(os.path.dirname(__file__), "test/xsdtest/xsdtestsuite.xml")
LOG="check-xsddata-test-suite.log"
log = open(LOG, "w")
nb_schemas_tests = 0
nb_schemas_success = 0
nb_schemas_failed = 0
nb_instances_tests = 0
nb_instances_success = 0
nb_instances_failed = 0
libxml2.lineNumbersDefault(1)
#
# Error and warning callbacks
#
def callback(ctx, str):
global log
log.write("%s%s" % (ctx, str))
libxml2.registerErrorHandler(callback, "")
#
# Resolver callback
#
resources = {}
def resolver(URL, ID, ctxt):
global resources
if resources.has_key(URL):
return(StringIO.StringIO(resources[URL]))
log.write("Resolver failure: asked %s\n" % (URL))
log.write("resources: %s\n" % (resources))
return None
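# Editor's note: the resolver serves entity/document requests from the
# in-memory `resources` dict filled by handle_resource()/handle_dir(), so a
# testCase can reference pseudo-URLs without touching the filesystem.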
#
# handle a valid instance
#
def handle_valid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
mem = libxml2.debugMemory(1);
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nFailed to parse correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
if mem != libxml2.debugMemory(1):
print "validating instance %d line %d leaks" % (
nb_instances_tests, node.lineNo())
if ret != 0:
log.write("\nFailed to validate correct instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle an invalid instance
#
def handle_invalid(node, schema):
global log
global nb_instances_success
global nb_instances_failed
instance = node.prop("dtd")
if instance == None:
instance = ""
child = node.children
while child != None:
if child.type != 'text':
instance = instance + child.serialize()
child = child.next
# mem = libxml2.debugMemory(1);
try:
doc = libxml2.parseDoc(instance)
except:
doc = None
if doc == None:
log.write("\nStrange: failed to parse incorrect instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
return
if debug:
print "instance line %d" % (node.lineNo())
try:
ctxt = schema.relaxNGNewValidCtxt()
ret = doc.relaxNGValidateDoc(ctxt)
del ctxt
except:
ret = -1
doc.freeDoc()
# if mem != libxml2.debugMemory(1):
# print "validating instance %d line %d leaks" % (
# nb_instances_tests, node.lineNo())
if ret == 0:
log.write("\nFailed to detect validation problem in instance:\n-----\n")
log.write(instance)
log.write("\n-----\n")
nb_instances_failed = nb_instances_failed + 1
else:
nb_instances_success = nb_instances_success + 1
#
# handle correct and incorrect schema documents
#
def handle_correct(node):
global log
global nb_schemas_success
global nb_schemas_failed
schema = ""
child = node.children
while child != None:
if child.type != 'text':
schema = schema + child.serialize()
child = child.next
try:
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
except:
rngs = None
if rngs == None:
log.write("\nFailed to compile correct schema:\n-----\n")
log.write(schema)
log.write("\n-----\n")
nb_schemas_failed = nb_schemas_failed + 1
else:
nb_schemas_success = nb_schemas_success + 1
return rngs
def handle_incorrect(node):
global log
global nb_schemas_success
global nb_schemas_failed
schema = ""
child = node.children
while child != None:
if child.type != 'text':
schema = schema + child.serialize()
child = child.next
try:
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
except:
rngs = None
if rngs != None:
log.write("\nFailed to detect schema error in:\n-----\n")
log.write(schema)
log.write("\n-----\n")
nb_schemas_failed = nb_schemas_failed + 1
else:
# log.write("\nSuccess detecting schema error in:\n-----\n")
# log.write(schema)
# log.write("\n-----\n")
nb_schemas_success = nb_schemas_success + 1
return None
#
# resource handling: keep a dictionary of URL->string mappings
#
def handle_resource(node, dir):
global resources
try:
name = node.prop('name')
except:
name = None
if name == None or name == '':
log.write("resource has no name")
return;
if dir != None:
# name = libxml2.buildURI(name, dir)
name = dir + '/' + name
res = ""
child = node.children
while child != None:
if child.type != 'text':
res = res + child.serialize()
child = child.next
resources[name] = res
#
# dir handling: pseudo directory resources
#
def handle_dir(node, dir):
try:
name = node.prop('name')
except:
name = None
if name == None or name == '':
log.write("resource has no name")
return;
if dir != None:
# name = libxml2.buildURI(name, dir)
name = dir + '/' + name
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, name)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, name)
#
# handle a testCase element
#
def handle_testCase(node):
global nb_schemas_tests
global nb_instances_tests
global resources
sections = node.xpathEval('string(section)')
log.write("\n ======== test %d line %d section %s ==========\n" % (
nb_schemas_tests, node.lineNo(), sections))
resources = {}
if debug:
print "test %d line %d" % (nb_schemas_tests, node.lineNo())
dirs = node.xpathEval('dir')
for dir in dirs:
handle_dir(dir, None)
res = node.xpathEval('resource')
for r in res:
handle_resource(r, None)
tsts = node.xpathEval('incorrect')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <incorrect> example" %(node.lineNo())
schema = handle_incorrect(tsts[0])
else:
tsts = node.xpathEval('correct')
if tsts != []:
if len(tsts) != 1:
print "warning test line %d has more than one <correct> example"% (node.lineNo())
schema = handle_correct(tsts[0])
else:
print "warning <testCase> line %d has no <correct> nor <incorrect> child" % (node.lineNo())
nb_schemas_tests = nb_schemas_tests + 1;
valids = node.xpathEval('valid')
invalids = node.xpathEval('invalid')
nb_instances_tests = nb_instances_tests + len(valids) + len(invalids)
if schema != None:
for valid in valids:
handle_valid(valid, schema)
for invalid in invalids:
handle_invalid(invalid, schema)
#
# handle a testSuite element
#
def handle_testSuite(node, level = 0):
global nb_schemas_tests, nb_schemas_success, nb_schemas_failed
global nb_instances_tests, nb_instances_success, nb_instances_failed
if verbose and level >= 0:
old_schemas_tests = nb_schemas_tests
old_schemas_success = nb_schemas_success
old_schemas_failed = nb_schemas_failed
old_instances_tests = nb_instances_tests
old_instances_success = nb_instances_success
old_instances_failed = nb_instances_failed
docs = node.xpathEval('documentation')
authors = node.xpathEval('author')
if docs != []:
msg = ""
for doc in docs:
msg = msg + doc.content + " "
if authors != []:
msg = msg + "written by "
for author in authors:
msg = msg + author.content + " "
if quiet == 0:
print msg
sections = node.xpathEval('section')
if verbose and sections != [] and level <= 0:
msg = ""
for section in sections:
msg = msg + section.content + " "
if quiet == 0:
print "Tests for section %s" % (msg)
for test in node.xpathEval('testCase'):
handle_testCase(test)
for test in node.xpathEval('testSuite'):
handle_testSuite(test, level + 1)
if verbose and level >= 0 :
if sections != []:
msg = ""
for section in sections:
msg = msg + section.content + " "
print "Result of tests for section %s" % (msg)
elif docs != []:
msg = ""
for doc in docs:
msg = msg + doc.content + " "
print "Result of tests for %s" % (msg)
if nb_schemas_tests != old_schemas_tests:
print "found %d test schemas: %d success %d failures" % (
nb_schemas_tests - old_schemas_tests,
nb_schemas_success - old_schemas_success,
nb_schemas_failed - old_schemas_failed)
if nb_instances_tests != old_instances_tests:
print "found %d test instances: %d success %d failures" % (
nb_instances_tests - old_instances_tests,
nb_instances_success - old_instances_success,
nb_instances_failed - old_instances_failed)
#
# Parse the conf file
#
libxml2.substituteEntitiesDefault(1);
testsuite = libxml2.parseFile(CONF)
#
# Error and warnng callbacks
#
def callback(ctx, str):
global log
log.write("%s%s" % (ctx, str))
libxml2.registerErrorHandler(callback, "")
libxml2.setEntityLoader(resolver)
root = testsuite.getRootElement()
if root.name != 'testSuite':
print "%s doesn't start with a testSuite element, aborting" % (CONF)
sys.exit(1)
if quiet == 0:
print "Running Relax NG testsuite"
handle_testSuite(root)
if quiet == 0 or nb_schemas_failed != 0:
print "\nTOTAL:\nfound %d test schemas: %d success %d failures" % (
nb_schemas_tests, nb_schemas_success, nb_schemas_failed)
if quiet == 0 or nb_instances_failed != 0:
print "found %d test instances: %d success %d failures" % (
nb_instances_tests, nb_instances_success, nb_instances_failed)
testsuite.freeDoc()
# Memory debug specific
libxml2.relaxNGCleanupTypes()
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
if quiet == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| bsd-3-clause | -4,038,784,097,681,990,000 | 24.433333 | 96 | 0.624134 | false |
Learningtribes/edx-platform | openedx/core/djangolib/nose.py | 32 | 1212 | """
Utilities related to nose.
"""
from django.core.management import call_command
from django.db import DEFAULT_DB_ALIAS, connections, transaction
import django_nose
class NoseTestSuiteRunner(django_nose.NoseTestSuiteRunner):
"""Custom NoseTestSuiteRunner."""
def setup_databases(self):
""" Setup databases and then flush to remove data added by migrations. """
return_value = super(NoseTestSuiteRunner, self).setup_databases()
# Delete all data added by data migrations. Unit tests should setup their own data using factories.
call_command('flush', verbosity=0, interactive=False, load_initial_data=False)
# Through Django 1.8, auto increment sequences are not reset when calling flush on a SQLite db.
# So we do it ourselves.
# http://sqlite.org/autoinc.html
connection = connections[DEFAULT_DB_ALIAS]
if connection.vendor == 'sqlite' and not connection.features.supports_sequence_reset:
with transaction.atomic(using=DEFAULT_DB_ALIAS):
cursor = connection.cursor()
cursor.execute(
"delete from sqlite_sequence;"
)
return return_value
| agpl-3.0 | 7,844,781,151,393,649,000 | 39.4 | 107 | 0.674917 | false |
ran5515/DeepDecision | tensorflow/contrib/rnn/python/kernel_tests/core_rnn_cell_test.py | 21 | 34945 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
# TODO(ebrevdo): Remove once _linear is fully deprecated.
# pylint: disable=protected-access
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
# pylint: enable=protected-access
linear = rnn_cell_impl._linear
class RNNCellTest(test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = linear([x], 2, False)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = linear([x], 2, False)
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = linear([x], 2, False)
with variable_scope.variable_scope(new_scope, reuse=True):
linear([l1], 2, False)
self.assertEqual(len(variables_lib.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.test_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test GRUCell with input_size != num_units.
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g],
{x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testBasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 8])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
g, out_m = cell(x, m)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(
expected_variable_names, [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem = np.array([[
0.68967271, 0.68967271, 0.44848421, 0.44848421, 0.39897051,
0.39897051, 0.24024698, 0.24024698
]])
self.assertAllClose(res[1], expected_mem)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
m = array_ops.zeros([1, 4])
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1., 1.]]),
m.name: 0.1 * np.ones([1, 4])})
self.assertEqual(len(res), 2)
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run([g, out_m],
{x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size - 1, state_size])})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run([g, out_m],
{x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size, state_size])})
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.test_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
self.assertEquals(variables[0].op.name, "root/lstm_cell/kernel")
self.assertEquals(variables[1].op.name, "root/lstm_cell/bias")
self.assertEquals(variables[2].op.name,
"root/lstm_cell/projection/kernel")
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.OutputProjectionWrapper(rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testResidualWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(base_cell)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/cpu:14159")
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
with self.test_session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/gpu:0")
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
step_stats = run_metadata.step_stats
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class DropoutWrapperTest(test.TestCase):
def _testDropoutWrapper(self, batch_size=None, time_steps=None,
parallel_iterations=None, **kwargs):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)
] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.constant(
[[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)
] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=rnn_cell_impl.DropoutWrapper(
rnn_cell_impl.LSTMCell(3), dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testDropoutWrapperKeepAllConstantInput(self):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
# Clear away the graph and the test session (which keeps variables around)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_none,
state_keep_prob=keep_all)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoState(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_all,
state_keep_prob=keep_none)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
self.assertAllClose(np.zeros(res[1].c.shape), res[1].c)
def testDropoutWrapperKeepNoInput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none, output_keep_prob=keep_all,
state_keep_prob=keep_all)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_some,
state_keep_prob=keep_all, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
class SlimRNNCellTest(test.TestCase):
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
my_cell = functools.partial(basic_rnn_cell, num_units=2)
# pylint: disable=protected-access
g, _ = rnn_cell_impl._SlimRNNCell(my_cell)(x, m)
# pylint: enable=protected-access
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellMatch(self):
batch_size = 32
input_size = 100
num_units = 10
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inputs = random_ops.random_uniform((batch_size, input_size))
_, initial_state = basic_rnn_cell(inputs, None, num_units)
rnn_cell = rnn_cell_impl.BasicRNNCell(num_units)
outputs, state = rnn_cell(inputs, initial_state)
variable_scope.get_variable_scope().reuse_variables()
my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
# pylint: disable=protected-access
slim_cell = rnn_cell_impl._SlimRNNCell(my_cell)
# pylint: enable=protected-access
slim_outputs, slim_state = slim_cell(inputs, initial_state)
self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
self.assertEqual(slim_state.get_shape(), state.get_shape())
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([slim_outputs, slim_state, outputs, state])
self.assertAllClose(res[0], res[2])
self.assertAllClose(res[1], res[3])
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(linear([inputs, state], num_units, True))
return output, output
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,530,988,242,362,112,000 | 42.46393 | 80 | 0.612906 | false |
crisisking/udbraaains | brains/namelist/migrations/0006_auto.py | 1 | 2967 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Player', fields ['is_dead']
db.create_index('namelist_player', ['is_dead'])
# Adding index on 'Player', fields ['profile_id']
db.create_index('namelist_player', ['profile_id'])
def backwards(self, orm):
# Removing index on 'Player', fields ['profile_id']
db.delete_index('namelist_player', ['profile_id'])
# Removing index on 'Player', fields ['is_dead']
db.delete_index('namelist_player', ['is_dead'])
models = {
'mapping.location': {
'Meta': {'unique_together': "(('x', 'y'),)", 'object_name': 'Location'},
'building_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'has_tree': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'suburb': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'namelist.category': {
'Meta': {'object_name': 'Category'},
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'namelist.player': {
'Meta': {'object_name': 'Player'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['namelist.Category']", 'null': 'True', 'blank': 'True'}),
'group_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dead': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mapping.Location']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'profile_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'scrape_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'})
}
}
complete_apps = ['namelist']
| bsd-3-clause | 8,975,468,805,227,400,000 | 50.155172 | 144 | 0.552747 | false |
SimVascular/VTK | ThirdParty/Twisted/twisted/test/test_manhole.py | 41 | 2092 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.manhole import service
from twisted.spread.util import LocalAsRemote
class Dummy:
pass
class DummyTransport:
def getHost(self):
return 'INET', '127.0.0.1', 0
class DummyManholeClient(LocalAsRemote):
zero = 0
broker = Dummy()
broker.transport = DummyTransport()
def __init__(self):
self.messages = []
def console(self, messages):
self.messages.extend(messages)
def receiveExplorer(self, xplorer):
pass
def setZero(self):
self.zero = len(self.messages)
def getMessages(self):
return self.messages[self.zero:]
# local interface
sync_console = console
sync_receiveExplorer = receiveExplorer
sync_setZero = setZero
sync_getMessages = getMessages
class ManholeTest(unittest.TestCase):
"""Various tests for the manhole service.
    Both the importIdentity and importMain tests are known to fail
when the __name__ in the manhole namespace is set to certain
values.
"""
def setUp(self):
self.service = service.Service()
self.p = service.Perspective(self.service)
self.client = DummyManholeClient()
self.p.attached(self.client, None)
def test_importIdentity(self):
"""Making sure imported module is the same as one previously loaded.
"""
self.p.perspective_do("from twisted.manhole import service")
self.client.setZero()
self.p.perspective_do("int(service is sys.modules['twisted.manhole.service'])")
msg = self.client.getMessages()[0]
self.assertEqual(msg, ('result',"1\n"))
def test_importMain(self):
"""Trying to import __main__"""
self.client.setZero()
self.p.perspective_do("import __main__")
if self.client.getMessages():
msg = self.client.getMessages()[0]
if msg[0] in ("exception","stderr"):
self.fail(msg[1])
#if __name__=='__main__':
# unittest.main()
| bsd-3-clause | -8,801,727,760,354,455,000 | 26.893333 | 87 | 0.640535 | false |
jab1982/opennsa | opennsa/config.py | 1 | 8668 | """
Configuration reader and defaults.
Author: Henrik Thostrup Jensen <[email protected]>
Copyright: NORDUnet (2011)
"""
import os
import ConfigParser
from opennsa import constants as cnt
# defaults
DEFAULT_CONFIG_FILE = '/etc/opennsa.conf'
DEFAULT_LOG_FILE = '/var/log/opennsa.log'
DEFAULT_TLS = 'true'
DEFAULT_TOPOLOGY_FILE = '/usr/local/share/nsi/topology.owl'
DEFAULT_TCP_PORT = 9080
DEFAULT_TLS_PORT = 9443
DEFAULT_VERIFY = True
DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs' # This will work on most modern Linux distros
# config blocks and options
BLOCK_SERVICE = 'service'
BLOCK_DUD = 'dud'
BLOCK_JUNIPER_EX = 'juniperex'
BLOCK_JUNOS = 'junos'
BLOCK_FORCE10 = 'force10'
BLOCK_ARGIA = 'argia'
BLOCK_BROCADE = 'brocade'
BLOCK_DELL = 'dell'
BLOCK_NCSVPN = 'ncsvpn'
# service block
NETWORK_NAME = 'network' # mandatory
LOG_FILE = 'logfile'
HOST = 'host'
PORT = 'port'
TLS = 'tls'
NRM_MAP_FILE = 'nrmmap'
PEERS = 'peers'
POLICY = 'policy'
PLUGIN = 'plugin'
# database
DATABASE = 'database' # mandatory
DATABASE_USER = 'dbuser' # mandatory
DATABASE_PASSWORD = 'dbpassword' # can be none (os auth)
# tls
KEY = 'key' # mandatory, if tls is set
CERTIFICATE = 'certificate' # mandatory, if tls is set
CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty)
VERIFY_CERT = 'verify'
ALLOWED_HOSTS   = 'allowedhosts' # comma-separated list
# generic ssh stuff, don't use directly
_SSH_HOST = 'host'
_SSH_PORT = 'port'
_SSH_HOST_FINGERPRINT = 'fingerprint'
_SSH_USER = 'user'
_SSH_PASSWORD = 'password'
_SSH_PUBLIC_KEY = 'publickey'
_SSH_PRIVATE_KEY = 'privatekey'
# juniper block - same for ex/qxf backend and mx backend
JUNIPER_HOST = _SSH_HOST
JUNIPER_PORT = _SSH_PORT
JUNIPER_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
JUNIPER_USER = _SSH_USER
JUNIPER_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
JUNIPER_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# force10 block
FORCE10_HOST = _SSH_HOST
FORCE10_PORT = _SSH_PORT
FORCE10_USER = _SSH_USER
FORCE10_PASSWORD = _SSH_PASSWORD
FORCE10_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
FORCE10_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
FORCE10_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# argia block
ARGIA_COMMAND_DIR = 'commanddir'
ARGIA_COMMAND_BIN = 'commandbin'
# Brocade block
BROCADE_HOST = _SSH_HOST
BROCADE_PORT = _SSH_PORT
BROCADE_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
BROCADE_USER = _SSH_USER
BROCADE_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
BROCADE_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
BROCADE_ENABLE_PASSWORD = 'enablepassword'
# Dell PowerConnect
DELL_HOST = _SSH_HOST
DELL_PORT = _SSH_PORT
DELL_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
DELL_USER = _SSH_USER
DELL_PASSWORD = _SSH_PASSWORD
# NCS VPN Backend
NCS_SERVICES_URL = 'url'
NCS_USER = 'user'
NCS_PASSWORD = 'password'
class ConfigurationError(Exception):
"""
Raised in case of invalid/inconsistent configuration.
"""
class Peer(object):
def __init__(self, url, cost):
self.url = url
self.cost = cost
def readConfig(filename):
cfg = ConfigParser.SafeConfigParser()
cfg.add_section(BLOCK_SERVICE)
cfg.read( [ filename ] )
return cfg
def readVerifyConfig(cfg):
"""
Read a config and verify that things are correct. Will also fill in
default values where applicable.
This is supposed to be used during application creation (before service
    start) to ensure that simple configuration errors do not pop up after
daemonization.
Returns a "verified" config, which is a dictionary.
"""
vc = {}
try:
vc[NETWORK_NAME] = cfg.get(BLOCK_SERVICE, NETWORK_NAME)
except ConfigParser.NoOptionError:
raise ConfigurationError('No network name specified in configuration file (mandatory)')
try:
vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE)
except ConfigParser.NoOptionError:
vc[LOG_FILE] = DEFAULT_LOG_FILE
try:
nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE)
if not os.path.exists(nrm_map_file):
raise ConfigurationError('Specified NRM mapping file does not exist (%s)' % nrm_map_file)
vc[NRM_MAP_FILE] = nrm_map_file
except ConfigParser.NoOptionError:
vc[NRM_MAP_FILE] = None
try:
peers_raw = cfg.get(BLOCK_SERVICE, PEERS)
vc[PEERS] = [ Peer(purl, 1) for purl in peers_raw.split('\n') ]
except ConfigParser.NoOptionError:
vc[PEERS] = None
try:
vc[HOST] = cfg.get(BLOCK_SERVICE, HOST)
except ConfigParser.NoOptionError:
vc[HOST] = None
try:
vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS)
except ConfigParser.NoOptionError:
vc[TLS] = DEFAULT_TLS
try:
vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT)
except ConfigParser.NoOptionError:
vc[PORT] = DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT
try:
policies = cfg.get(BLOCK_SERVICE, POLICY).split(',')
for policy in policies:
            if policy not in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE):
raise ConfigurationError('Invalid policy: %s' % policy)
vc[POLICY] = policies
except ConfigParser.NoOptionError:
vc[POLICY] = []
try:
vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN)
except ConfigParser.NoOptionError:
vc[PLUGIN] = None
# database
try:
vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE)
except ConfigParser.NoOptionError:
raise ConfigurationError('No database specified in configuration file (mandatory)')
try:
vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER)
except ConfigParser.NoOptionError:
raise ConfigurationError('No database user specified in configuration file (mandatory)')
try:
vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD)
except ConfigParser.NoOptionError:
vc[DATABASE_PASSWORD] = None
# we always extract certdir and verify as we need that for performing https requests
try:
certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR)
if not os.path.exists(certdir):
raise ConfigurationError('Specified certdir does not exist (%s)' % certdir)
vc[CERTIFICATE_DIR] = certdir
except ConfigParser.NoOptionError, e:
vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR
try:
vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT)
except ConfigParser.NoOptionError:
vc[VERIFY_CERT] = DEFAULT_VERIFY
# tls
if vc[TLS]:
try:
hostkey = cfg.get(BLOCK_SERVICE, KEY)
hostcert = cfg.get(BLOCK_SERVICE, CERTIFICATE)
if not os.path.exists(hostkey):
raise ConfigurationError('Specified hostkey does not exist (%s)' % hostkey)
if not os.path.exists(hostcert):
raise ConfigurationError('Specified hostcert does not exist (%s)' % hostcert)
vc[KEY] = hostkey
vc[CERTIFICATE] = hostcert
try:
allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS)
vc[ALLOWED_HOSTS] = allowed_hosts_cfg.split(',')
except:
pass
except ConfigParser.NoOptionError, e:
# Not enough options for configuring tls context
raise ConfigurationError('Missing TLS option: %s' % str(e))
# backends
backends = {}
for section in cfg.sections():
if section == 'service':
continue
if ':' in section:
            backend_type, name = section.split(':', 1)
else:
backend_type = section
name = ''
if name in backends:
raise ConfigurationError('Can only have one backend named "%s"' % name)
if backend_type in (BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNOS, BLOCK_FORCE10, BLOCK_BROCADE, BLOCK_DELL, BLOCK_NCSVPN):
backend_conf = dict( cfg.items(section) )
backend_conf['_backend_type'] = backend_type
backends[name] = backend_conf
if not backends:
raise ConfigurationError('No or invalid backend specified')
vc['backend'] = backends
return vc
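# Illustrative sketch (not part of the original module): load and verify a
# configuration file, reporting problems before the service daemonizes. The
# command-line handling below is an assumption for demonstration only.
if __name__ == '__main__':
    import sys
    config_file = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CONFIG_FILE
    try:
        vc = readVerifyConfig(readConfig(config_file))
        print 'Network %s on port %s (tls: %s)' % (vc[NETWORK_NAME], vc[PORT], vc[TLS])
    except ConfigurationError, e:
        print 'Configuration error: %s' % str(e)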
| bsd-3-clause | -7,235,053,058,537,928,000 | 29.202091 | 126 | 0.61479 | false |
jrwdunham/old-webapp | onlinelinguisticdatabase/lib/app_globals.py | 1 | 16584 | # −*− coding: UTF−8 −*−
# Copyright (C) 2010 Joel Dunham
#
# This file is part of OnlineLinguisticDatabase.
#
# OnlineLinguisticDatabase is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OnlineLinguisticDatabase is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OnlineLinguisticDatabase. If not, see
# <http://www.gnu.org/licenses/>.
"""The application's Globals object"""
import string
from pylons import session
from orthography import Orthography
class Globals(object):
"""Globals acts as a container for objects available throughout the
life of the application
"""
def __init__(self):
"""One instance of Globals is created during application
initialization and is available during requests via the
'app_globals' variable
"""
# Options for searchTypes: these are used by the queryBuilder module.
self.searchTypes = [
'as a phrase',
'all of these',
'any of these',
'as a reg exp',
'exactly'
]
# Options for dropdown menu between search expressions 1 and 2
self.andOrNot = [
('and_', 'and'),
('or_', 'or'),
('not_', 'and not')
]
# Search Locations - columns that can be searched in search expressions
self.searchLocations = {
'form': [
('transcription', 'orthographic transcription'),
('phoneticTranscription', 'broad phonetic transcription'),
('narrowPhoneticTranscription', 'narrow phonetic transcription'),
('gloss', 'gloss'),
('morphemeBreak', 'morpheme break'),
('morphemeGloss', 'morpheme gloss'),
('comments', 'general comments'),
('speakerComments', 'speaker comments'),
('context', 'context'),
('syntacticCategoryString', 'syntactic category string'),
('id', 'ID')
],
'file': [
('name', 'name'),
('description', 'description'),
('id', 'ID')
],
'collection': [
('title', 'title'),
('type', 'type'),
('description', 'description'),
('contents', 'contents'),
('id', 'ID')
]
}
# Search Integer Filter Locations - columns that can be searched in
# integer filters
self.searchIntegerFilterLocations = {
'form': [
('id', 'ID')
],
'file': [
('id', 'ID'),
('size', 'size')
],
'collection': [
('id', 'ID')
]
}
# Grammaticalities: possible values in grammaticality and
# glossGrammaticality fields
self.grammaticalities = [u'', u'*', u'?', u'#']
# Export Options: correspond to defs in /templates/base/exporter.html
self.exportOptions = [
('t', ' Plain text: transcription only'),
('t_g', ' Plain text: transcription & gloss'),
('t_mb_mg_g', """ Plain text: transcription, morpheme break,
morpheme gloss & gloss"""),
('all', ' Plain text: all fields')
]
# Number of Forms to display per page
self.form_items_per_page = 10
# Number of Files to display per page
self.file_items_per_page = 10
# Number of Collections to display per page
self.collection_items_per_page = 100
# Number of previous (Form) searches that are remembered in the session
self.maxNoPreviousSearches = 10
# The roles that users of the OLD may have
self.roles = ['administrator', 'contributor', 'viewer']
# The ways in which the content of a Collection (Forms and textual
# commentary) can be displayed
self.collectionViewTypes = ['long', 'short', 'columns']
# The MIME types of the Files that can be uploaded to the OLD
# Values are user-friendly names of the file types.
# Empty values indicate that key.split('/')[0] should be used.
# See http://en.wikipedia.org/wiki/Internet_media_type
self.allowedFileTypes = {
u'text/plain': u'plain text',
u'application/x-latex': u'LaTeX document',
u'application/msword': u'MS Word document',
u'application/vnd.ms-powerpoint': u'MS PowerPoint document',
            u'application/vnd.openxmlformats-officedocument.wordprocessingml.document': u'Office Open XML (.docx)',
            u'application/vnd.oasis.opendocument.text': u'Open Document Format (.odt)',
u'application/pdf': u'PDF',
u'image/gif': u'',
u'image/jpeg': u'',
u'image/png': u'',
u'audio/mpeg': u'',
u'audio/ogg': u'',
u'audio/x-wav': u'',
u'video/mpeg': u'',
u'video/mp4': u'',
u'video/ogg': u'',
u'video/quicktime': u'',
u'video/x-ms-wmv': u''
}
# Valid morpheme delimiters, i.e., characters that can occur between morphemes
self.morphDelimiters = ['-', '=']
# Valid punctuation.
self.punctuation = list(u""".,;:!?'"\u2018\u2019\u201C\u201D[]{}()-""")
# Collection types are the basic categories of Collections
self.collectionTypes = [u'story', u'elicitation', u'paper',
u'discourse', u'other']
self.collectionTypesPlurals = {
u'elicitation': u'elicitations',
u'story': u'stories',
u'paper': u'papers',
u'discourse': u'discourses',
u'other': u'other'
}
self.topPrimaryMenuItems = [
{
'id': 'database',
'name': 'Database',
'url': '/home',
'accesskey': 'h',
'title': 'Database mode'
},
{
'id': 'dictionary',
'name': 'Dictionary',
'url': '/dictionary/browse',
'accesskey': 'd',
'title': 'Dictionary mode'
},
{
'id': 'help',
'name': 'Help',
'url': '/help',
'title': 'Help with using the OLD'
},
{
'id': 'settings',
'name': 'Settings',
'url': '/settings',
'title': 'View and edit system-wide settings'
}
]
self.topSecondaryMenuItemChoices = {
'database': [
{
'id': 'people',
'name': 'People',
'url': '/people',
'accesskey': 'p',
'title': 'Info about Speakers and Researchers'
},
{
'id': 'tag',
'name': 'Tags',
'url': '/tag',
'title': 'Keywords, Categories and Elicitation Methods',
'accesskey':'t'
},
{
'id': 'source',
'name': 'Sources',
'url': '/source',
'title': 'Info about Sources'
},
{
'id': 'memory',
'name': 'Memory',
'url': '/memory',
'accesskey': 'm',
'title': 'Forms that you are currently interested in'
}
],
'dictionary': [
{
'id': 'dictionarybrowse',
'name': 'Browse',
'url': '/dictionary/browse',
'title': 'Browse the dictionary'
},
{
'id': 'dictionarysearch',
'name': 'Search',
                    'url': '/dictionary/search',
                    'title': 'Search the dictionary'
}
],
'help': [
{
'id': 'helpolduserguide',
'name': 'OLD User Guide',
'url': '/help/olduserguide',
'title': 'View the OLD user guide'
},
{
'id': 'helpapplicationhelp',
'name': 'Help Page',
'url': '/help/applicationhelp',
'title': "View this OLD application's help page"
}
]
}
self.sideMenuItems = {
'form': [
{
'id': 'formadd',
'name': 'Add',
'url': '/form/add',
'accesskey': 'a',
'title': 'Add a Form'
},
{
'id': 'formsearch',
'name': 'Search',
'url': '/form/search',
'accesskey': 's',
'title': 'Search for Forms'
}
],
'file': [
{
'id': 'fileadd',
'name': 'Add',
'url': '/file/add',
'accesskey': 'q',
'title': 'Create a new File'
},
{
'id': 'filesearch',
'name': 'Search',
'url': '/file/search',
'accesskey': 'w',
'title': 'Search for Files'
}
],
'collection': [
{
'id': 'collectionadd',
'name': 'Add',
'url': '/collection/add',
'accesskey': 'z',
'title': 'Add a new Collection'
},
{
'id': 'collectionsearch',
'name': 'Search',
'url': '/collection/search',
'accesskey': 'x',
'title': 'Search for Collections'
}
]
}
# MUTABLE APP GLOBALS
# these attributes are set with defaults at initialization but
# may be changed over the lifespan of the application
# APPLICATION SETTINGS
# name of the object language, metalanguage, etc.
defaultOrthography = ','.join(list(string.ascii_lowercase))
self.objectLanguageName = u'Anonymous'
self.objectLanguageId = u''
self.metaLanguageName = u'Unknown'
self.headerImageName = u''
self.colorsCSS = 'green.css'
self.morphemeBreakIsObjectLanguageString = u'no'
self.metaLanguageOrthography = Orthography(defaultOrthography)
self.OLOrthographies = {
u'Orthography 1': (
u'Unnamed',
Orthography(
defaultOrthography, lowercase=1, initialGlottalStops=1
)
)
}
self.storageOrthography = self.OLOrthographies[
u'Orthography 1']
self.defaultInputOrthography = self.OLOrthographies[
u'Orthography 1']
self.defaultOutputOrthography = self.OLOrthographies[
u'Orthography 1']
self.inputToStorageTranslator = None
self.storageToInputTranslator = None
self.storageToOutputTranslator = None
# formCount is the number of Forms in the OLD application.
# This variable is updated on the deletion and addition of Forms.
# THIS IS PROBABLY NOT A GOOD IDEA BECAUSE OF MULTI-THREADING.
# JUST DO A SQLALCHEMY COUNT(ID) QUERY!
self.formCount = None
# Secondary Object Lists
# These variables are set by the function
# updateSecondaryObjectsInAppGlobals() in lib/functions.py
self.speakers = []
self.users = []
self.nonAdministrators = []
self.unrestrictedUsers = []
self.sources = []
self.syncats = []
self.keywords = []
self.elicitationMethods = []
def getActiveTopPrimaryMenuItem(self, url):
"""Given the url of the current page, return the appropriate active top
primary menu item.
"""
result = ''
controller = url.split('/')[1]
controllerToPrimaryMenuItem = {
'form': 'database',
'file': 'database',
'collection': 'database',
'people': 'database',
'tag': 'database',
'source': 'database',
'memory': 'database',
'speaker': 'database',
'researcher': 'database',
'key': 'database',
'category': 'database',
'method': 'database',
'home': 'database',
'settings': 'settings',
'dictionary': 'dictionary',
'help': 'help'
}
try:
result = controllerToPrimaryMenuItem[controller]
except KeyError:
pass
return result
def getMenuItemsTurnedOnByController(self, url):
"""Certain controllers need to make certain menu items active; encode
that here.
"""
result = []
controller = url.split('/')[1]
controllerXTurnsOn = {
'speaker': ['people'],
'researcher': ['people'],
'key': ['tag'],
'category': ['tag'],
'method': ['tag']
}
try:
result = controllerXTurnsOn[controller]
except KeyError:
pass
return result
def getActiveMenuItems(self, url):
""" Function returns the ID of each menu item that should be active
given a particular URL.
Partially logical, partially ad hoc specification.
"""
activeMenuItems = []
controller = url.split('/')[1]
controllerAction = ''.join(url.split('/')[1:3])
activeMenuItems.append(self.getActiveTopPrimaryMenuItem(url))
activeMenuItems += self.getMenuItemsTurnedOnByController(url)
activeMenuItems.append(controllerAction)
activeMenuItems.append(controller)
return activeMenuItems
def authorizedMenuItem(self, menuItem):
"""Return True if menu item should be viewable by current user;
else False.
"""
        if 'authorizationLevel' not in menuItem or (
                'user_role' in session and
                session['user_role'] in menuItem['authorizationLevel']):
return True
else:
return False
def getTopSecondaryMenuItems(self, url):
"""The menu items in the top secondary tier are determined by the active
menu item in the top primary tier. Return an empty list if the top
secondary tier should be omitted.
"""
activeTopPrimaryMenuItem = self.getActiveTopPrimaryMenuItem(url)
topSecondaryMenuItems = []
try:
temp = self.topSecondaryMenuItemChoices[activeTopPrimaryMenuItem]
topSecondaryMenuItems = [x for x in temp
if self.authorizedMenuItem(x)]
except KeyError:
pass
return topSecondaryMenuItems
def getTopPrimaryMenuItems(self):
"""Return top priamry menu items for which the current user is
authorized.
"""
return [x for x in self.topPrimaryMenuItems
if self.authorizedMenuItem(x)]
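# Illustrative worked example (not part of the original file); the URL is
# hypothetical. For g = Globals() and the request URL '/speaker/view/1':
#
#     g.getActiveTopPrimaryMenuItem('/speaker/view/1')        # 'database'
#     g.getMenuItemsTurnedOnByController('/speaker/view/1')   # ['people']
#     g.getActiveMenuItems('/speaker/view/1')
#     # ['database', 'people', 'speakerview', 'speaker']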
| gpl-3.0 | -8,743,266,454,911,274,000 | 33.892632 | 119 | 0.482141 | false |
Hellowlol/PyTunes | libs/mutagen/monkeysaudio.py | 16 | 2785 | # A Monkey's Audio (APE) reader/tagger
#
# Copyright 2006 Lukas Lalinsky <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Monkey's Audio streams with APEv2 tags.
Monkey's Audio is a very efficient lossless audio compressor developed
by Matt Ashland.
For more information, see http://www.monkeysaudio.com/.
"""
__all__ = ["MonkeysAudio", "Open", "delete"]
import struct
from mutagen.apev2 import APEv2File, error, delete
from mutagen._util import cdata
class MonkeysAudioHeaderError(error):
pass
class MonkeysAudioInfo(object):
"""Monkey's Audio stream information.
Attributes:
* channels -- number of audio channels
* length -- file length in seconds, as a float
* sample_rate -- audio sampling rate in Hz
* bits_per_sample -- bits per sample
* version -- Monkey's Audio stream version, as a float (eg: 3.99)
"""
def __init__(self, fileobj):
header = fileobj.read(76)
if len(header) != 76 or not header.startswith("MAC "):
raise MonkeysAudioHeaderError("not a Monkey's Audio file")
self.version = cdata.ushort_le(header[4:6])
if self.version >= 3980:
(blocks_per_frame, final_frame_blocks, total_frames,
self.bits_per_sample, self.channels,
self.sample_rate) = struct.unpack("<IIIHHI", header[56:76])
else:
compression_level = cdata.ushort_le(header[6:8])
self.channels, self.sample_rate = struct.unpack(
"<HI", header[10:16])
total_frames, final_frame_blocks = struct.unpack(
"<II", header[24:32])
if self.version >= 3950:
blocks_per_frame = 73728 * 4
elif self.version >= 3900 or (self.version >= 3800 and
compression_level == 4):
blocks_per_frame = 73728
else:
blocks_per_frame = 9216
self.version /= 1000.0
self.length = 0.0
if self.sample_rate != 0 and total_frames > 0:
total_blocks = ((total_frames - 1) * blocks_per_frame +
final_frame_blocks)
self.length = float(total_blocks) / self.sample_rate
def pprint(self):
return "Monkey's Audio %.2f, %.2f seconds, %d Hz" % (
self.version, self.length, self.sample_rate)
class MonkeysAudio(APEv2File):
_Info = MonkeysAudioInfo
_mimes = ["audio/ape", "audio/x-ape"]
@staticmethod
def score(filename, fileobj, header):
return header.startswith("MAC ") + filename.lower().endswith(".ape")
Open = MonkeysAudio
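# Illustrative usage sketch (not part of the original module); the file name
# is an assumption, any Monkey's Audio (.ape) file would do.
#
#     from mutagen.monkeysaudio import MonkeysAudio
#     audio = MonkeysAudio("example.ape")
#     print audio.info.pprint()  # e.g. "Monkey's Audio 3.99, 210.00 seconds, 44100 Hz"
#     print audio.info.channels, audio.info.sample_rate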
| gpl-3.0 | -5,587,546,866,950,363,000 | 32.154762 | 76 | 0.610772 | false |
bdang2012/taiga-back-casting | taiga/projects/mixins/on_destroy.py | 1 | 1879 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import transaction as tx
from taiga.base.api.utils import get_object_or_404
#############################################
# ViewSets
#############################################
class MoveOnDestroyMixin:
@tx.atomic
def destroy(self, request, *args, **kwargs):
move_to = self.request.QUERY_PARAMS.get('moveTo', None)
if move_to is None:
return super().destroy(request, *args, **kwargs)
obj = self.get_object_or_none()
move_item = get_object_or_404(self.model, id=move_to)
self.check_permissions(request, 'destroy', obj)
qs = self.move_on_destroy_related_class.objects.filter(**{self.move_on_destroy_related_field: obj})
qs.update(**{self.move_on_destroy_related_field: move_item})
if getattr(obj.project, self.move_on_destroy_project_default_field) == obj:
setattr(obj.project, self.move_on_destroy_project_default_field, move_item)
obj.project.save()
return super().destroy(request, *args, **kwargs)
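# Illustrative sketch (not part of the original module): a hypothetical
# viewset using the mixin. The model and field names are assumptions; the
# three move_on_destroy_* attributes are the hooks read by destroy() above.
#
#     class PointsViewSet(MoveOnDestroyMixin, ModelCrudViewSet):
#         model = models.Points
#         move_on_destroy_related_class = models.Role
#         move_on_destroy_related_field = "points"
#         move_on_destroy_project_default_field = "default_points"
#
# A client then supplies the replacement object in the query string, e.g.
# DELETE /api/v1/points/10?moveTo=2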
| agpl-3.0 | 480,005,793,771,961,600 | 39.804348 | 107 | 0.670751 | false |
klundberg/swift-corelibs-foundation | lib/target.py | 2 | 12322 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
from .config import Configuration
import platform
class ArchType:
UnknownArch = 0
armv7 = 1
armeb = 2
aarch64 = 3
aarch64_be = 4
bpfel = 5
bpfeb = 6
hexagon = 7
mips = 8
mipsel = 9
mips64 = 10
mips64el = 11
msp430 = 12
ppc = 13
ppc64 = 14
ppc64le = 15
r600 = 16
amdgcn = 17
sparc = 18
sparcv9 = 19
sparcel = 20
systemz = 21
tce = 22
thumb = 23
thumbeb = 24
x86 = 25
x86_64 = 26
xcore = 27
nvptx = 28
nvptx64 = 29
le32 = 30
le64 = 31
amdil = 32
amdil64 = 33
hsail = 34
hsail64 = 35
spir = 36
spir64 = 37
kalimba = 38
shave = 39
armv6 = 40
s390x = 41
# Do not assume that these are 1:1 mapping. This should follow
# canonical naming conventions for arm, etc. architectures.
# See apple/swift PR #608
@staticmethod
def to_string(value):
if value == ArchType.armv7:
return "armv7"
if value == ArchType.armv6:
return "armv6"
if value == ArchType.armeb:
return "armeb"
if value == ArchType.aarch64:
return "aarch64"
if value == ArchType.aarch64_be:
return "aarch64_be"
if value == ArchType.bpfel:
return "bpfel"
if value == ArchType.bpfeb:
return "bpfeb"
if value == ArchType.hexagon:
return "hexagon"
if value == ArchType.mips:
return "mips"
if value == ArchType.mipsel:
return "mipsel"
if value == ArchType.mips64:
return "mips64"
if value == ArchType.mips64el:
return "mips64el"
if value == ArchType.msp430:
return "msp430"
if value == ArchType.ppc:
return "ppc"
if value == ArchType.ppc64:
return "ppc64"
if value == ArchType.ppc64le:
return "ppc64le"
if value == ArchType.r600:
return "r600"
if value == ArchType.amdgcn:
return "amdgcn"
if value == ArchType.sparc:
return "sparc"
if value == ArchType.sparcv9:
return "sparcv9"
if value == ArchType.sparcel:
return "sparcel"
if value == ArchType.systemz:
return "systemz"
if value == ArchType.tce:
return "tce"
if value == ArchType.thumb:
return "armv7"
if value == ArchType.thumbeb:
return "thumbeb"
if value == ArchType.x86:
return "i386"
if value == ArchType.x86_64:
return "x86_64"
if value == ArchType.xcore:
return "xcore"
if value == ArchType.nvptx:
return "nvptx"
if value == ArchType.nvptx64:
return "nvptx64"
if value == ArchType.le32:
return "le32"
if value == ArchType.le64:
return "le64"
if value == ArchType.amdil:
return "amdil"
if value == ArchType.amdil64:
return "amdil64"
if value == ArchType.hsail:
return "hsail"
if value == ArchType.hsail64:
return "hsail64"
if value == ArchType.spir:
return "spir"
if value == ArchType.spir64:
return "spir64"
if value == ArchType.kalimba:
return "kalimba"
if value == ArchType.shave:
return "shave"
if value == ArchType.s390x:
return "s390x"
return "unknown"
# Not 1:1, See to_string
@staticmethod
def from_string(string):
if string == "armeb":
return ArchType.armeb
if string == "arm":
return ArchType.armv7
if string == "armv7":
return ArchType.armv7
if string == "armv7l":
return ArchType.armv7
if string == "armv6":
return ArchType.armv6
if string == "armv6l":
return ArchType.armv6
if string == "aarch64":
return ArchType.aarch64
if string == "aarch64_be":
return ArchType.aarch64_be
if string == "bpfel":
return ArchType.bpfel
if string == "bpfeb":
return ArchType.bpfeb
if string == "hexagon":
return ArchType.hexagon
if string == "mips":
return ArchType.mips
if string == "mipsel":
return ArchType.mipsel
if string == "mips64":
return ArchType.mips64
if string == "mips64el":
return ArchType.mips64el
if string == "msp430":
return ArchType.msp430
if string == "ppc":
return ArchType.ppc
if string == "ppc64":
return ArchType.ppc64
if string == "ppc64le":
return ArchType.ppc64le
if string == "r600":
return ArchType.r600
if string == "amdgcn":
return ArchType.amdgcn
if string == "sparc":
return ArchType.sparc
if string == "sparcv9":
return ArchType.sparcv9
if string == "sparcel":
return ArchType.sparcel
if string == "systemz":
return ArchType.systemz
if string == "tce":
return ArchType.tce
if string == "thumb":
return ArchType.thumb
if string == "thumbeb":
return ArchType.thumbeb
if string == "x86":
return ArchType.x86
if string == "x86_64":
return ArchType.x86_64
if string == "xcore":
return ArchType.xcore
if string == "nvptx":
return ArchType.nvptx
if string == "nvptx64":
return ArchType.nvptx64
if string == "le32":
return ArchType.le32
if string == "le64":
return ArchType.le64
if string == "amdil":
return ArchType.amdil
if string == "amdil64":
return ArchType.amdil64
if string == "hsail":
return ArchType.hsail
if string == "hsail64":
return ArchType.hsail64
if string == "spir":
return ArchType.spir
if string == "spir64":
return ArchType.spir64
if string == "kalimba":
return ArchType.kalimba
if string == "shave":
return ArchType.shave
if string == "s390x":
return ArchType.s390x
return ArchType.UnknownArch
class ArchSubType:
NoSubArch = 0
ARMSubArch_v8_1a = 1
ARMSubArch_v8 = 2
ARMSubArch_v7 = 3
ARMSubArch_v7em = 4
ARMSubArch_v7m = 5
ARMSubArch_v7s = 6
ARMSubArch_v6 = 7
ARMSubArch_v6m = 8
ARMSubArch_v6k = 9
ARMSubArch_v6t2 = 10
ARMSubArch_v5 = 11
ARMSubArch_v5te = 12
ARMSubArch_v4t = 13
KalimbaSubArch_v3 = 14
KalimbaSubArch_v4 = 15
KalimbaSubArch_v5 = 16
class OSType:
UnknownOS = 0
CloudABI = 1
Darwin = 2
DragonFly = 3
FreeBSD = 4
IOS = 5
KFreeBSD = 6
Linux = 7
Lv2 = 8
MacOSX = 9
NetBSD = 10
OpenBSD = 11
Solaris = 12
Win32 = 13
Haiku = 14
Minix = 15
RTEMS = 16
NaCl = 17
CNK = 18
Bitrig = 19
AIX = 20
CUDA = 21
NVCL = 22
AMDHSA = 23
PS4 = 24
class ObjectFormat:
UnknownObjectFormat = 0
COFF = 1
ELF = 2
MachO = 3
class EnvironmentType:
UnknownEnvironment = 0
GNU = 1
GNUEABI = 2
GNUEABIHF = 3
GNUX32 = 4
CODE16 = 5
EABI = 6
EABIHF = 7
Android = 8
MSVC = 9
Itanium = 10
Cygnus = 11
class Vendor:
UnknownVendor = 0
Apple = 1
PC = 2
SCEI = 3
BGP = 4
BGQ = 5
Freescale = 6
IBM = 7
ImaginationTechnologies = 8
MipsTechnologies = 9
NVIDIA = 10
CSR = 11
class Target:
triple = None
sdk = None
arch = None
executable_suffix = ""
dynamic_library_prefix = "lib"
dynamic_library_suffix = ".dylib"
static_library_prefix = "lib"
static_library_suffix = ".a"
def __init__(self, triple):
if "linux" in triple:
self.sdk = OSType.Linux
self.dynamic_library_suffix = ".so"
elif "freebsd" in triple:
self.sdk = OSType.FreeBSD
self.dynamic_library_suffix = ".so"
elif "windows" in triple or "win32" in triple:
self.sdk = OSType.Win32
self.dynamic_library_suffix = ".dll"
self.executable_suffix = ".exe"
elif "darwin" in triple:
self.sdk = OSType.MacOSX
else:
print("Unknown platform")
self.triple = triple
comps = triple.split('-')
self.arch = ArchType.from_string(comps[0])
@staticmethod
def default():
arch = ArchType.from_string(platform.machine())
triple = ArchType.to_string(arch)
if platform.system() == "Linux":
if (arch == ArchType.armv6) or (arch == ArchType.armv7):
triple += "-linux-gnueabihf"
else:
triple += "-linux-gnu"
elif platform.system() == "Darwin":
triple += "-apple-darwin"
elif platform.system() == "FreeBSD":
# Make this work on 10 as well.
triple += "-freebsd11.0"
else:
# TODO: This should be a bit more exhaustive
print("unknown host os")
return None
return triple
@property
def swift_triple(self):
triple = ArchType.to_string(self.arch)
if self.sdk == OSType.MacOSX:
return None
elif self.sdk == OSType.Linux:
# FIXME: It would be nice to detect the host ABI here
if (self.arch == ArchType.armv6) or (self.arch == ArchType.armv7):
triple += "-unknown-linux-gnueabihf"
else:
triple += "-unknown-linux"
elif self.sdk == OSType.FreeBSD:
triple += "-unknown-freebsd"
else:
print("unknown sdk for swift")
return None
return triple
@property
def swift_sdk_name(self):
if self.sdk == OSType.MacOSX:
return "macosx"
elif self.sdk == OSType.Linux:
return "linux"
elif self.sdk == OSType.FreeBSD:
return "freebsd"
else:
print("unknown sdk for swift")
return None
@property
def swift_arch(self):
return ArchType.to_string(self.arch)
class TargetConditional:
_sdk = None
_arch = None
_default = None
def __init__(self, sdk = None, arch = None, default = None):
self._sdk = sdk
self._arch = arch
self._default = default
    def evaluate(self, target):
if self._sdk is not None and target.sdk in self._sdk:
return self._sdk[target.sdk]
if self._arch is not None and target.arch in self._arch:
return self._arch[target.arch]
return self._default
@staticmethod
def value(value):
if type(value) is TargetConditional:
            return value.evaluate(Configuration.current.target)
return value
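# Illustrative example (not part of the original file): deriving the host
# target and evaluating a conditional against it; the values shown are what
# the code above produces on an x86_64 Linux host.
#
#     triple = Target.default()              # "x86_64-linux-gnu"
#     target = Target(triple)
#     target.swift_triple                    # "x86_64-unknown-linux"
#     target.swift_sdk_name                  # "linux"
#     target.dynamic_library_suffix          # ".so"
#     flag = TargetConditional(sdk={OSType.Linux: "-fPIC"}, default="")
#     flag.evaluate(target)                  # "-fPIC"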
| apache-2.0 | 4,916,125,948,489,552,000 | 27.589327 | 78 | 0.503571 | false |
marcelometal/Django-facebook | facebook_example/facebook_example/urls.py | 2 | 1535 | try:
from django.conf.urls import include, patterns, url
except ImportError:
from django.conf.urls.defaults import include, patterns, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# facebook and registration urls
(r'^facebook/', include('django_facebook.urls')),
(r'^accounts/', include('django_facebook.auth_urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
)
if settings.MODE == 'userena':
urlpatterns += patterns('',
(r'^accounts/', include('userena.urls')),
)
elif settings.MODE == 'django_registration':
urlpatterns += patterns('',
(r'^accounts/', include(
'registration.backends.default.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
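# Illustrative note (not part of the original file): settings.MODE is assumed
# to be defined in the project settings to pick the registration backend
# wired above, e.g.
#
#     # settings.py
#     MODE = 'userena'   # or 'django_registration'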
| bsd-3-clause | 2,463,787,182,072,904,700 | 39.394737 | 90 | 0.500326 | false |
hirokihamasaki/irma | probe/modules/antivirus/avg/avg.py | 1 | 3787 | #
# Copyright (c) 2013-2016 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
import os
import stat
from ..base import Antivirus
log = logging.getLogger(__name__)
class AVGAntiVirusFree(Antivirus):
_name = "AVG AntiVirus Free (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super(AVGAntiVirusFree, self).__init__(*args, **kwargs)
# scan tool variables
self._scan_args = (
"--heur " # use heuristics for scanning
"--paranoid " # Enable paranoid mode. Scan for less dangerous
                           # malware and more time-consuming algorithms.
"--arc " # scan through archives
"--macrow " # report documents with macros.
"--pwdw " # report password protected files
"--pup " # scan for Potentially Unwanted Programs
)
self._scan_patterns = [
re.compile(r'(?P<file>.*)'
r'\s+(Found|Virus found|Potentially harmful program|'
r'Virus identified|Trojan horse)\s+'
r'(?P<name>.*)(\\n)*.*$', re.IGNORECASE)
]
def is_error_fn(x):
return x in [1, 2, 3, 6, 7, 8, 9, 10]
# NOTE: do 'man avgscan' for return codes
self._scan_retcodes[self.ScanResult.CLEAN] = lambda x: x in [0]
self._scan_retcodes[self.ScanResult.INFECTED] = lambda x: x in [4, 5]
self._scan_retcodes[self.ScanResult.ERROR] = lambda x: is_error_fn(x)
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
result = None
if self.scan_path:
cmd = self.build_cmd(self.scan_path, '-v')
retcode, stdout, stderr = self.run_cmd(cmd)
if not retcode:
matches = re.search(r'(?P<version>\d+(\.\d+)+)',
stdout,
re.IGNORECASE)
if matches:
result = matches.group('version').strip()
return result
def get_database(self):
"""return list of files in the database"""
# extract folder where are installed definition files
avg_path = '/opt/avg/'
# NOTE: the structure/location of the update folders are documented in
# the /var/lib/avast/Setup/avast.setup script.
search_paths = map(lambda x:
'{avg_path}/av/update/{folder}/'
''.format(avg_path=avg_path, folder=x),
['backup', 'download', 'prepare'])
database_patterns = [
'*',
]
results = []
for pattern in database_patterns:
result = self.locate(pattern, search_paths, syspath=False)
results.extend(result)
return results if results else None
def get_scan_path(self):
"""return the full path of the scan tool"""
paths = self.locate("avgscan")
return paths[0] if paths else None
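# Illustrative usage sketch (not part of the original module); assumes the
# avgscan command line tool is installed where locate() can find it.
#
#     av = AVGAntiVirusFree()
#     av.get_scan_path()   # e.g. '/usr/bin/avgscan'
#     av.get_version()     # e.g. '13.0.3114'
#     av.get_database()    # definition files found under /opt/avg/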
| apache-2.0 | 3,487,845,709,680,859,600 | 36.49505 | 78 | 0.537365 | false |
nullishzero/Portage | pym/_emerge/create_depgraph_params.py | 2 | 3934 | # Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
from portage.util import writemsg_level
def create_depgraph_params(myopts, myaction):
#configure emerge engine parameters
#
# self: include _this_ package regardless of if it is merged.
# selective: exclude the package if it is merged
# recurse: go into the dependencies
# deep: go into the dependencies of already merged packages
# empty: pretend nothing is merged
# complete: completely account for all known dependencies
# remove: build graph for use in removing packages
# rebuilt_binaries: replace installed packages with rebuilt binaries
# rebuild_if_new_slot: rebuild or reinstall packages when
# slot/sub-slot := operator dependencies can be satisfied by a newer
# slot/sub-slot, so that older packages slots will become eligible for
# removal by the --depclean action as soon as possible
# ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator parts
	# of dependencies that have been recorded when packages were built
myparams = {"recurse" : True}
bdeps = myopts.get("--with-bdeps")
if bdeps is not None:
myparams["bdeps"] = bdeps
ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
if ignore_built_slot_operator_deps is not None:
myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
dynamic_deps = myopts.get("--dynamic-deps")
if dynamic_deps is not None:
myparams["dynamic_deps"] = dynamic_deps
if myaction == "remove":
myparams["remove"] = True
myparams["complete"] = True
myparams["selective"] = True
return myparams
rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
if rebuild_if_new_slot is not None:
myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
if "--update" in myopts or \
"--newrepo" in myopts or \
"--newuse" in myopts or \
"--reinstall" in myopts or \
"--noreplace" in myopts or \
myopts.get("--selective", "n") != "n":
myparams["selective"] = True
deep = myopts.get("--deep")
if deep is not None and deep != 0:
myparams["deep"] = deep
complete_if_new_use = \
myopts.get("--complete-graph-if-new-use")
if complete_if_new_use is not None:
myparams["complete_if_new_use"] = complete_if_new_use
complete_if_new_ver = \
myopts.get("--complete-graph-if-new-ver")
if complete_if_new_ver is not None:
myparams["complete_if_new_ver"] = complete_if_new_ver
if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
"--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
myparams["complete"] = True
if "--emptytree" in myopts:
myparams["empty"] = True
myparams["deep"] = True
myparams.pop("selective", None)
if "--nodeps" in myopts:
myparams.pop("recurse", None)
myparams.pop("deep", None)
myparams.pop("complete", None)
rebuilt_binaries = myopts.get('--rebuilt-binaries')
if rebuilt_binaries is True or \
rebuilt_binaries != 'n' and \
'--usepkgonly' in myopts and \
myopts.get('--deep') is True and \
'--update' in myopts:
myparams['rebuilt_binaries'] = True
binpkg_respect_use = myopts.get('--binpkg-respect-use')
if binpkg_respect_use is not None:
myparams['binpkg_respect_use'] = binpkg_respect_use
elif '--usepkgonly' not in myopts:
# If --binpkg-respect-use is not explicitly specified, we enable
# the behavior automatically (like requested in bug #297549), as
# long as it doesn't strongly conflict with other options that
# have been specified.
myparams['binpkg_respect_use'] = 'auto'
if myopts.get("--selective") == "n":
# --selective=n can be used to remove selective
# behavior that may have been implied by some
# other option like --update.
myparams.pop("selective", None)
if '--debug' in myopts:
writemsg_level('\n\nmyparams %s\n\n' % myparams,
noiselevel=-1, level=logging.DEBUG)
return myparams
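# Illustrative sketch (not from the original source): with
# myopts={"--update": True, "--deep": 1} and a non-"remove" action, this
# function returns {"recurse": True, "selective": True, "deep": 1,
# "binpkg_respect_use": "auto"}.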
| gpl-2.0 | 8,581,386,654,657,836,000 | 34.125 | 82 | 0.706151 | false |
ghold/OneKeySql | onekey/oracle/OkSqlHandler.py | 1 | 1458 | import cx_Oracle
import logging
import os
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
class OkSqlHandler(object):
@classmethod
def setupConn(cls):
# dsn = cx_Oracle.makedsn("10.0.44.99", "1521", "ompdb")
dsn = cx_Oracle.makedsn("10.0.76.128", "1521", "omp2st")
conn = cx_Oracle.connect('OMPBASE', 'OMPBASE', dsn)
return conn
@classmethod
def insertAction(cls, sql):
conn = cls.setupConn()
cursor = conn.cursor()
#logging
        logging.basicConfig(filename='onkey.log', level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.info(sql)
try:
cursor.execute(sql)
except Exception as ex:
logging.error(ex)
finally:
cursor.close()
conn.commit()
conn.close()
#if __name__ == "__main__":
# OkSqlHandler.insertAction("insert into omp.tt_bar_record (BAR_RECORD_ID, OP_CODE, ZONE_CODE, WAYBILL_NO, CONTNR_CODE, OP_ATTACH_INFO, STAY_WHY_CODE, BAR_SCAN_TM, BAR_OPR_CODE, COURIER_CODE, PHONE_ZONE, PHONE, SUBBILL_PIECE_QTY, BAR_UPLOAD_TYPE_CODE, WEIGHT_QTY, OTHER_INFO, AUTOLOADING, OBJ_TYPE_CODE, CREATE_TM) values (1989012000004, '30', '755R', '960837100044', '333124100065', '755R021R0430', '', to_date('05-12-2013 04:10:39', 'mm-dd-yyyy hh24:mi:ss'), '243099', '', '', '', 0, 0, 0.00, '', '1', 30, to_date('05-12-2013 04:35:39', 'mm-dd-yyyy hh24:mi:ss'))")
| apache-2.0 | -4,715,842,806,353,815,000 | 46.032258 | 569 | 0.606996 | false |
einaru/cconverter | cconverter.py | 1 | 1284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Date: Thu Jun 30 17:17:35 CEST 2011
:Version: 1
:Authors: Einar Uvsløkk <[email protected]>
:Copyright: (c) 2011 Einar Uvsløkk
:License: GNU General Public License (GPL) version 3 or later
vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
import gettext
import locale
import os
import cconverter
from cconverter import app
try:
    from cconverter.defs import (DATA_DIR, PKG_DATA_DIR, LOCALE_DIR)
DEFS_PRESENT = True
except ImportError:
DATA_DIR = PKG_DATA_DIR = LOCALE_DIR = ''
DEFS_PRESENT = False
if not DEFS_PRESENT:
_prefix = '/usr'
DATA_DIR = os.path.join(_prefix, 'share')
LOCALE_DIR = os.path.join(_prefix, 'share', 'locale')
_me = os.path.abspath(os.path.dirname(__file__))
PKG_DATA_DIR = os.path.join(_me, 'data')
cconverter.DATA_DIR = DATA_DIR
cconverter.PKG_DATA_DIR = PKG_DATA_DIR
cconverter.LOCALE_DIR = LOCALE_DIR
cconverter.APP_NAME = 'cconverter'
locale.setlocale(locale.LC_ALL, None)
gettext.bindtextdomain(cconverter.APP_NAME, LOCALE_DIR)
gettext.textdomain(cconverter.APP_NAME)
gettext.install(cconverter.APP_NAME)
dirs = {'DATA_DIR': DATA_DIR,
'PKG_DATA_DIR': PKG_DATA_DIR,
'LOCALE_DIR': LOCALE_DIR,}
kwargs = {'data': dirs,}
app.run(**kwargs)
| gpl-3.0 | 47,153,859,680,810,424 | 26.276596 | 66 | 0.695788 | false |
Thraxis/SickRage | lib/hachoir_parser/image/bmp.py | 95 | 6682 | """
Microsoft Bitmap picture parser.
- file extension: ".bmp"
Author: Victor Stinner
Creation: 16 december 2005
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt8, UInt16, UInt32, Bits,
String, RawBytes, Enum,
PaddingBytes, NullBytes, createPaddingField)
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.image.common import RGB, PaletteRGBA
from hachoir_core.tools import alignValue
class Pixel4bit(Bits):
static_size = 4
def __init__(self, parent, name):
Bits.__init__(self, parent, name, 4)
class ImageLine(FieldSet):
def __init__(self, parent, name, width, pixel_class):
FieldSet.__init__(self, parent, name)
self._pixel = pixel_class
self._width = width
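        # BMP scan lines are padded so each row starts on a 32-bit boundary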
self._size = alignValue(self._width * self._pixel.static_size, 32)
def createFields(self):
for x in xrange(self._width):
yield self._pixel(self, "pixel[]")
size = self.size - self.current_size
if size:
yield createPaddingField(self, size)
class ImagePixels(FieldSet):
def __init__(self, parent, name, width, height, pixel_class, size=None):
FieldSet.__init__(self, parent, name, size=size)
self._width = width
self._height = height
self._pixel = pixel_class
def createFields(self):
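        # BMP stores scan lines bottom-up, hence the reversed row order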
for y in xrange(self._height-1, -1, -1):
yield ImageLine(self, "line[%u]" % y, self._width, self._pixel)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding", size)
class CIEXYZ(FieldSet):
def createFields(self):
yield UInt32(self, "x")
yield UInt32(self, "y")
yield UInt32(self, "z")
class BmpHeader(FieldSet):
color_space_name = {
1: "Business (Saturation)",
2: "Graphics (Relative)",
4: "Images (Perceptual)",
8: "Absolute colormetric (Absolute)",
}
def getFormatVersion(self):
if "gamma_blue" in self:
return 4
if "important_color" in self:
return 3
return 2
def createFields(self):
# Version 2 (12 bytes)
yield UInt32(self, "header_size", "Header size")
yield UInt32(self, "width", "Width (pixels)")
yield UInt32(self, "height", "Height (pixels)")
yield UInt16(self, "nb_plan", "Number of plan (=1)")
yield UInt16(self, "bpp", "Bits per pixel") # may be zero for PNG/JPEG picture
# Version 3 (40 bytes)
if self["header_size"].value < 40:
return
yield Enum(UInt32(self, "compression", "Compression method"), BmpFile.COMPRESSION_NAME)
yield UInt32(self, "image_size", "Image size (bytes)")
yield UInt32(self, "horizontal_dpi", "Horizontal DPI")
yield UInt32(self, "vertical_dpi", "Vertical DPI")
yield UInt32(self, "used_colors", "Number of color used")
yield UInt32(self, "important_color", "Number of import colors")
# Version 4 (108 bytes)
if self["header_size"].value < 108:
return
yield textHandler(UInt32(self, "red_mask"), hexadecimal)
yield textHandler(UInt32(self, "green_mask"), hexadecimal)
yield textHandler(UInt32(self, "blue_mask"), hexadecimal)
yield textHandler(UInt32(self, "alpha_mask"), hexadecimal)
yield Enum(UInt32(self, "color_space"), self.color_space_name)
yield CIEXYZ(self, "red_primary")
yield CIEXYZ(self, "green_primary")
yield CIEXYZ(self, "blue_primary")
yield UInt32(self, "gamma_red")
yield UInt32(self, "gamma_green")
yield UInt32(self, "gamma_blue")
def parseImageData(parent, name, size, header):
if ("compression" not in header) or (header["compression"].value in (0, 3)):
width = header["width"].value
height = header["height"].value
bpp = header["bpp"].value
if bpp == 32:
cls = UInt32
elif bpp == 24:
cls = RGB
elif bpp == 8:
cls = UInt8
elif bpp == 4:
cls = Pixel4bit
else:
cls = None
if cls:
return ImagePixels(parent, name, width, height, cls, size=size*8)
return RawBytes(parent, name, size)
class BmpFile(Parser):
PARSER_TAGS = {
"id": "bmp",
"category": "image",
"file_ext": ("bmp",),
"mime": (u"image/x-ms-bmp", u"image/x-bmp"),
"min_size": 30*8,
# "magic": (("BM", 0),),
"magic_regex": ((
# "BM", <filesize>, <reserved>, header_size=(12|40|108)
"BM.{4}.{8}[\x0C\x28\x6C]\0{3}",
0),),
"description": "Microsoft bitmap (BMP) picture"
}
endian = LITTLE_ENDIAN
COMPRESSION_NAME = {
0: u"Uncompressed",
1: u"RLE 8-bit",
2: u"RLE 4-bit",
3: u"Bitfields",
4: u"JPEG",
5: u"PNG",
}
def validate(self):
if self.stream.readBytes(0, 2) != 'BM':
return "Wrong file signature"
if self["header/header_size"].value not in (12, 40, 108):
return "Unknown header size (%s)" % self["header_size"].value
if self["header/nb_plan"].value != 1:
return "Invalid number of planes"
return True
def createFields(self):
yield String(self, "signature", 2, "Header (\"BM\")", charset="ASCII")
yield UInt32(self, "file_size", "File size (bytes)")
yield PaddingBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "data_start", "Data start position")
yield BmpHeader(self, "header")
# Compute number of color
header = self["header"]
bpp = header["bpp"].value
if 0 < bpp <= 8:
if "used_colors" in header and header["used_colors"].value:
nb_color = header["used_colors"].value
else:
nb_color = (1 << bpp)
else:
nb_color = 0
# Color palette (if any)
if nb_color:
yield PaletteRGBA(self, "palette", nb_color)
# Seek to data start
field = self.seekByte(self["data_start"].value)
if field:
yield field
# Image pixels
size = min(self["file_size"].value-self["data_start"].value, (self.size - self.current_size)//8)
yield parseImageData(self, "pixels", size, header)
def createDescription(self):
return u"Microsoft Bitmap version %s" % self["header"].getFormatVersion()
def createContentSize(self):
return self["file_size"].value * 8
| gpl-3.0 | 8,301,769,140,815,707,000 | 33.266667 | 104 | 0.577971 | false |
pshen/ansible | docs/docsite/rst/conf.py | 37 | 7361 | # -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath('_themes'))
VERSION = '2.4'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013-2017 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
rst_epilog = """
.. |acapi| replace:: *Ansible Core API Guide*
.. |acrn| replace:: *Ansible Core Release Notes*
.. |ac| replace:: Ansible Core
.. |acversion| replace:: Ansible Core Version 2.1
.. |acversionshort| replace:: Ansible Core 2.1
.. |versionshortest| replace:: 2.2
.. |versiondev| replace:: 2.3
.. |pubdate| replace:: July 19, 2016
.. |rhel| replace:: Red Hat Enterprise Linux
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'srtd'
html_short_title = 'Ansible Documentation'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/2', (None, '../python2-2.7.13.inv')),
'python3': ('https://docs.python.org/3', (None, '../python3-3.6.1.inv')),
'jinja2': ('http://jinja.pocoo.org/docs', (None, 'jinja2-2.9.6.inv'))}
| gpl-3.0 | 735,740,287,424,502,000 | 31.004348 | 131 | 0.707513 | false |
ejoful/scrapy_example | zhihu_spider/zhihu_spider/settings.py | 1 | 3176 | # -*- coding: utf-8 -*-
# Scrapy settings for zhihu_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'zhihu_spider'
SPIDER_MODULES = ['zhihu_spider.spiders']
NEWSPIDER_MODULE = 'zhihu_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'zhihu_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'zhihu_spider.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'zhihu_spider.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'zhihu_spider.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| gpl-3.0 | -9,045,986,626,991,025,000 | 34.288889 | 109 | 0.766058 | false |
tboyce021/home-assistant | tests/helpers/test_area_registry.py | 1 | 5502 | """Tests for the Area Registry."""
import asyncio
import pytest
from homeassistant.core import callback
from homeassistant.helpers import area_registry
import tests.async_mock
from tests.common import flush_store, mock_area_registry
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_area_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(area_registry.EVENT_AREA_REGISTRY_UPDATED, async_capture)
return events
async def test_list_areas(registry):
"""Make sure that we can read areas."""
registry.async_create("mock")
areas = registry.async_list_areas()
assert len(areas) == len(registry.areas)
async def test_create_area(hass, registry, update_events):
"""Make sure that we can create an area."""
area = registry.async_create("mock")
assert area.id == "mock"
assert area.name == "mock"
assert len(registry.areas) == 1
await hass.async_block_till_done()
assert len(update_events) == 1
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
async def test_create_area_with_name_already_in_use(hass, registry, update_events):
"""Make sure that we can't create an area with a name already in use."""
area1 = registry.async_create("mock")
with pytest.raises(ValueError) as e_info:
area2 = registry.async_create("mock")
assert area1 != area2
assert e_info == "Name is already in use"
await hass.async_block_till_done()
assert len(registry.areas) == 1
assert len(update_events) == 1
async def test_create_area_with_id_already_in_use(registry):
"""Make sure that we can't create an area with a name already in use."""
area1 = registry.async_create("mock")
updated_area1 = registry.async_update(area1.id, "New Name")
assert updated_area1.id == area1.id
area2 = registry.async_create("mock")
assert area2.id == "mock_2"
async def test_delete_area(hass, registry, update_events):
"""Make sure that we can delete an area."""
area = registry.async_create("mock")
await registry.async_delete(area.id)
assert not registry.areas
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
assert update_events[1]["action"] == "remove"
assert update_events[1]["area_id"] == area.id
async def test_delete_non_existing_area(registry):
"""Make sure that we can't delete an area that doesn't exist."""
registry.async_create("mock")
with pytest.raises(KeyError):
await registry.async_delete("")
assert len(registry.areas) == 1
async def test_update_area(hass, registry, update_events):
"""Make sure that we can read areas."""
area = registry.async_create("mock")
updated_area = registry.async_update(area.id, name="mock1")
assert updated_area != area
assert updated_area.name == "mock1"
assert len(registry.areas) == 1
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
assert update_events[1]["action"] == "update"
assert update_events[1]["area_id"] == area.id
async def test_update_area_with_same_name(registry):
"""Make sure that we can reapply the same name to the area."""
area = registry.async_create("mock")
updated_area = registry.async_update(area.id, name="mock")
assert updated_area == area
assert len(registry.areas) == 1
async def test_update_area_with_name_already_in_use(registry):
"""Make sure that we can't update an area with a name already in use."""
area1 = registry.async_create("mock1")
area2 = registry.async_create("mock2")
with pytest.raises(ValueError) as e_info:
registry.async_update(area1.id, name="mock2")
assert e_info == "Name is already in use"
assert area1.name == "mock1"
assert area2.name == "mock2"
assert len(registry.areas) == 2
async def test_load_area(hass, registry):
"""Make sure that we can load/save data correctly."""
registry.async_create("mock1")
registry.async_create("mock2")
assert len(registry.areas) == 2
registry2 = area_registry.AreaRegistry(hass)
await flush_store(registry._store)
await registry2.async_load()
assert list(registry.areas) == list(registry2.areas)
async def test_loading_area_from_storage(hass, hass_storage):
"""Test loading stored areas on start."""
hass_storage[area_registry.STORAGE_KEY] = {
"version": area_registry.STORAGE_VERSION,
"data": {"areas": [{"id": "12345A", "name": "mock"}]},
}
registry = await area_registry.async_get_registry(hass)
assert len(registry.areas) == 1
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
with tests.async_mock.patch(
"homeassistant.helpers.area_registry.AreaRegistry.async_load"
) as mock_load:
results = await asyncio.gather(
area_registry.async_get_registry(hass),
area_registry.async_get_registry(hass),
)
mock_load.assert_called_once_with()
assert results[0] == results[1]
| apache-2.0 | 2,767,099,778,234,758,000 | 27.957895 | 83 | 0.666848 | false |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/protorpc/protorpc/util_test.py | 19 | 14232 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for protorpc.util."""
import six
__author__ = '[email protected] (Rafe Kaplan)'
import datetime
import random
import sys
import types
import unittest
from protorpc import test_util
from protorpc import util
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
test_util.TestCase):
MODULE = util
class PadStringTest(test_util.TestCase):
def testPadEmptyString(self):
self.assertEquals(' ' * 512, util.pad_string(''))
def testPadString(self):
self.assertEquals('hello' + (507 * ' '), util.pad_string('hello'))
def testPadLongString(self):
self.assertEquals('x' * 1000, util.pad_string('x' * 1000))
class UtilTest(test_util.TestCase):
def testDecoratedFunction_LengthZero(self):
@util.positional(0)
def fn(kwonly=1):
return [kwonly]
self.assertEquals([1], fn())
self.assertEquals([2], fn(kwonly=2))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 0 positional '
r'arguments \(1 given\)',
fn, 1)
def testDecoratedFunction_LengthOne(self):
@util.positional(1)
def fn(pos, kwonly=1):
return [pos, kwonly]
self.assertEquals([1, 1], fn(1))
self.assertEquals([2, 2], fn(2, kwonly=2))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 1 positional '
r'argument \(2 given\)',
fn, 2, 3)
def testDecoratedFunction_LengthTwoWithDefault(self):
@util.positional(2)
def fn(pos1, pos2=1, kwonly=1):
return [pos1, pos2, kwonly]
self.assertEquals([1, 1, 1], fn(1))
self.assertEquals([2, 2, 1], fn(2, 2))
self.assertEquals([2, 3, 4], fn(2, 3, kwonly=4))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 2 positional '
r'arguments \(3 given\)',
fn, 2, 3, 4)
def testDecoratedMethod(self):
class MyClass(object):
@util.positional(2)
def meth(self, pos1, kwonly=1):
return [pos1, kwonly]
self.assertEquals([1, 1], MyClass().meth(1))
self.assertEquals([2, 2], MyClass().meth(2, kwonly=2))
self.assertRaisesWithRegexpMatch(TypeError,
r'meth\(\) takes at most 2 positional '
r'arguments \(3 given\)',
MyClass().meth, 2, 3)
def testDefaultDecoration(self):
@util.positional
def fn(a, b, c=None):
return a, b, c
self.assertEquals((1, 2, 3), fn(1, 2, c=3))
self.assertEquals((3, 4, None), fn(3, b=4))
self.assertRaisesWithRegexpMatch(TypeError,
r'fn\(\) takes at most 2 positional '
r'arguments \(3 given\)',
fn, 2, 3, 4)
def testDefaultDecorationNoKwdsFails(self):
def fn(a):
return a
self.assertRaisesRegexp(
ValueError,
'Functions with no keyword arguments must specify max_positional_args',
util.positional, fn)
class AcceptItemTest(test_util.TestCase):
def CheckAttributes(self, item, main_type, sub_type, q=1, values={}, index=1):
self.assertEquals(index, item.index)
self.assertEquals(main_type, item.main_type)
self.assertEquals(sub_type, item.sub_type)
self.assertEquals(q, item.q)
self.assertEquals(values, item.values)
def testParse(self):
self.CheckAttributes(util.AcceptItem('*/*', 1), None, None)
self.CheckAttributes(util.AcceptItem('text/*', 1), 'text', None)
self.CheckAttributes(util.AcceptItem('text/plain', 1), 'text', 'plain')
self.CheckAttributes(
util.AcceptItem('text/plain; q=0.3', 1), 'text', 'plain', 0.3,
values={'q': '0.3'})
self.CheckAttributes(
util.AcceptItem('text/plain; level=2', 1), 'text', 'plain',
values={'level': '2'})
self.CheckAttributes(
util.AcceptItem('text/plain', 10), 'text', 'plain', index=10)
def testCaseInsensitive(self):
self.CheckAttributes(util.AcceptItem('Text/Plain', 1), 'text', 'plain')
def testBadValue(self):
self.assertRaises(util.AcceptError,
util.AcceptItem, 'bad value', 1)
self.assertRaises(util.AcceptError,
util.AcceptItem, 'bad value/', 1)
self.assertRaises(util.AcceptError,
util.AcceptItem, '/bad value', 1)
def testSortKey(self):
item = util.AcceptItem('main/sub; q=0.2; level=3', 11)
self.assertEquals((False, False, -0.2, False, 11), item.sort_key)
item = util.AcceptItem('main/*', 12)
self.assertEquals((False, True, -1, True, 12), item.sort_key)
item = util.AcceptItem('*/*', 1)
self.assertEquals((True, True, -1, True, 1), item.sort_key)
def testSort(self):
i1 = util.AcceptItem('text/*', 1)
i2 = util.AcceptItem('text/html', 2)
i3 = util.AcceptItem('text/html; q=0.9', 3)
i4 = util.AcceptItem('text/html; q=0.3', 4)
i5 = util.AcceptItem('text/xml', 5)
i6 = util.AcceptItem('text/html; level=1', 6)
i7 = util.AcceptItem('*/*', 7)
    items = [i1, i2, i3, i4, i5, i6, i7]
random.shuffle(items)
self.assertEquals([i6, i2, i5, i3, i4, i1, i7], sorted(items))
def testMatchAll(self):
item = util.AcceptItem('*/*', 1)
self.assertTrue(item.match('text/html'))
self.assertTrue(item.match('text/plain; level=1'))
self.assertTrue(item.match('image/png'))
self.assertTrue(item.match('image/png; q=0.3'))
def testMatchMainType(self):
item = util.AcceptItem('text/*', 1)
self.assertTrue(item.match('text/html'))
self.assertTrue(item.match('text/plain; level=1'))
self.assertFalse(item.match('image/png'))
self.assertFalse(item.match('image/png; q=0.3'))
def testMatchFullType(self):
item = util.AcceptItem('text/plain', 1)
self.assertFalse(item.match('text/html'))
self.assertTrue(item.match('text/plain; level=1'))
self.assertFalse(item.match('image/png'))
self.assertFalse(item.match('image/png; q=0.3'))
def testMatchCaseInsensitive(self):
item = util.AcceptItem('text/plain', 1)
self.assertTrue(item.match('tExt/pLain'))
def testStr(self):
self.assertHeaderSame('*/*', str(util.AcceptItem('*/*', 1)))
self.assertHeaderSame('text/*', str(util.AcceptItem('text/*', 1)))
self.assertHeaderSame('text/plain',
str(util.AcceptItem('text/plain', 1)))
self.assertHeaderSame('text/plain; q=0.2',
str(util.AcceptItem('text/plain; q=0.2', 1)))
self.assertHeaderSame(
'text/plain; q=0.2; level=1',
str(util.AcceptItem('text/plain; level=1; q=0.2', 1)))
def testRepr(self):
self.assertEquals("AcceptItem('*/*', 1)", repr(util.AcceptItem('*/*', 1)))
self.assertEquals("AcceptItem('text/plain', 11)",
repr(util.AcceptItem('text/plain', 11)))
def testValues(self):
item = util.AcceptItem('text/plain; a=1; b=2; c=3;', 1)
values = item.values
self.assertEquals(dict(a="1", b="2", c="3"), values)
values['a'] = "7"
self.assertNotEquals(values, item.values)
class ParseAcceptHeaderTest(test_util.TestCase):
def testIndex(self):
accept_header = """text/*, text/html, text/html; q=0.9,
text/xml,
text/html; level=1, */*"""
accepts = util.parse_accept_header(accept_header)
self.assertEquals(6, len(accepts))
self.assertEquals([4, 1, 3, 2, 0, 5], [a.index for a in accepts])
class ChooseContentTypeTest(test_util.TestCase):
def testIgnoreUnrequested(self):
self.assertEquals('application/json',
util.choose_content_type(
'text/plain, application/json, */*',
['application/X-Google-protobuf',
'application/json'
]))
def testUseCorrectPreferenceIndex(self):
self.assertEquals('application/json',
util.choose_content_type(
'*/*, text/plain, application/json',
['application/X-Google-protobuf',
'application/json'
]))
def testPreferFirstInList(self):
self.assertEquals('application/X-Google-protobuf',
util.choose_content_type(
'*/*',
['application/X-Google-protobuf',
'application/json'
]))
def testCaseInsensitive(self):
self.assertEquals('application/X-Google-protobuf',
util.choose_content_type(
'application/x-google-protobuf',
['application/X-Google-protobuf',
'application/json'
]))
class GetPackageForModuleTest(test_util.TestCase):
def setUp(self):
self.original_modules = dict(sys.modules)
def tearDown(self):
sys.modules.clear()
sys.modules.update(self.original_modules)
def CreateModule(self, name, file_name=None):
if file_name is None:
file_name = '%s.py' % name
module = types.ModuleType(name)
sys.modules[name] = module
return module
def assertPackageEquals(self, expected, actual):
self.assertEquals(expected, actual)
if actual is not None:
self.assertTrue(isinstance(actual, six.text_type))
def testByString(self):
module = self.CreateModule('service_module')
module.package = 'my_package'
self.assertPackageEquals('my_package',
util.get_package_for_module('service_module'))
def testModuleNameNotInSys(self):
self.assertPackageEquals(None,
util.get_package_for_module('service_module'))
def testHasPackage(self):
module = self.CreateModule('service_module')
module.package = 'my_package'
self.assertPackageEquals('my_package', util.get_package_for_module(module))
def testHasModuleName(self):
module = self.CreateModule('service_module')
self.assertPackageEquals('service_module',
util.get_package_for_module(module))
def testIsMain(self):
module = self.CreateModule('__main__')
module.__file__ = '/bing/blam/bloom/blarm/my_file.py'
self.assertPackageEquals('my_file', util.get_package_for_module(module))
def testIsMainCompiled(self):
module = self.CreateModule('__main__')
module.__file__ = '/bing/blam/bloom/blarm/my_file.pyc'
self.assertPackageEquals('my_file', util.get_package_for_module(module))
def testNoExtension(self):
module = self.CreateModule('__main__')
module.__file__ = '/bing/blam/bloom/blarm/my_file'
self.assertPackageEquals('my_file', util.get_package_for_module(module))
def testNoPackageAtAll(self):
module = self.CreateModule('__main__')
self.assertPackageEquals('__main__', util.get_package_for_module(module))
class DateTimeTests(test_util.TestCase):
def testDecodeDateTime(self):
"""Test that a RFC 3339 datetime string is decoded properly."""
for datetime_string, datetime_vals in (
('2012-09-30T15:31:50.262', (2012, 9, 30, 15, 31, 50, 262000)),
('2012-09-30T15:31:50', (2012, 9, 30, 15, 31, 50, 0))):
decoded = util.decode_datetime(datetime_string)
expected = datetime.datetime(*datetime_vals)
self.assertEquals(expected, decoded)
def testDateTimeTimeZones(self):
"""Test that a datetime string with a timezone is decoded correctly."""
for datetime_string, datetime_vals in (
('2012-09-30T15:31:50.262-06:00',
(2012, 9, 30, 15, 31, 50, 262000, util.TimeZoneOffset(-360))),
('2012-09-30T15:31:50.262+01:30',
(2012, 9, 30, 15, 31, 50, 262000, util.TimeZoneOffset(90))),
('2012-09-30T15:31:50+00:05',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(5))),
('2012-09-30T15:31:50+00:00',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(0))),
('2012-09-30t15:31:50-00:00',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(0))),
('2012-09-30t15:31:50z',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(0))),
('2012-09-30T15:31:50-23:00',
(2012, 9, 30, 15, 31, 50, 0, util.TimeZoneOffset(-1380)))):
decoded = util.decode_datetime(datetime_string)
expected = datetime.datetime(*datetime_vals)
self.assertEquals(expected, decoded)
def testDecodeDateTimeInvalid(self):
"""Test that decoding malformed datetime strings raises execptions."""
for datetime_string in ('invalid',
'2012-09-30T15:31:50.',
'-08:00 2012-09-30T15:31:50.262',
'2012-09-30T15:31',
'2012-09-30T15:31Z',
'2012-09-30T15:31:50ZZ',
'2012-09-30T15:31:50.262 blah blah -08:00',
'1000-99-99T25:99:99.999-99:99'):
self.assertRaises(ValueError, util.decode_datetime, datetime_string)
def testTimeZoneOffsetDelta(self):
"""Test that delta works with TimeZoneOffset."""
time_zone = util.TimeZoneOffset(datetime.timedelta(minutes=3))
epoch = time_zone.utcoffset(datetime.datetime.utcfromtimestamp(0))
self.assertEqual(180, util.total_seconds(epoch))
def main():
unittest.main()
if __name__ == '__main__':
main()
| bsd-3-clause | 5,187,513,290,084,454,000 | 35.775194 | 80 | 0.599705 | false |
devendermishrajio/nova_test_latest | nova/tests/functional/v3/test_networks.py | 29 | 3919 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.network import api as network_api
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit.api.openstack.compute.contrib import test_networks
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-networks"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(NetworksJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.os_networks.Os_networks')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.extended_networks.Extended_networks')
return f
def setUp(self):
super(NetworksJsonTests, self).setUp()
fake_network_api = test_networks.FakeNetworkAPI()
self.stubs.Set(network_api.API, "get_all",
fake_network_api.get_all)
self.stubs.Set(network_api.API, "get",
fake_network_api.get)
self.stubs.Set(network_api.API, "associate",
fake_network_api.associate)
self.stubs.Set(network_api.API, "delete",
fake_network_api.delete)
self.stubs.Set(network_api.API, "create",
fake_network_api.create)
self.stubs.Set(network_api.API, "add_network_to_project",
fake_network_api.add_network_to_project)
def test_network_list(self):
response = self._do_get('os-networks')
subs = self._get_regexes()
self._verify_response('networks-list-resp', subs, response, 200)
def test_network_disassociate(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
response = self._do_post('os-networks/%s/action' % uuid,
'networks-disassociate-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_network_show(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
response = self._do_get('os-networks/%s' % uuid)
subs = self._get_regexes()
self._verify_response('network-show-resp', subs, response, 200)
def test_network_create(self):
response = self._do_post("os-networks",
'network-create-req', {})
subs = self._get_regexes()
self._verify_response('network-create-resp', subs, response, 200)
def test_network_add(self):
response = self._do_post("os-networks/add",
'network-add-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_network_delete(self):
response = self._do_delete('os-networks/always_delete')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
| apache-2.0 | 8,226,567,836,231,889,000 | 41.139785 | 78 | 0.630263 | false |
msiedlarek/qtwebkit | Tools/Scripts/webkitpy/port/gtk_unittest.py | 117 | 5598 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import sys
import os
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.gtk import GtkPort
from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
from webkitpy.port import port_testcase
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
class GtkPortTest(port_testcase.PortTestCase):
port_name = 'gtk'
port_maker = GtkPort
# Additionally mocks out the PulseAudioSanitizer methods.
def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
port = super(GtkPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
return port
def test_default_baseline_search_path(self):
port = self.make_port()
self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk1',
'/mock-checkout/LayoutTests/platform/gtk'])
port = self.make_port(options=MockOptions(webkit_test_runner=True))
self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk2',
'/mock-checkout/LayoutTests/platform/wk2', '/mock-checkout/LayoutTests/platform/gtk'])
def test_port_specific_expectations_files(self):
port = self.make_port()
self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk-wk1/TestExpectations'])
port = self.make_port(options=MockOptions(webkit_test_runner=True))
self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
'/mock-checkout/LayoutTests/platform/wk2/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk-wk2/TestExpectations'])
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
expected_logs = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
def test_default_timeout_ms(self):
self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
def test_get_crash_log(self):
core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
mock_empty_crash_log = """\
Crash log for DumpRenderTree (pid 28529):
Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
- enable core dumps: ulimit -c unlimited
- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
STDERR: <empty>""" % locals()
def _mock_gdb_output(coredump_path):
return (mock_empty_crash_log, [])
port = self.make_port()
port._get_gdb_output = mock_empty_crash_log
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
self.assertEqual(stderr, "")
self.assertMultiLineEqual(log, mock_empty_crash_log)
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
self.assertEqual(stderr, "")
self.assertMultiLineEqual(log, mock_empty_crash_log)
| lgpl-3.0 | -3,901,070,757,604,433,000 | 49.890909 | 138 | 0.728117 | false |
keedio/hue | desktop/core/ext-py/Django-1.6.10/django/db/backends/postgresql_psycopg2/base.py | 47 | 6866 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db.backends import *
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = True
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
has_select_for_update = True
has_select_for_update_nowait = True
has_bulk_insert = True
uses_savepoints = True
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
can_rollback_ddl = True
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
opts = self.settings_dict["OPTIONS"]
RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
self.isolation_level = opts.get('isolation_level', RC)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if 'isolation_level' in conn_params:
del conn_params['isolation_level']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
return Database.connect(**conn_params)
def init_connection_state(self):
settings_dict = self.settings_dict
self.connection.set_client_encoding('UTF8')
tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
if tz:
try:
get_parameter_status = self.connection.get_parameter_status
except AttributeError:
# psycopg2 < 2.0.12 doesn't have get_parameter_status
conn_tz = None
else:
conn_tz = get_parameter_status('TimeZone')
if conn_tz != tz:
self.connection.cursor().execute(
self.ops.set_time_zone_sql(), [tz])
# Commit after setting the time zone (see #17062)
self.connection.commit()
self.connection.set_isolation_level(self.isolation_level)
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_isolation_level(self, isolation_level):
assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
if self.psycopg2_version >= (2, 4, 2):
self.connection.set_session(isolation_level=isolation_level)
else:
self.connection.set_isolation_level(isolation_level)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
if self.psycopg2_version >= (2, 4, 2):
self.connection.autocommit = autocommit
else:
if autocommit:
level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
else:
level = self.isolation_level
self.connection.set_isolation_level(level)
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.'))
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
| apache-2.0 | 5,477,455,794,934,373,000 | 36.113514 | 105 | 0.63647 | false |
tobegit3hub/glance_docker | glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py | 19 | 1514 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
image_locations_table = sqlalchemy.Table('image_locations',
meta,
autoload=True)
meta_data = sqlalchemy.Column('meta_data',
schema.PickleType(),
default={})
meta_data.create(image_locations_table)
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData()
meta.bind = migrate_engine
image_locations_table = sqlalchemy.Table('image_locations',
meta,
autoload=True)
image_locations_table.columns['meta_data'].drop()
| apache-2.0 | 8,841,245,119,567,651,000 | 34.209302 | 78 | 0.611625 | false |
luoyetx/Apriori | apriori/apriori.py | 1 | 9932 | # -*- coding: utf-8 -*-
from collections import defaultdict
from itertools import combinations
from sys import stdout
class cached_property(object):
"""A cached property only computed once
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
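# e.g. the first access to ``Apriori.items`` below runs the function once and
# caches the result in the instance __dict__, shadowing this descriptor.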
class Base(object):
"""A base workflow for Apriori algorithm
"""
def _before_generate_frequent_itemset(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def _after_generate_frequent_itemset(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def generate_frequent_itemset(self):
"""Generate and return frequent itemset
"""
        raise NotImplementedError("generate_frequent_itemset(self) needs to be implemented.")
def _before_generate_rule(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def _after_generate_rule(self):
"""Invoked before generate_frequent_itemset()
"""
pass
def generate_rule(self):
"""Generate and return rule
"""
        raise NotImplementedError("generate_rule(self) needs to be implemented.")
def run(self):
"""Run Apriori algorithm and return rules
"""
# generate frequent itemset
self._before_generate_frequent_itemset()
self.generate_frequent_itemset()
self._after_generate_frequent_itemset()
# generate rule
self._before_generate_rule()
self.generate_rule()
self._after_generate_rule()
class Apriori(Base):
"""A simple implementation of Apriori algorithm
Example:
dataset = [
['bread', 'milk'],
['bread', 'diaper', 'beer', 'egg'],
['milk', 'diaper', 'beer', 'cola'],
['bread', 'milk', 'diaper', 'beer'],
['bread', 'milk', 'diaper', 'cola'],
]
minsup = minconf = 0.6
apriori = Apriori(dataset, minsup, minconf)
apriori.run()
apriori.print_rule()
Results:
Rules
milk --> bread (confidence = 0.75)
bread --> milk (confidence = 0.75)
diaper --> bread (confidence = 0.75)
bread --> diaper (confidence = 0.75)
beer --> diaper (confidence = 1.0)
diaper --> beer (confidence = 0.75)
diaper --> milk (confidence = 0.75)
milk --> diaper (confidence = 0.75)
"""
def __init__(self, transaction_list, minsup, minconf, selected_items=None):
"""Initialization
        :param transaction_list: a list containing transactions
:param minsup: minimum support
:param minconf: minimum confidence
:param selected_items: selected items in frequent itemset, default `None`
"""
self.transaction_list = transaction_list
self.transaction_list_full_length = len(transaction_list)
self.minsup = minsup
self.minconf = minconf
        if selected_items:
self.selected_items = frozenset(selected_items)
else:
self.selected_items = None
self.frequent_itemset = dict()
# support for every frequent itemset
self.frequent_itemset_support = defaultdict(float)
# convert transaction_list
        self.transaction_list = [frozenset(transaction)
                                 for transaction in transaction_list]
self.rule = []
def set_selected_items(self, selected_items):
"""Set selected items
"""
self.selected_items = frozenset(selected_items)
@cached_property
def items(self):
"""Return all items in the self.transaction_list
"""
items = set()
for transaction in self.transaction_list:
for item in transaction:
items.add(item)
return items
def filter_with_minsup(self, itemsets):
"""Return subset of itemsets which satisfies minsup
and record their support
"""
local_counter = defaultdict(int)
for itemset in itemsets:
for transaction in self.transaction_list:
if itemset.issubset(transaction):
local_counter[itemset] += 1
# filter with counter
result = set()
for itemset, count in local_counter.items():
support = float(count) / self.transaction_list_full_length
if support >= self.minsup:
result.add(itemset)
self.frequent_itemset_support[itemset] = support
return result
def _after_generate_frequent_itemset(self):
"""Filter frequent itemset with selected items
"""
if self.selected_items is None:
return
local_remove = []
for key, val in self.frequent_itemset.items():
for itemset in val:
if not self.selected_items.issubset(itemset):
local_remove.append((key, itemset))
for (key, itemset) in local_remove:
self.frequent_itemset[key].remove(itemset)
def generate_frequent_itemset(self):
"""Generate and return frequent itemset
"""
def _apriori_gen(itemset, length):
"""Return candidate itemset with given itemset and length
"""
# simply use F(k-1) x F(k-1) (itemset + itemset)
return set([x.union(y) for x in itemset for y in itemset \
if len(x.union(y)) == length])
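        # e.g. joining {bread, milk} with {bread, diaper} gives the
        # length-3 candidate {bread, milk, diaper}; unions of any other
        # size are discarded.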
k = 1
current_itemset = set()
        # generate 1-frequent itemset
        for item in self.items:
            current_itemset.add(frozenset([item]))
self.frequent_itemset[k] = self.filter_with_minsup(current_itemset)
# generate k-frequent_itemset
while True:
k += 1
current_itemset = _apriori_gen(current_itemset, k)
current_itemset = self.filter_with_minsup(current_itemset)
            if current_itemset:
self.frequent_itemset[k] = current_itemset
else:
break
return self.frequent_itemset
def _generate_rule(self, itemset, frequent_itemset_k):
"""Generate rule with F(k) in DFS style
"""
# make sure the itemset has at least two element to generate the rule
if len(itemset) < 2:
return
for element in combinations(list(itemset), 1):
rule_head = itemset - frozenset(element)
confidence = self.frequent_itemset_support[frequent_itemset_k] / \
self.frequent_itemset_support[rule_head]
if confidence >= self.minconf:
rule = ((rule_head, itemset - rule_head), confidence)
                # if the rule is new, record it and recurse via _generate_rule() (DFS)
                if rule not in self.rule:
                    self.rule.append(rule)
self._generate_rule(rule_head, frequent_itemset_k)
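    # Worked example with the docstring dataset: support({beer, diaper}) is
    # 0.6, support({beer}) is 0.6 and support({diaper}) is 0.8, so
    # confidence(beer -> diaper) = 0.6 / 0.6 = 1.0 and
    # confidence(diaper -> beer) = 0.6 / 0.8 = 0.75, matching the rules
    # listed in the class docstring.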
def generate_rule(self):
"""Generate and return rule
"""
# generate frequent itemset if not generated
if len(self.frequent_itemset) == 0:
self.generate_frequent_itemset()
# generate in DFS style
for key, val in self.frequent_itemset.items():
if key == 1:
continue
for itemset in val:
self._generate_rule(itemset, itemset)
return self.rule
def print_frequent_itemset(self):
"""Print out frequent itemset
"""
stdout.write('======================================================\n')
stdout.write('Frequent itemset:\n')
for key, val in self.frequent_itemset.items():
#stdout.write('frequent itemset size of {0}:\n'.format(key))
for itemset in val:
stdout.write('(')
stdout.write(', '.join(itemset))
stdout.write(')')
stdout.write(' support = {0}\n'.format(round(self.frequent_itemset_support[itemset], 3)))
stdout.write('======================================================\n')
def print_rule(self):
"""Print out rules
"""
stdout.write('======================================================\n')
stdout.write('Rules:\n')
for rule in self.rule:
head = rule[0][0]
tail = rule[0][1]
confidence = rule[1]
stdout.write('(')
stdout.write(', '.join(head))
stdout.write(')')
stdout.write(' ==> ')
stdout.write('(')
stdout.write(', '.join(tail))
stdout.write(')')
stdout.write(' confidence = {0}\n'.format(round(confidence, 3)))
stdout.write('======================================================\n')
class ImprovedApriori(Apriori):
"""Use Hash to filter frequent itemsets
"""
def filter_with_minsup(self, itemsets):
"""Return subset of itemset which satisfies minsup
and record their support
"""
        if not itemsets:
            return set()
        # every candidate itemset at this stage has the same length k
        k = len(next(iter(itemsets)))
local_counter = defaultdict(int)
for transaction in self.transaction_list:
for itemset in combinations(list(transaction), k):
if frozenset(itemset) in itemsets:
local_counter[frozenset(itemset)] += 1
# filter with counter
result = set()
for itemset, count in local_counter.items():
support = float(count) / self.transaction_list_full_length
if support >= self.minsup:
result.add(itemset)
self.frequent_itemset_support[itemset] = support
return result
| mit | -2,762,654,598,857,889,000 | 34.598566 | 106 | 0.551752 | false |
gptech/ansible | lib/ansible/modules/network/iosxr/iosxr_system.py | 50 | 8452 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: iosxr_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS XR devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS XR devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: iosxr
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
lookup_enabled:
description:
- Provides administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
type: bool
name_servers:
description:
      - The C(name_servers) argument accepts a list of DNS name servers by
        way of either FQDN or IP address to use to perform name resolution
        lookups. See examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
iosxr_system:
hostname: iosxr01
domain_name: test.example.com
    domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
iosxr_system:
state: absent
- name: configure DNS lookup sources
iosxr_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
iosxr_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname iosxr01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import get_config, load_config
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def diff_list(want, have):
adds = set(want).difference(have)
removes = set(have).difference(want)
return (adds, removes)
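# e.g. diff_list(['8.8.8.8', '1.1.1.1'], ['8.8.8.8', '9.9.9.9']) returns
# ({'1.1.1.1'}, {'9.9.9.9'}) -- the entries to add and the entries to remove.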
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'ios':
commands.append('no hostname')
if have['domain_name']:
commands.append('no domain name')
if have['lookup_source']:
commands.append('no domain lookup source-interface %s' % have['lookup_source'])
if not have['lookup_enabled']:
commands.append('no domain lookup disable')
for item in have['name_servers']:
commands.append('no domain name-server %s' % item)
for item in have['domain_search']:
commands.append('no domain list %s' % item)
elif state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_name'):
commands.append('domain name %s' % want['domain_name'])
if needs_update('lookup_source'):
commands.append('domain lookup source-interface %s' % want['lookup_source'])
if needs_update('lookup_enabled'):
cmd = 'domain lookup disable'
if want['lookup_enabled']:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['name_servers'] is not None:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in adds:
commands.append('domain name-server %s' % item)
for item in removes:
commands.append('no domain name-server %s' % item)
if want['domain_search'] is not None:
adds, removes = diff_list(want['domain_search'], have['domain_search'])
for item in adds:
commands.append('domain list %s' % item)
for item in removes:
commands.append('no domain list %s' % item)
return commands
def parse_hostname(config):
    match = re.search(r'^hostname (\S+)', config, re.M)
return match.group(1)
def parse_domain_name(config):
    match = re.search(r'^domain name (\S+)', config, re.M)
if match:
return match.group(1)
def parse_lookup_source(config):
    match = re.search(r'^domain lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
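# Illustrative running-config fragment (hypothetical device output) that the
# parse helpers above and the patterns in map_config_to_obj are written
# against:
#   hostname iosxr01
#   domain name test.example.com
#   domain list redhat.com
#   domain lookup source-interface MgmtEth0/0/CPU0/0
#   domain name-server 8.8.8.8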
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
        'domain_search': re.findall(r'^domain list (\S+)', config, re.M),
'lookup_source': parse_lookup_source(config),
'lookup_enabled': 'domain lookup disable' not in config,
        'name_servers': re.findall(r'^domain name-server (\S+)', config, re.M)
}
def map_params_to_obj(module):
return {
'hostname': module.params['hostname'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'lookup_source': module.params['lookup_source'],
'lookup_enabled': module.params['lookup_enabled'],
'name_servers': module.params['name_servers']
}
def main():
""" Main entry point for Ansible module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
lookup_source=dict(),
lookup_enabled=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present')
)
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands, result['warnings'], commit=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 | -2,665,395,511,299,902,500 | 32.539683 | 91 | 0.643753 | false |
alanhamlett/flask | examples/flaskr/flaskr.py | 157 | 2893 | # -*- coding: utf-8 -*-
"""
Flaskr
~~~~~~
A microblog example application written as Flask tutorial with
Flask and sqlite3.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# create our little application :)
app = Flask(__name__)
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
DEBUG=True,
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def init_db():
"""Initializes the database."""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
"""Creates the database tables."""
init_db()
print('Initialized the database.')
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def show_entries():
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
| bsd-3-clause | 9,113,911,731,009,416,000 | 25.3 | 73 | 0.62945 | false |
wilvk/ansible | test/units/modules/network/nxos/test_nxos_vxlan_vtep.py | 57 | 2521 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
module = nxos_vxlan_vtep
def setUp(self):
super(TestNxosVxlanVtepVniModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosVxlanVtepVniModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
self.load_config.return_value = None
def test_nxos_vxlan_vtep(self):
set_module_args(dict(interface='nve1', description='simple description'))
self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])
def test_nxos_vxlan_vtep_present_no_change(self):
set_module_args(dict(interface='nve1'))
self.execute_module(changed=False, commands=[])
def test_nxos_vxlan_vtep_absent(self):
set_module_args(dict(interface='nve1', state='absent'))
self.execute_module(changed=True, commands=['no interface nve1'])
def test_nxos_vxlan_vtep_absent_no_change(self):
set_module_args(dict(interface='nve2', state='absent'))
self.execute_module(changed=False, commands=[])
| gpl-3.0 | 744,538,491,116,916,600 | 39.015873 | 104 | 0.712812 | false |
lazytech-org/RIOT | tests/libfixmath/do-test.py | 19 | 2250 | #!/usr/bin/env python3
import math
import operator
import sys
def rem(a, b):
ret = a % b
    if (ret < 0 and a > 0 and b < 0) or \
       (ret > 0 and a < 0 and b > 0):
ret -= b
return ret
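# Illustrative behaviour: rem() follows C-style truncated remainder rather
# than Python's floored modulo, e.g. rem(-7, 2) == -1 and rem(7, -2) == 1,
# whereas -7 % 2 == 1 and 7 % -2 == -1 in Python.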
FUNS = {
'add': operator.add,
'sub': operator.sub,
'mul': operator.mul,
'div': operator.truediv,
'mod': rem,
'sadd': operator.add,
'ssub': operator.sub,
'smul': operator.mul,
'sdiv': operator.truediv,
'min': min,
'max': max,
'abs': abs,
'sqrt': math.sqrt,
'sq': lambda x: x * x,
'sin': math.sin,
'cos': math.cos,
'tan': math.tan,
'asin': math.asin,
'acos': math.acos,
'atan': math.atan,
'exp': math.exp,
'log': math.log,
'log2': math.log2,
'slog2': math.log2,
}
ABS_ERROR_LIMIT = 0.011
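# Illustrative stdin format this script parses (values hypothetical):
#   Unary.
#   sqrt(2.0000) = 1.4142
#   Binary.
#   add(1.0000, 2.0000) = 3.0000
#   Done.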
def main():
total = 0
errors = 0
print('Calculation: abs result != exp result, abs error > limit')
started = False
for line in sys.stdin:
line = line.strip()
if not started:
if line == 'Unary.':
print(line)
started = True
continue
elif line == 'Binary.':
print(line)
continue
elif line == 'Done.':
print(line)
break
total += 1
try:
res_locals = {}
res_locals['input'], res_locals['expected'] = map(str.strip, line.split('='))
exec('result = {}'.format(res_locals['input']), FUNS, res_locals)
abs_error = abs(res_locals['result'] - float(res_locals['expected']))
res_locals['result'] = '{:.4f}'.format(res_locals['result'])
if abs_error > ABS_ERROR_LIMIT:
print('{}: {} != {}, {:.4f} > {}'.format(res_locals['input'], res_locals['result'], res_locals['expected'],
abs_error, ABS_ERROR_LIMIT))
errors += 1
        except Exception:
errors += 1
print('ERROR {}'.format(line))
print('{} calculations passed.'.format(total - errors))
if errors:
print('{} calculations had errors.'.format(errors))
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
| lgpl-2.1 | -4,720,635,382,678,311,000 | 22.195876 | 123 | 0.485778 | false |
frouty/odoo_oph | openerp/workflow/wkf_service.py | 61 | 6596 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import instance
import openerp.netsvc as netsvc
class workflow_service(netsvc.Service):
"""
Sometimes you might want to fire a signal or re-evaluate the current state
of a workflow using the service's API. You can access the workflow services
using:
>>> import netsvc
>>> wf_service = netsvc.LocalService("workflow")
"""
def __init__(self, name='workflow'):
netsvc.Service.__init__(self, name)
self.wkf_on_create_cache={}
def clear_cache(self, cr, uid):
self.wkf_on_create_cache[cr.dbname]={}
def trg_write(self, uid, res_type, res_id, cr):
"""
Reevaluates the specified workflow instance. Thus if any condition for
a transition have been changed in the backend, then running ``trg_write``
will move the workflow over that transition.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
ident = (uid,res_type,res_id)
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id or None,res_type or None, 'active'))
for (id,) in cr.fetchall():
instance.update(cr, id, ident)
def trg_trigger(self, uid, res_type, res_id, cr):
"""
Activate a trigger.
If a workflow instance is waiting for a trigger from another model, then this
trigger can be activated if its conditions are met.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
cr.execute('select instance_id from wkf_triggers where res_id=%s and model=%s', (res_id,res_type))
res = cr.fetchall()
for (instance_id,) in res:
cr.execute('select %s,res_type,res_id from wkf_instance where id=%s', (uid, instance_id,))
ident = cr.fetchone()
instance.update(cr, instance_id, ident)
def trg_delete(self, uid, res_type, res_id, cr):
"""
Delete a workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
ident = (uid,res_type,res_id)
instance.delete(cr, ident)
def trg_create(self, uid, res_type, res_id, cr):
"""
Create a new workflow instance
:param res_type: the model name
:param res_id: the model instance id to own the created worfklow instance
:param cr: a database cursor
"""
ident = (uid,res_type,res_id)
self.wkf_on_create_cache.setdefault(cr.dbname, {})
if res_type in self.wkf_on_create_cache[cr.dbname]:
wkf_ids = self.wkf_on_create_cache[cr.dbname][res_type]
else:
cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
wkf_ids = cr.fetchall()
self.wkf_on_create_cache[cr.dbname][res_type] = wkf_ids
for (wkf_id,) in wkf_ids:
instance.create(cr, ident, wkf_id)
def trg_validate(self, uid, res_type, res_id, signal, cr):
"""
Fire a signal on a given workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:signal: the signal name to be fired
:param cr: a database cursor
"""
result = False
ident = (uid,res_type,res_id)
        # ids of all active workflow instances for a corresponding resource (res_id, model_name)
cr.execute('select id from wkf_instance where res_id=%s and res_type=%s and state=%s', (res_id, res_type, 'active'))
for (id,) in cr.fetchall():
res2 = instance.validate(cr, id, ident, signal)
result = result or res2
return result
def trg_redirect(self, uid, res_type, res_id, new_rid, cr):
"""
Re-bind a workflow instance to another instance of the same model.
Make all workitems which are waiting for a (subflow) workflow instance
for the old resource point to the (first active) workflow instance for
the new resource.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param new_rid: the model instance id to own the worfklow instance
:param cr: a database cursor
"""
# get ids of wkf instances for the old resource (res_id)
#CHECKME: shouldn't we get only active instances?
cr.execute('select id, wkf_id from wkf_instance where res_id=%s and res_type=%s', (res_id, res_type))
for old_inst_id, wkf_id in cr.fetchall():
# first active instance for new resource (new_rid), using same wkf
cr.execute(
'SELECT id '\
'FROM wkf_instance '\
'WHERE res_id=%s AND res_type=%s AND wkf_id=%s AND state=%s',
(new_rid, res_type, wkf_id, 'active'))
new_id = cr.fetchone()
if new_id:
# select all workitems which "wait" for the old instance
cr.execute('select id from wkf_workitem where subflow_id=%s', (old_inst_id,))
for (item_id,) in cr.fetchall():
# redirect all those workitems to the wkf instance of the new resource
cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (new_id[0], item_id))
workflow_service()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,100,437,129,249,882,000 | 41.282051 | 139 | 0.603699 | false |
kartben/iotivity | tools/scons/BoostBuild.py | 1 | 5868 | # -*- coding: utf-8 -*-
# *********************************************************************
#
# Copyright 2014 Intel Mobile Communications GmbH All Rights Reserved.
#
# *********************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *********************************************************************
# This builder executes the boost build system ('b2') for the toolchain
# currently defined in the SCons environment. This builder was created
# to produce cross-compiled versions of boost; in particular, it was
# written to build boost binaries for Android's various architectures.
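# Hypothetical usage sketch (environment values assumed, not part of this
# file): once generate(env) has attached the builder,
#
#     env = Environment(TARGET_OS='android', TARGET_ARCH='armeabi',
#                       PREFIX='/tmp/boost-prefix', MODULES=['system'],
#                       VERSION='4.8')
#     env.BoostBuild('boost-built.sentinel', 'boost_1_55_0/b2')
#
# The source is the path to the 'b2' script inside an unpacked boost tree;
# the builder runs b2 from that directory with a generated user-config.jam.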
import os, subprocess
import SCons.Builder, SCons.Node, SCons.Errors
# Creates the building message
#
# @param s original message
# @param target target name
# @param source source name
# @param env environment object
def __message( s, target, source, env ) :
print "building boost from [%s] for ..." % (source[0])
# Create the builder action which constructs a user-config.jam based
# on the current toolchain and executes the boost build system ('b2')
#
# @param target target file on the local drive
# @param source URL for download
# @@param env environment object
def __action( target, source, env ) :
cmd = None
# Windows...
if env["PLATFORM"] in ["win32"] :
if env.WhereIs("cmd") :
# TODO: Add Windows Support
cmd = None
# read the tools on *nix systems and sets the default parameters
elif env["PLATFORM"] in ["darwin", "linux", "posix"] :
if env.WhereIs("sh") :
cmd = ['./b2']
if not cmd :
raise SCons.Errors.StopError("Boost build system not supported on this platform [%s]" % (env["PLATFORM"]))
    # We need to run b2 from the directory containing the boost sources
cwd = os.path.dirname(os.path.realpath(source[0].path))
# Gather all of the path, bin and flags
version = env.get('VERSION','')
target_os = env['TARGET_OS']
target_arch = env['TARGET_ARCH']
tool_path = os.path.dirname(env['CXX'])
cxx_bin = os.path.basename(env['CXX'])
ar_bin = os.path.basename(env['AR'])
ranlib_bin = os.path.basename(env['RANLIB'])
ccflags = list(env['CFLAGS'])
cxxflags = list(env['CXXFLAGS'])
try:
cxxflags.remove('-fno-rtti')
except ValueError:
pass
try:
cxxflags.remove('-fno-exceptions')
except ValueError:
pass
# Write a user-config for this variant
user_config_name = cwd+os.sep+'tools'+os.sep+'build'+os.sep+'src'+os.sep+'user-config.jam'
user_config_file = open(user_config_name, 'w')
user_config_file.write('import os ;\n')
user_config_file.write('using gcc :')
user_config_file.write(' '+version+' :')
#user_config_file.write(' :')
#user_config_file.write(' '+os.path.basename(toolchain['CXX']['BIN'])+' :\n')
user_config_file.write(' '+cxx_bin+' :\n')
user_config_file.write(' <archiver>'+ar_bin+'\n')
user_config_file.write(' <ranlib>'+ranlib_bin+'\n')
for value in env['CPPDEFINES'] :
if len(value) > 1 :
user_config_file.write(' <compileflags>-D'+value[0]+'='+value[1]+'\n')
else :
user_config_file.write(' <compileflags>-D'+value[0]+'\n')
for value in env['CPPPATH'] :
user_config_file.write(' <compileflags>-I'+value+'\n')
for flag in ccflags :
user_config_file.write(' <compileflags>'+flag+'\n')
for flag in cxxflags :
user_config_file.write(' <cxxflags>'+flag+'\n')
user_config_file.write(' ;\n')
user_config_file.close();
# Ensure that the toolchain is in the PATH
penv = os.environ.copy()
penv["PATH"] = tool_path+":" + penv["PATH"]
build_path = 'build' + os.sep + target_os + os.sep + target_arch
cmd.append('-q')
cmd.append('target-os=linux')
cmd.append('link=static')
cmd.append('threading=multi')
cmd.append('--layout=system')
cmd.append('--build-type=minimal')
cmd.append('--prefix='+env['PREFIX'])
cmd.append('--build-dir='+build_path)
for module in env.get('MODULES',[]) :
cmd.append('--with-'+module)
cmd.append('headers')
cmd.append('install')
# build it now (we need the shell, because some programs need it)
devnull = open(os.devnull, "wb")
handle = subprocess.Popen( cmd, env=penv, cwd=cwd ) #, stdout=devnull )
    if handle.wait() != 0:
raise SCons.Errors.BuildError( "Building boost [%s] on the source [%s]" % (cmd, source[0]) )
# Define the emitter of the builder
#
# @param target target file on the local drive
# @param source
# @param env environment object
def __emitter( target, source, env ) :
return target, source
# Generate function which adds the builder to the environment
#
# @param env environment object
def generate( env ) :
env["BUILDERS"]["BoostBuild"] = SCons.Builder.Builder( action = __action, emitter = __emitter, target_factory = SCons.Node.FS.Entry, source_factory = SCons.Node.FS.File, single_source = True, PRINT_CMD_LINE_FUNC = __message )
# Exist function of the builder
# @param env environment object
# @return true
def exists( env ) :
return 1
| apache-2.0 | -9,195,859,482,871,720,000 | 35.615385 | 234 | 0.604635 | false |
davetcoleman/sdk-examples | baxter/baxter_interface/src/baxter_interface/robustcontroller.py | 3 | 5500 | # Copyright (c) 2013, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import errno
import roslib
roslib.load_manifest('baxter_interface')
import rospy
from baxter_msgs.msg import (
RobustControllerStatus,
)
class RobustController(object):
STATE_IDLE = 0
STATE_STARTING = 1
STATE_RUNNING = 2
STATE_STOPPING = 3
def __init__(self, namespace, enable_msg, disable_msg, timeout = 60):
"""
Wrapper around controlling a RobustController
@param namespace - namespace containing the enable and status topics
@param enable_msg - message to send to enable the RC
@param disable_msg - message to send to disable the RC
@param timeout - seconds to wait for the RC to finish [60]
"""
self._command_pub = rospy.Publisher(
namespace + '/enable',
type(enable_msg))
self._status_sub = rospy.Subscriber(
namespace + '/status',
RobustControllerStatus,
self._callback)
self._enable_msg = enable_msg
self._disable_msg = disable_msg
self._timeout = timeout
self._state = self.STATE_IDLE
self._return = 0
rospy.on_shutdown(self._on_shutdown)
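    # Hypothetical usage sketch (namespace and message values assumed):
    #   rc = RobustController('/sdk/robot/calibrate',
    #                         enable_msg, disable_msg, timeout=120)
    #   rc.run()  # blocks until done; raises IOError on failure or timeout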
def _callback(self, msg):
if self._state == self.STATE_RUNNING:
if msg.complete == RobustControllerStatus.COMPLETE_W_SUCCESS:
self._state = self.STATE_STOPPING
self._return = 0
elif msg.complete == RobustControllerStatus.COMPLETE_W_FAILURE:
self._state = self.STATE_STOPPING
self._return = errno.EIO
elif not msg.isEnabled:
self._state = self.STATE_IDLE
self._return = errno.ENOMSG
elif self._state == self.STATE_STOPPING and not msg.isEnabled:
# Would be nice to use msg.state here, but it does not
# consistently reflect reality.
self._state = self.STATE_IDLE
elif self._state == self.STATE_STARTING and msg.isEnabled:
self._state = self.STATE_RUNNING
def _run_loop(self):
        # RobustControllers need messages more often than once per second
        # (i.e. faster than 1 Hz) in order to continue their current operation.
rate = rospy.Rate(2)
start = rospy.Time.now()
while not rospy.is_shutdown():
if self._state == self.STATE_RUNNING and (rospy.Time.now() - start).to_sec() > self._timeout:
self._state = self.STATE_STOPPING
self._command_pub.publish(self._disable_msg)
self._return = errno.ETIMEDOUT
elif self._state in (self.STATE_STARTING, self.STATE_RUNNING):
self._command_pub.publish(self._enable_msg)
elif self._state == self.STATE_STOPPING:
self._command_pub.publish(self._disable_msg)
elif self._state == self.STATE_IDLE:
break
rate.sleep()
def _on_shutdown(self):
rate = rospy.Rate(2)
while not self._state == self.STATE_IDLE:
self._command_pub.publish(self._disable_msg)
rate.sleep()
self._return = errno.ECONNABORTED
def run(self):
"""
Enable the RobustController and run until completion or error.
"""
self._state = self.STATE_STARTING
self._command_pub.publish(self._enable_msg)
self._run_loop()
if self._return != 0:
if self._return == errno.EIO:
raise IOError(self._return, "Robust controller failed")
elif self._return == errno.ENOMSG:
raise IOError(self._return, "Robust controller failed to enable")
elif self._return == errno.ETIMEDOUT:
raise IOError(self._return, "Robust controller timed out")
elif self._return == errno.ECONNABORTED:
raise IOError(self._return, "Robust controller interruped by user")
else:
raise IOError(self._return)
| bsd-3-clause | -5,226,904,284,030,844,000 | 38.007092 | 105 | 0.638727 | false |
megaumi/django | tests/m2m_recursive/tests.py | 424 | 5410 | from __future__ import unicode_literals
from operator import attrgetter
from django.test import TestCase
from .models import Person
class RecursiveM2MTests(TestCase):
def setUp(self):
self.a, self.b, self.c, self.d = [
Person.objects.create(name=name)
for name in ["Anne", "Bill", "Chuck", "David"]
]
# Anne is friends with Bill and Chuck
self.a.friends.add(self.b, self.c)
# David is friends with Anne and Chuck - add in reverse direction
self.d.friends.add(self.a, self.c)
def test_recursive_m2m_all(self):
""" Test that m2m relations are reported correctly """
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), [
"Bill",
"Chuck",
"David"
],
attrgetter("name"),
ordered=False
)
# Who is friends with Bill?
self.assertQuerysetEqual(
self.b.friends.all(), [
"Anne",
],
attrgetter("name")
)
# Who is friends with Chuck?
self.assertQuerysetEqual(
self.c.friends.all(), [
"Anne",
"David"
],
attrgetter("name"),
ordered=False
)
# Who is friends with David?
self.assertQuerysetEqual(
self.d.friends.all(), [
"Anne",
"Chuck",
],
attrgetter("name"),
ordered=False
)
def test_recursive_m2m_reverse_add(self):
""" Test reverse m2m relation is consistent """
# Bill is already friends with Anne - add Anne again, but in the
# reverse direction
self.b.friends.add(self.a)
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), [
"Bill",
"Chuck",
"David",
],
attrgetter("name"),
ordered=False
)
# Who is friends with Bill?
self.assertQuerysetEqual(
self.b.friends.all(), [
"Anne",
],
attrgetter("name")
)
def test_recursive_m2m_remove(self):
""" Test that we can remove items from an m2m relationship """
# Remove Anne from Bill's friends
self.b.friends.remove(self.a)
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), [
"Chuck",
"David",
],
attrgetter("name"),
ordered=False
)
# Who is friends with Bill?
self.assertQuerysetEqual(
self.b.friends.all(), []
)
def test_recursive_m2m_clear(self):
""" Tests the clear method works as expected on m2m fields """
# Clear Anne's group of friends
self.a.friends.clear()
# Who is friends with Anne?
self.assertQuerysetEqual(
self.a.friends.all(), []
)
# Reverse relationships should also be gone
# Who is friends with Chuck?
self.assertQuerysetEqual(
self.c.friends.all(), [
"David",
],
attrgetter("name")
)
# Who is friends with David?
self.assertQuerysetEqual(
self.d.friends.all(), [
"Chuck",
],
attrgetter("name")
)
def test_recursive_m2m_add_via_related_name(self):
""" Tests that we can add m2m relations via the related_name attribute """
# David is idolized by Anne and Chuck - add in reverse direction
self.d.stalkers.add(self.a)
# Who are Anne's idols?
self.assertQuerysetEqual(
self.a.idols.all(), [
"David",
],
attrgetter("name"),
ordered=False
)
# Who is stalking Anne?
self.assertQuerysetEqual(
self.a.stalkers.all(), [],
attrgetter("name")
)
def test_recursive_m2m_add_in_both_directions(self):
""" Check that adding the same relation twice results in a single relation """
# Ann idolizes David
self.a.idols.add(self.d)
# David is idolized by Anne
self.d.stalkers.add(self.a)
# Who are Anne's idols?
self.assertQuerysetEqual(
self.a.idols.all(), [
"David",
],
attrgetter("name"),
ordered=False
)
# As the assertQuerysetEqual uses a set for comparison,
# check we've only got David listed once
self.assertEqual(self.a.idols.all().count(), 1)
def test_recursive_m2m_related_to_self(self):
""" Check the expected behavior when an instance is related to itself """
# Ann idolizes herself
self.a.idols.add(self.a)
# Who are Anne's idols?
self.assertQuerysetEqual(
self.a.idols.all(), [
"Anne",
],
attrgetter("name"),
ordered=False
)
# Who is stalking Anne?
self.assertQuerysetEqual(
self.a.stalkers.all(), [
"Anne",
],
attrgetter("name")
)
| bsd-3-clause | -3,986,207,669,628,941,300 | 26.74359 | 86 | 0.502588 | false |
jpanikulam/experiments | gpgpu/generators/sdf_shape_defs.py | 1 | 1463 | # %codegen(cl_gen)
import generate_opencl_structs
def main():
plane_defd = [
{
'type': 'vector',
'length': 3,
'name': 'normal',
},
{
'type': 'float',
'length': 1,
'name': 'd',
}
]
sphere_defd = [
{
'type': 'vector',
'length': 3,
'name': 'origin',
},
{
'type': 'float',
'length': 1,
'name': 'r',
}
]
box_defd = [
{
'type': 'vector',
'length': 3,
'name': 'origin',
},
{
'type': 'vector',
'length': 3,
'name': 'extents',
},
]
cfg_defd = [
{
'type': 'int',
'length': 1,
'name': 'debug_mode',
},
{
'type': 'bool',
'length': 1,
'name': 'test_feature',
},
{
'type': 'int',
'length': 1,
'name': 'terminal_iteration',
}
]
definitions = [
("Plane", plane_defd),
("Sphere", sphere_defd),
("Box", box_defd),
("RenderConfig", cfg_defd),
]
destination = "/home/jacob/repos/experiments/gpgpu/demos/signed_distance_shapes"
generate_opencl_structs.write_files(definitions, destination)
if __name__ == '__main__':
main()
| mit | -843,259,497,694,248,400 | 18.77027 | 84 | 0.358852 | false |
napkindrawing/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_facts.py | 33 | 3282 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vm_facts
short_description: Return basic facts pertaining to a vSphere virtual machine guest
description:
- Return basic facts pertaining to a vSphere virtual machine guest
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather all registered virtual machines
local_action:
module: vmware_vm_facts
hostname: esxi_or_vcenter_ip_or_hostname
username: username
password: password
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(content):
virtual_machines = get_all_objs(content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address = ""
summary = vm.summary
if summary.guest is not None:
_ip_address = summary.guest.ipAddress
if _ip_address is None:
_ip_address = ""
virtual_machine = {
summary.config.name: {
"guest_fullname": summary.config.guestFullName,
"power_state": summary.runtime.powerState,
"ip_address": _ip_address,
"uuid": summary.config.uuid
}
}
_virtual_machines.update(virtual_machine)
return _virtual_machines
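# Illustrative shape of the returned mapping (values hypothetical):
#   {'vm01': {'guest_fullname': 'Ubuntu Linux (64-bit)',
#             'power_state': 'poweredOn',
#             'ip_address': '10.0.0.5',
#             'uuid': '4237f377-0000-0000-0000-000000000000'}}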
def main():
argument_spec = vmware_argument_spec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
try:
content = connect_to_api(module)
_virtual_machines = get_all_virtual_machines(content)
module.exit_json(changed=False, virtual_machines=_virtual_machines)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 5,134,906,733,403,485,000 | 29.388889 | 86 | 0.667276 | false |
rkokkelk/CouchPotatoServer | libs/guessit/__main__.py | 94 | 6835 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from __future__ import print_function
from guessit import u
from guessit import slogging, guess_file_info
from optparse import OptionParser
import logging
import sys
import os
import locale
def detect_filename(filename, filetype, info=['filename'], advanced = False):
filename = u(filename)
print('For:', filename)
print('GuessIt found:', guess_file_info(filename, filetype, info).nice_string(advanced))
def run_demo(episodes=True, movies=True, advanced=False):
# NOTE: tests should not be added here but rather in the tests/ folder
# this is just intended as a quick example
if episodes:
testeps = [ 'Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.[tvu.org.ru].avi',
'Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi',
'Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.[tvu.org.ru].avi',
'Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi',
'Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi',
'Series/Simpsons/The_simpsons_s13e18_-_i_am_furious_yellow.mpg',
'Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.[tvu.org.ru].avi',
'Series/Dr._Slump_-_002_DVB-Rip_Catalan_by_kelf.avi',
'Series/Kaamelott/Kaamelott - Livre V - Second Volet - HD 704x396 Xvid 2 pass - Son 5.1 - TntRip by Slurm.avi'
]
for f in testeps:
print('-'*80)
detect_filename(f, filetype='episode', advanced=advanced)
if movies:
testmovies = [ 'Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv',
'Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi',
'Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director\'s.Cut).CD1.DVDRip.XviD.AC3-WAF.avi',
'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv',
'Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv',
'Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi', # FIXME: PROPER and R5 get overwritten
'[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv', # FIXME: title gets overwritten
'Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi',
'Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.English.srt',
'Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv',
'Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv',
'Movies/Pirates of the Caribbean: The Curse of the Black Pearl (2003)/Pirates.Of.The.Carribean.DC.2003.iNT.DVDRip.XviD.AC3-NDRT.CD1.avi',
'Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi',
'Movies/The NeverEnding Story (1984)/The.NeverEnding.Story.1.1984.DVDRip.AC3.Xvid-Monteque.avi',
'Movies/Juno (2007)/Juno KLAXXON.avi',
'Movies/Chat noir, chat blanc (1998)/Chat noir, Chat blanc - Emir Kusturica (VO - VF - sub FR - Chapters).mkv',
'Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.srt',
'Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi',
'testsmewt_bugs/movies/Baraka_Edition_Collector.avi'
]
for f in testmovies:
print('-'*80)
detect_filename(f, filetype = 'movie', advanced = advanced)
def main():
slogging.setupLogging()
# see http://bugs.python.org/issue2128
if sys.version_info.major < 3 and os.name == 'nt':
for i, a in enumerate(sys.argv):
sys.argv[i] = a.decode(locale.getpreferredencoding())
parser = OptionParser(usage = 'usage: %prog [options] file1 [file2...]')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help = 'display debug output')
parser.add_option('-i', '--info', dest = 'info', default = 'filename',
help = 'the desired information type: filename, hash_mpc or a hash from python\'s '
'hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of '
'them, comma-separated')
parser.add_option('-t', '--type', dest = 'filetype', default = 'autodetect',
help = 'the suggested file type: movie, episode or autodetect')
parser.add_option('-a', '--advanced', dest = 'advanced', action='store_true', default = False,
help = 'display advanced information for filename guesses, as json output')
parser.add_option('-d', '--demo', action='store_true', dest='demo', default=False,
help = 'run a few builtin tests instead of analyzing a file')
options, args = parser.parse_args()
if options.verbose:
logging.getLogger('guessit').setLevel(logging.DEBUG)
if options.demo:
run_demo(episodes=True, movies=True, advanced=options.advanced)
else:
if args:
for filename in args:
detect_filename(filename,
filetype = options.filetype,
info = options.info.split(','),
advanced = options.advanced)
else:
parser.print_help()
if __name__ == '__main__':
main()
| gpl-3.0 | 126,389,051,734,252 | 53.238095 | 160 | 0.610623 | false |
coreos/chromite | scripts/cros_generate_sysroot.py | 2 | 3711 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a sysroot tarball for building a specific package.
Meant for use after setup_board and build_packages have been run.
"""
import os
from chromite.buildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import commandline
from chromite.lib import osutils
from chromite.lib import sudo
DEFAULT_NAME = 'sysroot_%(package)s.tar.xz'
PACKAGE_SEPARATOR = '/'
SYSROOT = 'sysroot'
def ParseCommandLine(argv):
"""Parse args, and run environment-independent checks."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--board', required=True,
help=('The board to generate the sysroot for.'))
parser.add_argument('--package', required=True,
help=('The package to generate the sysroot for.'))
parser.add_argument('--out-dir', type=osutils.ExpandPath, required=True,
help='Directory to place the generated tarball.')
parser.add_argument('--out-file',
help=('The name to give to the tarball. Defaults to %r.'
% DEFAULT_NAME))
options = parser.parse_args(argv)
if not options.out_file:
options.out_file = DEFAULT_NAME % {
'package': options.package.replace(PACKAGE_SEPARATOR, '_')
}
return options
class GenerateSysroot(object):
"""Wrapper for generation functionality."""
PARALLEL_EMERGE = os.path.join(constants.CHROMITE_BIN_DIR, 'parallel_emerge')
def __init__(self, sysroot, options):
"""Initialize
Arguments:
sysroot: Path to sysroot.
options: Parsed options.
"""
self.sysroot = sysroot
self.options = options
def _InstallToolchain(self):
cros_build_lib.RunCommand(
[os.path.join(constants.CROSUTILS_DIR, 'install_toolchain'),
'--noconfigure', '--board_root', self.sysroot, '--board',
self.options.board])
def _InstallKernelHeaders(self):
cros_build_lib.SudoRunCommand(
[self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
'--root-deps=rdeps', '--getbinpkg', '--usepkg',
'--root=%s' % self.sysroot, 'sys-kernel/linux-headers'])
def _InstallBuildDependencies(self):
cros_build_lib.SudoRunCommand(
[self.PARALLEL_EMERGE, '--board=%s' % self.options.board,
'--root=%s' % self.sysroot, '--usepkg', '--onlydeps',
'--usepkg-exclude=%s' % self.options.package, self.options.package])
def _CreateTarball(self):
target = os.path.join(self.options.out_dir, self.options.out_file)
cros_build_lib.CreateTarball(target, self.sysroot, sudo=True)
def Perform(self):
"""Generate the sysroot."""
self._InstallToolchain()
self._InstallKernelHeaders()
self._InstallBuildDependencies()
self._CreateTarball()
def FinishParsing(options):
"""Run environment dependent checks on parsed args."""
target = os.path.join(options.out_dir, options.out_file)
if os.path.exists(target):
cros_build_lib.Die('Output file %r already exists.' % target)
if not os.path.isdir(options.out_dir):
cros_build_lib.Die(
'Non-existent directory %r specified for --out-dir' % options.out_dir)
def main(argv):
options = ParseCommandLine(argv)
FinishParsing(options)
cros_build_lib.AssertInsideChroot()
with sudo.SudoKeepAlive(ttyless_sudo=False):
with osutils.TempDirContextManager(sudo_rm=True) as tempdir:
sysroot = os.path.join(tempdir, SYSROOT)
os.mkdir(sysroot)
GenerateSysroot(sysroot, options).Perform()
| bsd-3-clause | -5,398,978,733,905,501,000 | 32.133929 | 79 | 0.674212 | false |
psi4/psi4 | psi4/share/psi4/scripts/apply_license.py | 7 | 3497 | # Checks all psi4 relevant files for proper boilerplate GNU license.
# This is sold as-is with no warranty -- probably should double-check everything
# after running. I am not responsible if you break Psi4.
#
# Do not forget to do share/plugins by hand!
import os
# File type we know how to handle
ftypes = ['cc', 'h', 'py']
c_header ="""/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2021 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/"""
py_header = c_header.replace(' */', '#')
py_header = py_header.replace('/*', '#')
py_header = py_header.replace(' *', '#')
c_header = c_header.splitlines()
py_header = py_header.splitlines()
def check_header(infile):
f = open(infile, 'r+')
data = f.read().splitlines()
# Find the header location
max_lines = 30
try:
symbol = None
if filename.split('.')[-1] in ['py']:
start = data.index("# @BEGIN LICENSE") - 1
end = data.index("# @END LICENSE") + 1
if data[start] != '#' or data[end] != '#':
f.close()
print('Did not find "wings" of license block in file %s' % infile)
return
else:
start = data.index(" * @BEGIN LICENSE") - 1
end = data.index(" * @END LICENSE") + 1
if data[start] != '/*' or data[end] != ' */':
f.close()
print('Did not find "wings" of license block in file %s' % infile)
return
    except ValueError:
print('Could not find license block in file %s' % infile)
f.close()
return
# Make sure the block actually looks like a license
license = data[start:end+1]
top = any("PSI4:" in x.upper() for x in license[:5])
bot = any("51 Franklin Street" in x for x in license[5:])
if not (top and bot):
print('Did not understand infile %s' % infile)
f.close()
return
# Replace license
if filename.split('.')[-1] in ['cc', 'h']:
data[start:end + 1] = c_header
elif filename.split('.')[-1] in ['py']:
data[start:end + 1] = py_header
else:
print('Did not understand infile end: %s' % infile)
f.close()
return
# Write it out
f.seek(0)
f.write("\n".join(data))
f.truncate()
f.close()
avoid_strings = ['qcdb', 'libJKFactory']
walk = list(os.walk('../../src/'))
walk += list(os.walk('../python'))
for root, dirnames, filenames in walk:
if any(x in root for x in avoid_strings):
continue
for filename in filenames:
if filename.split('.')[-1] not in ftypes:
continue
check_header(root + '/' + filename)
| lgpl-3.0 | 3,931,227,482,959,317,000 | 29.675439 | 82 | 0.598799 | false |
Tivix/wagtail | wagtail/utils/setup.py | 5 | 1510 | from __future__ import absolute_import, print_function, unicode_literals
import os
import subprocess
from setuptools import Command
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist as base_sdist
class assets_mixin(object):
def compile_assets(self):
try:
subprocess.check_call(['npm', 'run', 'build'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error compiling assets: ' + str(e))
raise SystemExit(1)
class assets(Command, assets_mixin):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.compile_assets()
class sdist(base_sdist, assets_mixin):
def run(self):
self.compile_assets()
base_sdist.run(self)
class check_bdist_egg(bdist_egg):
# If this file does not exist, warn the user to compile the assets
sentinel_dir = 'wagtail/wagtailadmin/static/'
def run(self):
bdist_egg.run(self)
if not os.path.isdir(self.sentinel_dir):
print("\n".join([
"************************************************************",
"The front end assets for Wagtail are missing.",
"To generate the assets, please refer to the documentation in",
"docs/contributing/css_guidelines.rst",
"************************************************************",
]))
| bsd-3-clause | 1,847,419,954,249,556,700 | 26.962963 | 79 | 0.566887 | false |
xxshutong/openerp-7.0 | openerp/addons/mrp/company.py | 56 | 1393 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'manufacturing_lead': fields.float('Manufacturing Lead Time', required=True,
help="Security days for each manufacturing operation."),
}
_defaults = {
'manufacturing_lead': lambda *a: 1.0,
}
company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,361,544,100,795,959,600 | 37.694444 | 84 | 0.615219 | false |
CareerVillage/slack-moderation | tools/smt/smt/conf/settings.py | 1 | 1207 | # -*- coding: utf-8 -*-
import os
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
PROJECT_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
def rel(*x):
return os.path.abspath(os.path.join(PROJECT_ROOT, *x))
SETUP_DIR = rel('../../../setup')
KEY_DIR = rel('../../../keys')
# Staging
STA_PUPPET_GIT_BRANCH = 'sta'
STA_PUPPET_GIT_REPO = '[email protected]:CareerVillage/slack-moderation.git'
STA_PUPPET_BASE_DOMAIN = 'staging.slack-moderation'
STA_PUPPET_AWS_ACCESS_KEY_ID = None
STA_PUPPET_AWS_SECRET_ACCESS_KEY = None
STA_PUPPET_SENTRY_DSN = None
STA_PUPPET_NEWRELIC_LICENSE = None
STA_PUPPET_SECRET_KEY = None
# Production
PRO_PUPPET_GIT_BRANCH = 'master'
PRO_PUPPET_GIT_REPO = '[email protected]:CareerVillage/slack-moderation.git'
PRO_PUPPET_BASE_DOMAIN = 'slack-moderation'
PRO_PUPPET_AWS_ACCESS_KEY_ID = None
PRO_PUPPET_AWS_SECRET_ACCESS_KEY = None
PRO_PUPPET_SENTRY_DSN = None
PRO_PUPPET_NEWRELIC_LICENSE = None
PRO_PUPPET_SECRET_KEY = None
try:
from secrets import *
except ImportError:
print 'Error importing secrets module on smt.conf.settings'
try:
from user import *
except ImportError:
print 'Error importing user module on smt.conf.settings'
| mit | 2,738,662,883,398,976,000 | 24.680851 | 77 | 0.71831 | false |
RecursiveGreen/pymod | formats/XM.py | 1 | 16803 | import struct
from pymod.constants import *
from pymod.module import *
from pymod.util import *
class XMNote(Note):
"""The definition of an note and it's effects in Fast Tracker II"""
def __init__(self, note=0, instrument=0, voleffect=0, volparam=0, effect=0, param=0):
super(XMNote, self).__init__(note, instrument, voleffect, volparam, effect, param)
def __unicode__(self):
keys = ['C-', 'C#', 'D-', 'D#', 'E-', 'F-', 'F#', 'G-', 'G#', 'A-', 'A#', 'B-']
commands = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if self.note == 0: ret1 = '...'
elif self.note > 0 and self.note <=120:
split = divmod(self.note-13, 12)
ret1 = '%s%s' % (keys[split[1]], str(split[0]))
elif self.note == 254: ret1 = '^^^'
elif self.note == 255: ret1 = '==='
if self.instrument: ret2 = hex(self.instrument)[2:].zfill(2).upper()
else: ret2 = '..'
if self.voleffect == VOLFX_NONE: ret3 = '..'
elif self.voleffect == VOLFX_VOLUME: ret3 = hex(self.volparam)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VOLSLIDEDOWN: ret3 = hex(self.volparam + 0x60)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VOLSLIDEUP: ret3 = hex(self.volparam + 0x70)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_FINEVOLDOWN: ret3 = hex(self.volparam + 0x80)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_FINEVOLUP: ret3 = hex(self.volparam + 0x90)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VIBRATOSPEED: ret3 = hex(self.volparam + 0xA0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_VIBRATODEPTH: ret3 = hex(self.volparam + 0xB0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_PANNING: ret3 = hex(((self.volparam - 2) >> 2) + 0xC0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_PANSLIDELEFT: ret3 = hex(self.volparam + 0xD0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_PANSLIDERIGHT: ret3 = hex(self.volparam + 0xE0)[2:].zfill(2).upper()
elif self.voleffect == VOLFX_TONEPORTAMENTO: ret3 = hex(self.volparam + 0xF0)[2:].zfill(2).upper()
if self.effect: letter = commands[self.effect-1]
else: letter = '.'
ret4 = '%s%s' % (letter, hex(self.param)[2:].zfill(2).upper())
return '%s %s %s %s' % (ret1, ret2, ret3, ret4)
def __repr__(self):
return self.__unicode__()
class XMPattern(Pattern):
"""The definition of the XM pattern"""
def __init__(self, file=None, rows=64, channels=32):
super(XMPattern, self).__init__(rows, channels)
self.headerlen = 9
self.packtype = 0
self.packsize = rows * channels
if file:
self.load(file)
else:
self.data = self.empty(self.rows, self.channels)
def empty(self, rows, channels):
pattern = []
for row in range(rows):
pattern.append([])
for channel in range(channels):
pattern[row].append(XMNote())
return pattern
def load(self, file):
self.headerlen = struct.unpack("<L", file.read(4))[0]
self.packtype = struct.unpack("<B", file.read(1))[0]
self.rows = struct.unpack("<H", file.read(2))[0]
self.packsize = struct.unpack("<H", file.read(2))[0]
self.data = self.empty(self.rows, self.channels)
maskvar = 0
end = file.tell() + self.packsize
for row in range(self.rows):
for channel in range(self.channels):
if file.tell() < end:
maskvar = struct.unpack("<B", file.read(1))[0]
note = 0
inst = 0
voldata = 0
command = 0
param = 0
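# Per the XM format, a first byte with bit 7 set is a pack flag: bits 0-4
# then mark which of note, instrument, volume byte, effect and parameter
# actually follow. With bit 7 clear, the byte is a literal note and all
# five fields are stored unpacked.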
if maskvar & 128:
if maskvar & 1: note = struct.unpack("<B", file.read(1))[0]
if maskvar & 2: inst = struct.unpack("<B", file.read(1))[0]
if maskvar & 4: voldata = struct.unpack("<B", file.read(1))[0]
if maskvar & 8: command = struct.unpack("<B", file.read(1))[0]
if maskvar & 16: param = struct.unpack("<B", file.read(1))[0]
else:
note = maskvar
inst = struct.unpack("<B", file.read(1))[0]
voldata = struct.unpack("<B", file.read(1))[0]
command = struct.unpack("<B", file.read(1))[0]
param = struct.unpack("<B", file.read(1))[0]
# Cleanup. . .
if note > NOTE_NONE and note < 97:
self.data[row][channel].note = note + 12
elif note == 97:
self.data[row][channel].note = NOTE_OFF
else:
self.data[row][channel].note = NOTE_NONE
if inst == 255:
self.data[row][channel].instrument = 0
else:
self.data[row][channel].instrument = inst
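# XM volume-column encoding: 0x10-0x50 is a plain volume (value minus
# 0x10, giving 0-64); 0x60 and above packs a command in the high nibble
# (slides, vibrato, panning, portamento) with its parameter in the low
# nibble, which is what the branches below decode.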
if voldata >= 16 and voldata <= 80:
self.data[row][channel].voleffect = VOLFX_VOLUME
self.data[row][channel].volparam = voldata - 16
elif voldata >= 96:
volcmd = voldata & 0xF0
voldata = voldata & 0x0F
self.data[row][channel].volparam = voldata
if volcmd == 0x60: self.data[row][channel].voleffect = VOLFX_VOLSLIDEDOWN
if volcmd == 0x70: self.data[row][channel].voleffect = VOLFX_VOLSLIDEUP
if volcmd == 0x80: self.data[row][channel].voleffect = VOLFX_FINEVOLDOWN
if volcmd == 0x90: self.data[row][channel].voleffect = VOLFX_FINEVOLUP
if volcmd == 0xA0: self.data[row][channel].voleffect = VOLFX_VIBRATOSPEED
if volcmd == 0xB0: self.data[row][channel].voleffect = VOLFX_VIBRATODEPTH
if volcmd == 0xC0:
self.data[row][channel].voleffect = VOLFX_PANNING
self.data[row][channel].volparam = (voldata << 2) + 2
if volcmd == 0xD0: self.data[row][channel].voleffect = VOLFX_PANSLIDELEFT
if volcmd == 0xE0: self.data[row][channel].voleffect = VOLFX_PANSLIDERIGHT
if volcmd == 0xF0: self.data[row][channel].voleffect = VOLFX_TONEPORTAMENTO
self.data[row][channel].effect = command
self.data[row][channel].param = param
class XMEnvelope(Envelope):
"""The definition of an envelope for an XM instrument. There are a total
of two envelopes: Volume and Panning."""
def __init__(self, type=0):
super(XMEnvelope, self).__init__(type)
class XMSample(Sample):
"""Definition of an Fast Tracker II sample"""
def __init__(self, file=None):
super(XMSample, self).__init__()
self.xmsamploadflags = SF_LE | SF_M | SF_PCMD
if file: self.load(file, 0)
def load(self, file, loadtype=0):
if loadtype == 0:
# Loads the XM sample headers
xmsamplength = struct.unpack("<L", file.read(4))[0]
xmsamploopbegin = struct.unpack("<L", file.read(4))[0]
xmsamploopend = struct.unpack("<L", file.read(4))[0] + xmsamploopbegin
xmsampvolume = struct.unpack("<B", file.read(1))[0]
xmsampfinetune = struct.unpack("<b", file.read(1))[0]
xmsampflags = struct.unpack("<B", file.read(1))[0]
xmsamppanning = struct.unpack("<B", file.read(1))[0]
xmsamprelnote = struct.unpack("<b", file.read(1))[0]
xmsampRESERVED = struct.unpack("<B", file.read(1))[0]
xmsampname = struct.unpack("<22s", file.read(22))[0]
# Parse it into generic Sample
self.name = xmsampname
self.filename = xmsampname
self.volume = MIN(xmsampvolume, 64) * 4
self.length = xmsamplength
self.loopbegin = xmsamploopbegin
self.loopend = xmsamploopend
self.flags = CHN_PANNING
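# XM sample flags: the low two bits select the loop type (1 = forward,
# 2 = ping-pong) and bit 0x10 marks 16-bit data, in which case the byte
# counts read above are halved into sample frames below.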
if self.loopbegin >= self.loopend:
xmsampflags = xmsampflags & ~3
if xmsampflags & 3:
if xmsampflags & 3 == 2: self.flags = self.flags | CHN_PINGPONGLOOP
if xmsampflags & 3 == 1: self.flags = self.flags | CHN_LOOP
if xmsampflags & 0x10:
self.flags = self.flags | CHN_16BIT
self.length = self.length >> 1
self.loopbegin = self.loopbegin >> 1
self.loopend = self.loopend >> 1
self.panning = xmsamppanning
self.c5speed = transpose_to_frequency(xmsamprelnote, xmsampfinetune)
elif loadtype == 1:
# . . .otherwise, load sample data
self.xmsamploadflags = self.xmsamploadflags | (SF_8, SF_16)[bool(self.flags & CHN_16BIT)]
super(XMSample, self).load(file, file.tell(), self.xmsamploadflags)
class XMInstrument(Instrument):
"""Definition of an Fast Tracker II instrument"""
def __init__(self, file=None):
super(XMInstrument, self).__init__()
self.xminstnumsamples = 0
self.samples = []
if file: self.load(file)
def load(self, file):
# Load the XM instrument data
xminstheadsize = struct.unpack("<L", file.read(4))[0]
xminstname = struct.unpack("<22s", file.read(22))[0]
xminsttype = struct.unpack("<B", file.read(1))[0] # random junk, supposedly. . .
self.xminstnumsamples = struct.unpack("<H", file.read(2))[0]
self.name = xminstname
xminstsmpheadsize = struct.unpack("<L", file.read(4))[0]
if self.xminstnumsamples > 0:
xminstnotekeytable = []
for i in range(96):
xminstnotekeytable.append(struct.unpack("<B", file.read(1))[0])
xminstvolenv= []
for i in range(12):
xminstvolenv.append(list(struct.unpack("<HH", file.read(4))))
xminstpanenv= []
for i in range(12):
xminstpanenv.append(list(struct.unpack("<HH", file.read(4))))
xminstvolpoints = struct.unpack("<B", file.read(1))[0]
xminstpanpoints = struct.unpack("<B", file.read(1))[0]
xminstvolsustpnt = struct.unpack("<B", file.read(1))[0]
xminstvollpstpnt = struct.unpack("<B", file.read(1))[0]
xminstvollpedpnt = struct.unpack("<B", file.read(1))[0]
xminstpansustpnt = struct.unpack("<B", file.read(1))[0]
xminstpanlpstpnt = struct.unpack("<B", file.read(1))[0]
xminstpanlpedpnt = struct.unpack("<B", file.read(1))[0]
xminstvolenvtype = struct.unpack("<B", file.read(1))[0]
xminstpanenvtype = struct.unpack("<B", file.read(1))[0]
xminstvibtype = struct.unpack("<B", file.read(1))[0]
xminstvibsweep = struct.unpack("<B", file.read(1))[0]
xminstvibdepth = struct.unpack("<B", file.read(1))[0]
xminstvibrate = struct.unpack("<B", file.read(1))[0]
xminstvolfadeout = struct.unpack("<H", file.read(2))[0]
xminstRESERVED1 = list(struct.unpack("<11H", file.read(22)))
# Parse it into the generic Instrument
for i in range(96):
self.notemap[i] = i
self.samplemap[i] = xminstnotekeytable[i]
self.volumeenv = XMEnvelope()
self.volumeenv.ticks = []
self.volumeenv.values = []
self.panningenv = XMEnvelope()
self.panningenv.ticks = []
self.panningenv.values = []
for i in range(12):
self.volumeenv.ticks.append(xminstvolenv[i][0])
self.volumeenv.values.append(xminstvolenv[i][1])
self.panningenv.ticks.append(xminstpanenv[i][0])
self.panningenv.values.append(xminstpanenv[i][1])
self.volumeenv.nodes = xminstvolpoints
self.panningenv.nodes = xminstpanpoints
self.volumeenv.sustloopbegin = xminstvolsustpnt
self.volumeenv.sustloopend = xminstvolsustpnt
self.volumeenv.loopbegin = xminstvollpstpnt
self.volumeenv.loopend = xminstvollpedpnt
self.panningenv.sustloopbegin = xminstpansustpnt
self.panningenv.sustloopend = xminstpansustpnt
self.panningenv.loopbegin = xminstpanlpstpnt
self.panningenv.loopend = xminstpanlpedpnt
if xminstvolenvtype & 1: self.flags | ENV_VOLUME
if xminstvolenvtype & 2: self.flags | ENV_VOLSUSTAIN
if xminstvolenvtype & 4: self.flags | ENV_VOLLOOP
if xminstpanenvtype & 1: self.flags | ENV_PANNING
if xminstpanenvtype & 2: self.flags | ENV_PANSUSTAIN
if xminstpanenvtype & 4: self.flags | ENV_PANLOOP
self.fadeout = xminstvolfadeout
if self.xminstnumsamples:
# Load headers. . .
for num in range(self.xminstnumsamples):
self.samples.append(XMSample(file))
self.samples[num].vibtype = xminstvibtype
self.samples[num].vibrate = xminstvibsweep
self.samples[num].vibdepth = xminstvibdepth
self.samples[num].vibspeed = xminstvibrate
# . . .followed by sample data
for num in range(self.xminstnumsamples):
self.samples[num].load(file, 1)
class XM(Module):
"""A class that holds an XM module file"""
def __init__(self, filename=None):
super(XM, self).__init__()
if not filename:
self.id = 'Extended Module: ' # 17 char length (stupid space)
self.b1Atch = 0x1A # byte 1A temp char. . . ;)
self.tracker = 'FastTracker v2.00'
self.cwtv = 0x0104
self.headerlength = 0
self.restartpos = 0
self.channelnum = 32
else:
f = open(filename, 'rb')
self.filename = filename
self.id = struct.unpack("<17s", f.read(17))[0] # 'Extended module: '
self.name = struct.unpack("<20s", f.read(20))[0] # Song title (padded with NULL)
self.b1Atch = struct.unpack("<B", f.read(1))[0] # 0x1A
self.tracker = struct.unpack("<20s", f.read(20))[0]
self.cwtv = struct.unpack("<H", f.read(2))[0] # Created with tracker version (XM y.xx = 0yxxh)
self.headerlength = struct.unpack("<L", f.read(4))[0]
self.ordernum = struct.unpack("<H", f.read(2))[0] # Number of orders in song
self.restartpos = struct.unpack("<H", f.read(2))[0] # Restart position
self.channelnum = struct.unpack("<H", f.read(2))[0] # Number of channels in song
self.patternnum = struct.unpack("<H", f.read(2))[0] # Number of patterns in song
self.instrumentnum = struct.unpack("<H", f.read(2))[0] # Number of instruments in song
self.flags = struct.unpack("<H", f.read(2))[0]
self.tempo = struct.unpack("<H", f.read(2))[0]
self.speed = struct.unpack("<H", f.read(2))[0]
self.orders = list(struct.unpack("<256B", f.read(256)))
self.patterns = []
if self.patternnum:
for num in range(self.patternnum):
self.patterns.append(XMPattern(f, channels=self.channelnum))
self.instruments = []
if self.instrumentnum:
for num in range(self.instrumentnum):
self.instruments.append(XMInstrument(f))
f.close()
def detect(filename):
f = open(filename, 'rb')
id = struct.unpack("<17s", f.read(17))[0]
f.close()
if id == 'Extended Module: ':
return 2
else:
return 0
detect = staticmethod(detect)
def gettracker(self):
return self.tracker.replace('\x00', ' ').strip()
| gpl-3.0 | 775,969,824,501,384,100 | 46.067227 | 116 | 0.526632 | false |
chatcannon/scipy | scipy/linalg/_cython_signature_generator.py | 52 | 8369 | """
A script that uses f2py to generate the signature files used to make
the Cython BLAS and LAPACK wrappers from the fortran source code for
LAPACK and the reference BLAS.
To generate the BLAS wrapper signatures call:
python _cython_signature_generator.py blas <blas_directory> <out_file>
To generate the LAPACK wrapper signatures call:
python _cython_signature_generator.py lapack <lapack_src_directory> <out_file>
"""
import glob
from numpy.f2py import crackfortran
sig_types = {'integer': 'int',
'complex': 'c',
'double precision': 'd',
'real': 's',
'complex*16': 'z',
'double complex': 'z',
'character': 'char',
'logical': 'bint'}
def get_type(info, arg):
argtype = sig_types[info['vars'][arg]['typespec']]
if argtype == 'c' and info['vars'][arg].get('kindselector') is not None:
argtype = 'z'
return argtype
def make_signature(filename):
info = crackfortran.crackfortran(filename)[0]
name = info['name']
if info['block'] == 'subroutine':
return_type = 'void'
else:
return_type = get_type(info, name)
arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']]
args = ', '.join(arglist)
# Eliminate strange variable naming that replaces rank with rank_bn.
args = args.replace('rank_bn', 'rank')
return '{0} {1}({2})\n'.format(return_type, name, args)
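# Illustrative only (hypothetical input): for a Fortran file declaring
# "SUBROUTINE DAXPY(N,DA,DX,INCX,DY,INCY)" with the usual BLAS types,
# make_signature would return the line
# "void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy)\n".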
def get_sig_name(line):
return line.split('(')[0].split(' ')[-1]
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
if directory[-1] in ['/', '\\']:
directory = directory[:-1]
files = glob.glob(directory + '/*.f*')
if exclusions is None:
exclusions = []
if manual_wrappers is not None:
exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')]
signatures = []
for filename in files:
name = filename.split('\\')[-1][:-2]
if name in exclusions:
continue
signatures.append(make_signature(filename))
if manual_wrappers is not None:
signatures += [l + '\n' for l in manual_wrappers.split('\n')]
signatures.sort(key=get_sig_name)
comment = ["# This file was generated by _cython_wrapper_generators.py.\n",
"# Do not edit this file directly.\n\n"]
with open(outfile, 'w') as f:
f.writelines(comment)
f.writelines(signatures)
# The signature that is used for zcgesv in lapack 3.1.0 and 3.1.1 changed
# in version 3.2.0. The version included in the clapack on OSX has the
# more recent signature though.
# slamch and dlamch are not in the lapack src directory, but, since they
# already have Python wrappers, we'll wrap them as well.
# The other manual signatures are used because the signature generating
# functions don't work when function pointer arguments are used.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
if __name__ == '__main__':
from sys import argv
libname, src_dir, outfile = argv[1:]
# Exclude scabs and sisnan since they aren't currently included
# in the scipy-specific ABI wrappers.
if libname.lower() == 'blas':
sigs_from_dir(src_dir, outfile, exclusions=['scabs1', 'xerbla'])
elif libname.lower() == 'lapack':
# Exclude all routines that do not have consistent interfaces from
# LAPACK 3.1.0 through 3.6.0.
# Also exclude routines with string arguments to avoid
# compatibility woes with different standards for string arguments.
# Exclude sisnan and slaneg since they aren't currently included in
# The ABI compatibility wrappers.
exclusions = ['sisnan', 'csrot', 'zdrot', 'ilaenv', 'iparmq', 'lsamen',
'xerbla', 'zcgesv', 'dlaisnan', 'slaisnan', 'dlazq3',
'dlazq4', 'slazq3', 'slazq4', 'dlasq3', 'dlasq4',
'slasq3', 'slasq4', 'dlasq5', 'slasq5', 'slaneg',
# Routines deprecated in LAPACK 3.6.0
'cgegs', 'cgegv', 'cgelsx', 'cgeqpf', 'cggsvd', 'cggsvp',
'clahrd', 'clatzm', 'ctzrqf', 'dgegs', 'dgegv', 'dgelsx',
'dgeqpf', 'dggsvd', 'dggsvp', 'dlahrd', 'dlatzm', 'dtzrqf',
'sgegs', 'sgegv', 'sgelsx', 'sgeqpf', 'sggsvd', 'sggsvp',
'slahrd', 'slatzm', 'stzrqf', 'zgegs', 'zgegv', 'zgelsx',
'zgeqpf', 'zggsvd', 'zggsvp', 'zlahrd', 'zlatzm', 'ztzrqf']
sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
exclusions=exclusions)
| bsd-3-clause | -2,962,760,018,220,139,000 | 64.382813 | 297 | 0.620982 | false |
samdoran/ansible | lib/ansible/modules/network/nxos/nxos_evpn_vni.py | 25 | 10474 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nxos_evpn_vni
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
description:
- Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network
Identifier (VNI) configurations of a Nexus device.
author: Gabriele Gerbino (@GGabriele)
notes:
- default, where supported, restores a parameter's default value.
- RD override is not permitted. You should set it to the default values
first and then reconfigure it.
- C(route_target_both), C(route_target_import) and
C(route_target_export) valid values are a list of extended communities
(i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'.
- The C(route_target_both) property is discouraged due to the inconsistent
behavior of the property across Nexus platforms and image versions.
For this reason it is recommended to use explicit C(route_target_export)
and C(route_target_import) properties instead of C(route_target_both).
- RD valid values are a string in one of the route-distinguisher formats,
the keyword 'auto', or the keyword 'default'.
options:
vni:
description:
- The EVPN VXLAN Network Identifier.
required: true
default: null
route_distinguisher:
description:
- The VPN Route Distinguisher (RD). The RD is combined with
the IPv4 or IPv6 prefix learned by the PE router to create a
globally unique address.
required: true
default: null
route_target_both:
description:
- Enables/Disables route-target settings for both import and
export target communities using a single property.
required: false
default: null
route_target_import:
description:
- Sets the route-target 'import' extended communities.
required: false
default: null
route_target_export:
description:
- Sets the route-target 'export' extended communities.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: vni configuration
nxos_evpn_vni:
vni: 6000
route_distinguisher: "60:10"
route_target_import:
- "5000:10"
- "4100:100"
route_target_export: auto
route_target_both: default
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"]
'''
import re
import time
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'vni': 'vni',
'route_distinguisher': 'rd',
'route_target_both': 'route-target both',
'route_target_import': 'route-target import',
'route_target_export': 'route-target export'
}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
command_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
value = ''
if command in config:
value = command_re.search(config).group('value')
return value
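# Illustrative only: given a config section containing the line "rd 60:10",
# get_value('route_distinguisher', config, module) returns '60:10'.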
def get_route_target_value(arg, config, module):
splitted_config = config.splitlines()
value_list = []
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
command_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
for line in splitted_config:
value = ''
if command in line.strip():
value = command_re.search(line).group('value')
value_list.append(value)
return value_list
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg != 'vni':
if arg == 'route_distinguisher':
existing[arg] = get_value(arg, config, module)
else:
existing[arg] = get_route_target_value(arg, config, module)
existing_fix = dict((k, v) for k, v in existing.items() if v)
if existing_fix:
existing['vni'] = module.params['vni']
else:
existing = existing_fix
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
def fix_proposed(proposed_commands):
new_proposed = {}
for key, value in proposed_commands.items():
if key == 'route-target both':
new_proposed['route-target export'] = value
new_proposed['route-target import'] = value
else:
new_proposed[key] = value
return new_proposed
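# For example, a proposed {'route-target both': ['65000:100']} comes back as
# {'route-target export': ['65000:100'], 'route-target import': ['65000:100']},
# so the rest of state_present only deals with explicit import/export keys.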
def state_present(module, existing, proposed):
commands = list()
parents = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
if proposed_commands.get('route-target both'):
proposed_commands = fix_proposed(proposed_commands)
for key, value in proposed_commands.items():
if key.startswith('route-target'):
if value == ['default']:
existing_value = existing_commands.get(key)
if existing_value:
for target in existing_value:
commands.append('no {0} {1}'.format(key, target))
elif not isinstance(value, list):
value = [value]
for target in value:
if existing:
if target not in existing.get(key.replace('-', '_').replace(' ', '_')):
commands.append('{0} {1}'.format(key, target))
else:
commands.append('{0} {1}'.format(key, target))
elif value == 'default':
existing_value = existing_commands.get(key)
if existing_value:
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
return commands, parents
def state_absent(module, existing, proposed):
commands = ['no vni {0} l2'.format(module.params['vni'])]
parents = ['evpn']
return commands, parents
def main():
argument_spec = dict(
vni=dict(required=True, type='str'),
route_distinguisher=dict(required=False, type='str'),
route_target_both=dict(required=False, type='list'),
route_target_import=dict(required=False, type='list'),
route_target_export=dict(required=False, type='list'),
state=dict(choices=['present', 'absent'], default='present', required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = dict(changed=False, warnings=warnings)
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
commands = []
parents = []
proposed = {}
for key, value in proposed_args.items():
if key != 'vni':
if value == 'true':
value = True
elif value == 'false':
value = False
if existing.get(key) != value:
proposed[key] = value
if state == 'present':
commands, parents = state_present(module, existing, proposed)
elif state == 'absent' and existing:
commands, parents = state_absent(module, existing, proposed)
if commands:
if (existing.get('route_distinguisher') and
proposed.get('route_distinguisher')):
if (existing['route_distinguisher'] != proposed['route_distinguisher'] and
proposed['route_distinguisher'] != 'default'):
warnings.append('EVPN RD {0} was automatically removed. '
'It is highly recommended to use a task '
'(with default as value) to explicitly '
'unconfigure it.'.format(existing['route_distinguisher']))
remove_commands = ['no rd {0}'.format(existing['route_distinguisher'])]
candidate = CustomNetworkConfig(indent=3)
candidate.add(remove_commands, parents=parents)
load_config(module, candidate)
results['changed'] = True
results['commands'] = candidate.items_text()
time.sleep(30)
else:
candidate = CustomNetworkConfig(indent=3)
candidate.add(commands, parents=parents)
load_config(module, candidate)
results['changed'] = True
results['commands'] = candidate.items_text()
else:
results['commands'] = []
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,545,773,953,491,117,000 | 32.89644 | 91 | 0.62068 | false |
NeuralEnsemble/neuroConstruct | lib/jython/Lib/test/test_shelve.py | 138 | 4596 | import os
import unittest
import shelve
import glob
from test import test_support
test_support.import_module('anydbm', deprecated=True)
class TestCase(unittest.TestCase):
fn = "shelftemp" + os.extsep + "db"
def test_close(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
s.close()
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
def test_ascii_file_shelf(self):
try:
s = shelve.open(self.fn, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
finally:
for f in glob.glob(self.fn+"*"):
os.unlink(f)
def test_binary_file_shelf(self):
try:
s = shelve.open(self.fn, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
finally:
for f in glob.glob(self.fn+"*"):
os.unlink(f)
def test_proto2_file_shelf(self):
try:
s = shelve.open(self.fn, protocol=2)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
finally:
for f in glob.glob(self.fn+"*"):
os.unlink(f)
def test_in_memory_shelf(self):
d1 = {}
s = shelve.Shelf(d1, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
d2 = {}
s = shelve.Shelf(d2, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
self.assertEqual(len(d1), 1)
self.assertNotEqual(d1, d2)
def test_mutable_entry(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4])
s.close()
d2 = {}
s = shelve.Shelf(d2, protocol=2, writeback=True)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4,5])
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
def test_writeback_also_writes_immediately(self):
# Issue 5754
d = {}
s = shelve.Shelf(d, writeback=True)
s['key'] = [1]
p1 = d['key'] # Will give a KeyError if backing store not updated
s['key'].append(2)
s.close()
p2 = d['key']
self.assertNotEqual(p1, p2) # Write creates new object in store
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
fn = "shelftemp.db"
counter = 0
def __init__(self, *args, **kw):
self._db = []
mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
type2test = shelve.Shelf
def _reference(self):
return {"key1":"value1", "key2":2, "key3":(1,2,3)}
def _empty_mapping(self):
if self._in_mem:
x= shelve.Shelf({}, **self._args)
else:
self.counter+=1
x= shelve.open(self.fn+str(self.counter), **self._args)
self._db.append(x)
return x
def tearDown(self):
for db in self._db:
db.close()
self._db = []
if not self._in_mem:
for f in glob.glob(self.fn+"*"):
test_support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
test_support.run_unittest(
TestAsciiFileShelve,
TestBinaryFileShelve,
TestProto2FileShelve,
TestAsciiMemShelve,
TestBinaryMemShelve,
TestProto2MemShelve,
TestCase
)
if __name__ == "__main__":
test_main()
| gpl-2.0 | 5,054,181,829,437,752,000 | 27.196319 | 74 | 0.532637 | false |
hjanime/VisTrails | vistrails/db/versions/v0_9_0/domain/abstraction.py | 3 | 2728 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
from auto_gen import DBAbstraction as _DBAbstraction
from auto_gen import DBAbstractionRef, DBModule
from id_scope import IdScope
class DBAbstraction(_DBAbstraction):
def __init__(self, *args, **kwargs):
_DBAbstraction.__init__(self, *args, **kwargs)
self.idScope = IdScope(remap={DBAbstractionRef.vtType: DBModule.vtType})
self.idScope.setBeginId('action', 1)
def __copy__(self):
return DBAbstraction.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBAbstraction.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBAbstraction
# need to go through and reset the index to the copied objects
cp.idScope = copy.copy(self.idScope)
return cp
| bsd-3-clause | 1,268,253,190,841,583,600 | 46.034483 | 80 | 0.689516 | false |
SanPen/GridCal | src/research/opf/dc_opf_3.py | 1 | 8418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program implements the DC power flow as a linear program
This version uses the sparse structures and it the problem compilation is
blazing fast compared to the full matrix version
"""
from pulp import *
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from GridCal.Engine import *
class DcOpf3:
def __init__(self, multi_circuit: MultiCircuit):
"""
OPF simple dispatch problem
:param multi_circuit: GridCal Circuit instance (remember this must be a connected island)
"""
self.multi_circuit = multi_circuit
# circuit compilation
self.numerical_circuit = self.multi_circuit.compile_snapshot()
self.islands = self.numerical_circuit.compute()
self.Sbase = multi_circuit.Sbase
self.B = csc_matrix(self.numerical_circuit.get_B())
self.nbus = self.B.shape[0]
# node sets
self.pqpv = self.islands[0].pqpv
self.pv = self.islands[0].pv
self.vd = self.islands[0].ref
self.pq = self.islands[0].pq
# declare the voltage angles
self.theta = [None] * self.nbus
for i in range(self.nbus):
self.theta[i] = LpVariable("Theta" + str(i), -0.5, 0.5)
# declare the generation
self.PG = list()
def solve(self):
"""
Solve OPF using the sparse formulation
:return:
"""
'''
CSR format explanation:
The standard CSR representation where the column indices for row i are stored in
-> indices[indptr[i]:indptr[i+1]]
and their corresponding values are stored in
-> data[indptr[i]:indptr[i+1]]
If the shape parameter is not supplied, the matrix dimensions are inferred from the index arrays.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
'''
# print('Compiling LP')
prob = LpProblem("DC optimal power flow", LpMinimize)
################################################################################################################
# Add the objective function
################################################################################################################
fobj = 0
# add the voltage angles multiplied by zero (trick)
for j in self.pqpv:
fobj += self.theta[j] * 0.0
# Add the generators cost
for bus in self.multi_circuit.buses:
# check that there are at least one generator at the slack node
if len(bus.controlled_generators) == 0 and bus.type == BusMode.Slack:
raise Warning('There is no generator at the Slack node ' + bus.name + '!!!')
# Add the bus LP vars
for gen in bus.controlled_generators:
# create the generation variable
gen.initialize_lp_vars()
# add the variable to the objective function
fobj += gen.LPVar_P * gen.Cost
self.PG.append(gen.LPVar_P) # add the var reference just to print later...
# Add the objective function to the problem
prob += fobj
################################################################################################################
# Add the matrix multiplication as constraints
# See: https://math.stackexchange.com/questions/1727572/solving-a-feasible-system-of-linear-equations-
# using-linear-programming
################################################################################################################
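# Each PQ/PV node i gets the DC balance constraint
#   sum_j B[i, j] * theta[j] == sum(generation at i) - sum(load at i),
# with slack columns skipped here because the slack angles are pinned to
# zero further down.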
for i in self.pqpv:
s = 0
d = 0
# add the calculated node power
for ii in range(self.B.indptr[i], self.B.indptr[i+1]):
j = self.B.indices[ii]
if j not in self.vd:
s += self.B.data[ii] * self.theta[j]
# add the generation LP vars
for gen in self.multi_circuit.buses[i].controlled_generators:
d += gen.LPVar_P
# add the nodal demand
for load in self.multi_circuit.buses[i].loads:
d -= load.P / self.Sbase
prob.add(s == d, 'ct_node_mismatch_' + str(i))
################################################################################################################
# set the slack nodes voltage angle
################################################################################################################
for i in self.vd:
prob.add(self.theta[i] == 0, 'ct_slack_theta')
################################################################################################################
# set the slack generator power
################################################################################################################
for i in self.vd:
val = 0
g = 0
# compute the slack node power
for ii in range(self.B.indptr[i], self.B.indptr[i+1]):
j = self.B.indices[ii]
val += self.B.data[ii] * self.theta[j]
# Sum the slack generators
for gen in self.multi_circuit.buses[i].controlled_generators:
g += gen.LPVar_P
# the sum of the slack node generators must be equal to the slack node power
prob.add(g == val, 'ct_slack_power_' + str(i))
################################################################################################################
# Set the branch limits
################################################################################################################
buses_dict = {bus: i for i, bus in enumerate(self.multi_circuit.buses)}
for k, branch in enumerate(self.multi_circuit.branches):
i = buses_dict[branch.bus_from]
j = buses_dict[branch.bus_to]
# branch flow
Fij = self.B[i, j] * (self.theta[i] - self.theta[j])
Fji = self.B[i, j] * (self.theta[j] - self.theta[i])
# constraints
prob.add(Fij <= branch.rate / self.Sbase, 'ct_br_flow_ij_' + str(k))
prob.add(Fji <= branch.rate / self.Sbase, 'ct_br_flow_ji_' + str(k))
################################################################################################################
# Solve
################################################################################################################
print('Solving LP')
prob.solve() # solve with CBC
# prob.solve(CPLEX())
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# The optimised objective function value is printed to the screen
print("Cost =", value(prob.objective), '€')
def print(self):
"""
Print results
:return:
"""
print('\nVoltages in p.u.')
for i, th in enumerate(self.theta):
print('Bus', i, '->', 1, '<', th.value(), 'rad')
print('\nGeneration power (in MW)')
for i, g in enumerate(self.PG):
val = g.value() * self.Sbase if g.value() is not None else 'None'
print(g.name, '->', val)
# Set the branch limits
print('\nBranch flows (in MW)')
buses_dict = {bus: i for i, bus in enumerate(self.multi_circuit.buses)}
for k, branch in enumerate(self.multi_circuit.branches):
i = buses_dict[branch.bus_from]
j = buses_dict[branch.bus_to]
if self.theta[i].value() is not None and self.theta[j].value() is not None:
F = self.B[i, j] * (self.theta[i].value() - self.theta[j].value()) * self.Sbase
else:
F = 'None'
print('Branch ' + str(i) + '-' + str(j) + '(', branch.rate, 'MW) ->', F)
if __name__ == '__main__':
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Lynn 5 Bus pv.gridcal'
grid = FileOpen(fname).open()
# grid = FileOpen('IEEE30.xlsx').open()
# grid = FileOpen('Illinois200Bus.xlsx').open()
# declare and solve problem
problem = DcOpf3(grid)
problem.solve()
problem.print()
| gpl-3.0 | -8,476,126,009,338,461,000 | 38.144186 | 120 | 0.467681 | false |
tungvx/deploy | appengine_django/management/commands/testserver.py | 45 | 2504 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Overrides the default Django testserver command.
Instead of starting the default Django development server this command fires
up a copy of the full fledged appengine dev_appserver.
The appserver is always initialised with a blank datastore with the specified
fixtures loaded into it.
"""
help = 'Runs the development server with data from the given fixtures.'
def run_from_argv(self, argv):
fixtures = argv[2:]
# Ensure an on-disk test datastore is used.
from django.db import connection
connection.use_test_datastore = True
connection.test_datastore_inmemory = False
# Flush any existing test datastore.
connection.flush()
# Load the fixtures.
from django.core.management import call_command
call_command('loaddata', 'initial_data')
if fixtures:
call_command('loaddata', *fixtures)
# Build new arguments for dev_appserver.
datastore_path, history_path = get_test_datastore_paths(False)
new_args = argv[0:1]
new_args.extend(['--datastore_path', datastore_path])
new_args.extend(['--history_path', history_path])
new_args.extend([os.getcwdu()])
# Add email settings
from django.conf import settings
new_args.extend(['--smtp_host', settings.EMAIL_HOST,
'--smtp_port', str(settings.EMAIL_PORT),
'--smtp_user', settings.EMAIL_HOST_USER,
'--smtp_password', settings.EMAIL_HOST_PASSWORD])
# Allow skipped files so we don't die
new_args.extend(['--allow_skipped_files'])
# Start the test dev_appserver.
from google.appengine.tools import dev_appserver_main
dev_appserver_main.main(new_args)
| apache-2.0 | 4,503,622,777,847,848,400 | 32.837838 | 79 | 0.711262 | false |
jaspreetw/tempest | tempest/tests/base.py | 42 | 1610 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from oslotest import moxstubout
class TestCase(base.BaseTestCase):
def setUp(self):
super(TestCase, self).setUp()
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
def patch(self, target, **kwargs):
"""
Returns a started `mock.patch` object for the supplied target.
The caller may then call the returned patcher to create a mock object.
The caller does not need to call stop() on the returned
patcher object, as this method automatically adds a cleanup
to the test class to stop the patcher.
:param target: String module.class or module.object expression to patch
:param **kwargs: Passed as-is to `mock.patch`. See mock documentation
for details.
"""
p = mock.patch(target, **kwargs)
m = p.start()
self.addCleanup(p.stop)
return m
| apache-2.0 | 8,564,911,682,959,017,000 | 34.777778 | 79 | 0.668944 | false |
RAtechntukan/CouchPotatoServer | libs/pyutil/platformutil.py | 106 | 3607 | # Thanks to Daenyth for help porting this to Arch Linux.
import os, platform, re, subprocess
_distributor_id_cmdline_re = re.compile("(?:Distributor ID:)\s*(.*)", re.I)
_release_cmdline_re = re.compile("(?:Release:)\s*(.*)", re.I)
_distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
global _distname,_version
_distname = None
_version = None
def get_linux_distro():
""" Tries to determine the name of the Linux OS distribution name.
First, try to parse a file named "/etc/lsb-release". If it exists, and
contains the "DISTRIB_ID=" line and the "DISTRIB_RELEASE=" line, then return
the strings parsed from that file.
If that doesn't work, then invoke platform.dist().
If that doesn't work, then try to execute "lsb_release", as standardized in
2001:
http://refspecs.freestandards.org/LSB_1.0.0/gLSB/lsbrelease.html
The current version of the standard is here:
http://refspecs.freestandards.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/lsbrelease.html
that lsb_release emitted, as strings.
Returns a tuple (distname,version). Distname is what LSB calls a
"distributor id", e.g. "Ubuntu". Version is what LSB calls a "release",
e.g. "8.04".
A version of this has been submitted to python as a patch for the standard
library module "platform":
http://bugs.python.org/issue3937
"""
global _distname,_version
if _distname and _version:
return (_distname, _version)
try:
etclsbrel = open("/etc/lsb-release", "rU")
for line in etclsbrel:
m = _distributor_id_file_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_file_re.search(line)
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
(_distname, _version) = platform.dist()[:2]
if _distname and _version:
return (_distname, _version)
try:
p = subprocess.Popen(["lsb_release", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rc = p.wait()
if rc == 0:
for line in p.stdout.readlines():
m = _distributor_id_cmdline_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_cmdline_re.search(p.stdout.read())
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
if os.path.exists("/etc/arch-release"):
return ("Arch_Linux", "")
return (_distname,_version)
def get_platform():
# Our version of platform.platform(), telling us both less and more than the
# Python Standard Library's version does.
# We omit details such as the Linux kernel version number, but we add a
# more detailed and correct rendition of the Linux distribution and
# distribution-version.
if "linux" in platform.system().lower():
return platform.system()+"-"+"_".join(get_linux_distro())+"-"+platform.machine()+"-"+"_".join([x for x in platform.architecture() if x])
else:
return platform.platform()
| gpl-3.0 | 3,251,873,817,201,707,500 | 35.07 | 144 | 0.596618 | false |
inetCatapult/flask-sqlalchemy | test_sqlalchemy.py | 7 | 23928 | from __future__ import with_statement
import atexit
import unittest
from datetime import datetime
import flask
import flask_sqlalchemy as sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import sessionmaker
def make_todo_model(db):
class Todo(db.Model):
__tablename__ = 'todos'
id = db.Column('todo_id', db.Integer, primary_key=True)
title = db.Column(db.String(60))
text = db.Column(db.String)
done = db.Column(db.Boolean)
pub_date = db.Column(db.DateTime)
def __init__(self, title, text):
self.title = title
self.text = text
self.done = False
self.pub_date = datetime.utcnow()
return Todo
class BasicAppTestCase(unittest.TestCase):
def setUp(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
self.Todo = make_todo_model(db)
@app.route('/')
def index():
return '\n'.join(x.title for x in self.Todo.query.all())
@app.route('/add', methods=['POST'])
def add():
form = flask.request.form
todo = self.Todo(form['title'], form['text'])
db.session.add(todo)
db.session.commit()
return 'added'
db.create_all()
self.app = app
self.db = db
def tearDown(self):
self.db.drop_all()
def test_basic_insert(self):
c = self.app.test_client()
c.post('/add', data=dict(title='First Item', text='The text'))
c.post('/add', data=dict(title='2nd Item', text='The text'))
rv = c.get('/')
self.assertEqual(rv.data, b'First Item\n2nd Item')
def test_query_recording(self):
with self.app.test_request_context():
todo = self.Todo('Test 1', 'test')
self.db.session.add(todo)
self.db.session.commit()
queries = sqlalchemy.get_debug_queries()
self.assertEqual(len(queries), 1)
query = queries[0]
self.assertTrue('insert into' in query.statement.lower())
self.assertEqual(query.parameters[0], 'Test 1')
self.assertEqual(query.parameters[1], 'test')
self.assertTrue('test_sqlalchemy.py' in query.context)
self.assertTrue('test_query_recording' in query.context)
def test_helper_api(self):
self.assertEqual(self.db.metadata, self.db.Model.metadata)
class CustomMetaDataTestCase(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
self.app.config['TESTING'] = True
def test_custom_metadata_positive(self):
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=convention)
db = sqlalchemy.SQLAlchemy(self.app, metadata=metadata)
self.db = db
class One(db.Model):
id = db.Column(db.Integer, primary_key=True)
myindex = db.Column(db.Integer, index=True)
class Two(db.Model):
id = db.Column(db.Integer, primary_key=True)
one_id = db.Column(db.Integer, db.ForeignKey(One.id))
myunique = db.Column(db.Integer, unique=True)
self.assertEqual(list(One.__table__.constraints)[0].name, 'pk_one')
self.assertEqual(list(One.__table__.indexes)[0].name, 'ix_one_myindex')
self.assertTrue('fk_two_one_id_one' in [c.name for c in Two.__table__.constraints])
self.assertTrue('uq_two_myunique' in [c.name for c in Two.__table__.constraints])
self.assertTrue('pk_two' in [c.name for c in Two.__table__.constraints])
def test_custom_metadata_negative(self):
db = sqlalchemy.SQLAlchemy(self.app, metadata=None)
self.db = db
class One(db.Model):
id = db.Column(db.Integer, primary_key=True)
myindex = db.Column(db.Integer, index=True)
class Two(db.Model):
id = db.Column(db.Integer, primary_key=True)
one_id = db.Column(db.Integer, db.ForeignKey(One.id))
myunique = db.Column(db.Integer, unique=True)
self.assertNotEqual(list(One.__table__.constraints)[0].name, 'pk_one')
self.assertFalse('fk_two_one_id_one' in [c.name for c in Two.__table__.constraints])
self.assertFalse('uq_two_myunique' in [c.name for c in Two.__table__.constraints])
self.assertFalse('pk_two' in [c.name for c in Two.__table__.constraints])
class TestQueryProperty(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
self.app.config['TESTING'] = True
def test_no_app_bound(self):
db = sqlalchemy.SQLAlchemy()
db.init_app(self.app)
Todo = make_todo_model(db)
# If no app is bound to the SQLAlchemy instance, a
# request context is required to access Model.query.
self.assertRaises(RuntimeError, getattr, Todo, 'query')
with self.app.test_request_context():
db.create_all()
todo = Todo('Test', 'test')
db.session.add(todo)
db.session.commit()
self.assertEqual(len(Todo.query.all()), 1)
def test_app_bound(self):
db = sqlalchemy.SQLAlchemy(self.app)
Todo = make_todo_model(db)
db.create_all()
# If an app was passed to the SQLAlchemy constructor,
# the query property is always available.
todo = Todo('Test', 'test')
db.session.add(todo)
db.session.commit()
self.assertEqual(len(Todo.query.all()), 1)
class SignallingTestCase(unittest.TestCase):
def setUp(self):
self.app = app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
self.db = sqlalchemy.SQLAlchemy(app)
self.Todo = make_todo_model(self.db)
self.db.create_all()
def tearDown(self):
self.db.drop_all()
def test_before_committed(self):
class Namespace(object):
is_received = False
def before_committed(sender, changes):
Namespace.is_received = True
with sqlalchemy.before_models_committed.connected_to(before_committed, sender=self.app):
todo = self.Todo('Awesome', 'the text')
self.db.session.add(todo)
self.db.session.commit()
self.assertTrue(Namespace.is_received)
def test_model_signals(self):
recorded = []
def committed(sender, changes):
self.assertTrue(isinstance(changes, list))
recorded.extend(changes)
with sqlalchemy.models_committed.connected_to(committed,
sender=self.app):
todo = self.Todo('Awesome', 'the text')
self.db.session.add(todo)
self.assertEqual(len(recorded), 0)
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'insert')
del recorded[:]
todo.text = 'aha'
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'update')
del recorded[:]
self.db.session.delete(todo)
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'delete')
class TablenameTestCase(unittest.TestCase):
def test_name(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
class BazBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Ham(db.Model):
__tablename__ = 'spam'
id = db.Column(db.Integer, primary_key=True)
self.assertEqual(FOOBar.__tablename__, 'foo_bar')
self.assertEqual(BazBar.__tablename__, 'baz_bar')
self.assertEqual(Ham.__tablename__, 'spam')
def test_single_name(self):
"""Single table inheritance should not set a new name."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Mallard(Duck):
pass
self.assertEqual(Mallard.__tablename__, 'duck')
def test_joined_name(self):
"""Model has a separate primary key; it should set a new name."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Donald(Duck):
id = db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
self.assertEqual(Donald.__tablename__, 'donald')
def test_mixin_name(self):
"""Primary key provided by mixin should still allow model to set tablename."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Base(object):
id = db.Column(db.Integer, primary_key=True)
class Duck(Base, db.Model):
pass
self.assertFalse(hasattr(Base, '__tablename__'))
self.assertEqual(Duck.__tablename__, 'duck')
def test_abstract_name(self):
"""Abstract model should not set a name. Subclass should set a name."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
class Duck(Base):
pass
self.assertFalse(hasattr(Base, '__tablename__'))
self.assertEqual(Duck.__tablename__, 'duck')
def test_complex_inheritance(self):
"""Joined table inheritance, but the new primary key is provided by a mixin, not directly on the class."""
app = flask.Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db = sqlalchemy.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class IdMixin(object):
@declared_attr
def id(cls):
return db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
class RubberDuck(IdMixin, Duck):
pass
self.assertEqual(RubberDuck.__tablename__, 'rubber_duck')
class PaginationTestCase(unittest.TestCase):
def test_basic_pagination(self):
p = sqlalchemy.Pagination(None, 1, 20, 500, [])
self.assertEqual(p.page, 1)
self.assertFalse(p.has_prev)
self.assertTrue(p.has_next)
self.assertEqual(p.total, 500)
self.assertEqual(p.pages, 25)
self.assertEqual(p.next_num, 2)
self.assertEqual(list(p.iter_pages()),
[1, 2, 3, 4, 5, None, 24, 25])
p.page = 10
self.assertEqual(list(p.iter_pages()),
[1, 2, None, 8, 9, 10, 11, 12, 13, 14, None, 24, 25])
def test_pagination_pages_when_0_items_per_page(self):
p = sqlalchemy.Pagination(None, 1, 0, 500, [])
self.assertEqual(p.pages, 0)
def test_query_paginate(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
Todo = make_todo_model(db)
db.create_all()
with app.app_context():
db.session.add_all([Todo('', '') for _ in range(100)])
db.session.commit()
@app.route('/')
def index():
p = Todo.query.paginate()
return '{0} items retrieved'.format(len(p.items))
c = app.test_client()
# request default
r = c.get('/')
self.assertEqual(r.status_code, 200)
# request args
r = c.get('/?per_page=10')
self.assertEqual(r.data.decode('utf8'), '10 items retrieved')
with app.app_context():
# query default
p = Todo.query.paginate()
self.assertEqual(p.total, 100)
class BindsTestCase(unittest.TestCase):
def test_basic_binds(self):
import tempfile
_, db1 = tempfile.mkstemp()
_, db2 = tempfile.mkstemp()
def _remove_files():
import os
try:
os.remove(db1)
os.remove(db2)
            except OSError:
pass
atexit.register(_remove_files)
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['SQLALCHEMY_BINDS'] = {
'foo': 'sqlite:///' + db1,
'bar': 'sqlite:///' + db2
}
db = sqlalchemy.SQLAlchemy(app)
class Foo(db.Model):
__bind_key__ = 'foo'
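            # flask-sqlalchemy also records __bind_key__ in table.info;
            # the explicit __table_args__ below spells out that same mapping.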
__table_args__ = {"info": {"bind_key": "foo"}}
id = db.Column(db.Integer, primary_key=True)
class Bar(db.Model):
__bind_key__ = 'bar'
id = db.Column(db.Integer, primary_key=True)
class Baz(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
# simple way to check if the engines are looked up properly
self.assertEqual(db.get_engine(app, None), db.engine)
for key in 'foo', 'bar':
engine = db.get_engine(app, key)
connector = app.extensions['sqlalchemy'].connectors[key]
self.assertEqual(engine, connector.get_engine())
self.assertEqual(str(engine.url),
app.config['SQLALCHEMY_BINDS'][key])
# do the models have the correct engines?
self.assertEqual(db.metadata.tables['foo'].info['bind_key'], 'foo')
self.assertEqual(db.metadata.tables['bar'].info['bind_key'], 'bar')
self.assertEqual(db.metadata.tables['baz'].info.get('bind_key'), None)
# see the tables created in an engine
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'foo'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('foo' in metadata.tables)
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'bar'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('bar' in metadata.tables)
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('baz' in metadata.tables)
        # does the session have the right binds set?
self.assertEqual(db.get_binds(app), {
Foo.__table__: db.get_engine(app, 'foo'),
Bar.__table__: db.get_engine(app, 'bar'),
Baz.__table__: db.get_engine(app, None)
})
class DefaultQueryClassTestCase(unittest.TestCase):
def test_default_query_class(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
class Parent(db.Model):
id = db.Column(db.Integer, primary_key=True)
children = db.relationship("Child", backref = "parents", lazy='dynamic')
class Child(db.Model):
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.Integer, db.ForeignKey('parent.id'))
p = Parent()
c = Child()
c.parent = p
self.assertEqual(type(Parent.query), sqlalchemy.BaseQuery)
self.assertEqual(type(Child.query), sqlalchemy.BaseQuery)
self.assertTrue(isinstance(p.children, sqlalchemy.BaseQuery))
#self.assertTrue(isinstance(c.parents, sqlalchemy.BaseQuery))
class SQLAlchemyIncludesTestCase(unittest.TestCase):
def test(self):
"""Various SQLAlchemy objects are exposed as attributes.
"""
db = sqlalchemy.SQLAlchemy()
import sqlalchemy as sqlalchemy_lib
self.assertTrue(db.Column == sqlalchemy_lib.Column)
# The Query object we expose is actually our own subclass.
from flask_sqlalchemy import BaseQuery
self.assertTrue(db.Query == BaseQuery)
class RegressionTestCase(unittest.TestCase):
def test_joined_inheritance(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
id = db.Column(db.Integer, db.ForeignKey('base.id'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': 'sub'}
self.assertEqual(Base.__tablename__, 'base')
self.assertEqual(SubBase.__tablename__, 'sub_base')
db.create_all()
def test_single_table_inheritance(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
__mapper_args__ = {'polymorphic_identity': 'sub'}
self.assertEqual(Base.__tablename__, 'base')
self.assertEqual(SubBase.__tablename__, 'base')
db.create_all()
def test_joined_inheritance_relation(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
class Relation(db.Model):
id = db.Column(db.Integer, primary_key=True)
base_id = db.Column(db.Integer, db.ForeignKey('base.id'))
name = db.Column(db.Unicode(20))
def __init__(self, name):
self.name = name
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
id = db.Column(db.Integer, db.ForeignKey('base.id'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': u'sub'}
relations = db.relationship(Relation)
db.create_all()
base = SubBase()
base.relations = [Relation(name=u'foo')]
db.session.add(base)
db.session.commit()
base = base.query.one()
def test_connection_binds(self):
app = flask.Flask(__name__)
db = sqlalchemy.SQLAlchemy(app)
assert db.session.connection()
class SessionScopingTestCase(unittest.TestCase):
def test_default_session_scoping(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
with app.test_request_context():
fb = FOOBar()
db.session.add(fb)
assert fb in db.session
def test_session_scoping_changing(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
def scopefunc():
return id(dict())
db = sqlalchemy.SQLAlchemy(app, session_options=dict(scopefunc=scopefunc))
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
with app.test_request_context():
fb = FOOBar()
db.session.add(fb)
assert fb not in db.session # because a new scope is generated on each call
class CommitOnTeardownTestCase(unittest.TestCase):
def setUp(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = sqlalchemy.SQLAlchemy(app)
Todo = make_todo_model(db)
db.create_all()
@app.route('/')
def index():
return '\n'.join(x.title for x in Todo.query.all())
@app.route('/create', methods=['POST'])
def create():
db.session.add(Todo('Test one', 'test'))
if flask.request.form.get('fail'):
raise RuntimeError("Failing as requested")
return 'ok'
self.client = app.test_client()
def test_commit_on_success(self):
resp = self.client.post('/create')
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.client.get('/').data, b'Test one')
def test_roll_back_on_failure(self):
resp = self.client.post('/create', data={'fail': 'on'})
self.assertEqual(resp.status_code, 500)
self.assertEqual(self.client.get('/').data, b'')
class StandardSessionTestCase(unittest.TestCase):
def test_insert_update_delete(self):
# Ensure _SignalTrackingMapperExtension doesn't croak when
# faced with a vanilla SQLAlchemy session.
#
# Verifies that "AttributeError: 'SessionMaker' object has no attribute '_model_changes'"
# is not thrown.
app = flask.Flask(__name__)
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
db = sqlalchemy.SQLAlchemy(app)
Session = sessionmaker(bind=db.engine)
class QazWsx(db.Model):
id = db.Column(db.Integer, primary_key=True)
x = db.Column(db.String, default='')
db.create_all()
session = Session()
session.add(QazWsx())
session.flush() # issues an INSERT.
session.expunge_all()
qaz_wsx = session.query(QazWsx).first()
assert qaz_wsx.x == ''
qaz_wsx.x = 'test'
session.flush() # issues an UPDATE.
session.expunge_all()
qaz_wsx = session.query(QazWsx).first()
assert qaz_wsx.x == 'test'
session.delete(qaz_wsx) # issues a DELETE.
assert session.query(QazWsx).first() is None
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BasicAppTestCase))
suite.addTest(unittest.makeSuite(CustomMetaDataTestCase))
suite.addTest(unittest.makeSuite(TestQueryProperty))
suite.addTest(unittest.makeSuite(TablenameTestCase))
suite.addTest(unittest.makeSuite(PaginationTestCase))
suite.addTest(unittest.makeSuite(BindsTestCase))
suite.addTest(unittest.makeSuite(DefaultQueryClassTestCase))
suite.addTest(unittest.makeSuite(SQLAlchemyIncludesTestCase))
suite.addTest(unittest.makeSuite(RegressionTestCase))
suite.addTest(unittest.makeSuite(SessionScopingTestCase))
suite.addTest(unittest.makeSuite(CommitOnTeardownTestCase))
if flask.signals_available:
suite.addTest(unittest.makeSuite(SignallingTestCase))
suite.addTest(unittest.makeSuite(StandardSessionTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause | -191,731,611,490,733,000 | 33.23176 | 114 | 0.584838 | false |
un33k/CouchPotatoServer | libs/axl/axel.py | 65 | 13262 | # axel.py
#
# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom
# Edits by Ruud Burger
#
# Based on an idea by Peter Thatcher, found on
# http://www.valuedlessons.com/2008/04/events-in-python.html
#
# This module is part of Axel and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Source: http://pypi.python.org/pypi/axel
# Docs: http://packages.python.org/axel
from Queue import Empty, Queue
import hashlib
import sys
import threading
from couchpotato.core.helpers.variable import natsortKey
class Event(object):
"""
Event object inspired by C# events. Handlers can be registered and
unregistered using += and -= operators. Execution and result are
influenced by the arguments passed to the constructor and += method.
from axel import Event
event = Event()
def on_event(*args, **kwargs):
return (args, kwargs)
event += on_event # handler registration
print(event(10, 20, y=30))
>> ((True, ((10, 20), {'y': 30}), <function on_event at 0x00BAA270>),)
event -= on_event # handler is unregistered
print(event(10, 20, y=30))
>> None
class Mouse(object):
def __init__(self):
self.click = Event(self)
self.click += self.on_click # handler registration
def on_click(self, sender, *args, **kwargs):
assert isinstance(sender, Mouse), 'Wrong sender'
return (args, kwargs)
mouse = Mouse()
print(mouse.click(10, 20))
>> ((True, ((10, 20), {}),
>> <bound method Mouse.on_click of <__main__.Mouse object at 0x00B6F470>>),)
mouse.click -= mouse.on_click # handler is unregistered
print(mouse.click(10, 20))
>> None
"""
def __init__(self, name = None, sender = None, asynch = False, exc_info = False,
lock = None, threads = 3, traceback = False):
""" Creates an event
asynch
            if True, handlers are executed asynchronously
exc_info
if True, result will contain sys.exc_info()[:2] on error
lock
threading.RLock used to synchronize execution
sender
            event's sender. The sender is passed as the first argument to the
            handler, but only if it is not None. In that case the handler must
            have a placeholder in its arguments to receive the sender
threads
maximum number of threads that will be started
traceback
            if True, the execution result will contain sys.exc_info()
            on error. exc_info must also be True to get the traceback
hash = self.hash(handler)
Handlers are stored in a dictionary that has as keys the handler's hash
handlers = {
hash : (handler, memoize, timeout),
hash : (handler, memoize, timeout), ...
}
The execution result is cached using the following structure
memoize = {
hash : ((args, kwargs, result), (args, kwargs, result), ...),
hash : ((args, kwargs, result), ...), ...
}
The execution result is returned as a tuple having this structure
exec_result = (
(True, result, handler), # on success
(False, error_info, handler), # on error
(None, None, handler), ... # asynchronous execution
)
"""
self.name = name
self.asynchronous = asynch
self.exc_info = exc_info
self.lock = lock
self.sender = sender
self.threads = threads
self.traceback = traceback
self.handlers = {}
        self.memoize = {}
        # shared lock guarding self.memoize across worker threads
        self._memoize_lock = threading.RLock()
def hash(self, handler):
return hashlib.md5(str(handler)).hexdigest()
def handle(self, handler, priority = 0):
""" Registers a handler. The handler can be transmitted together
with two arguments as a list or dictionary. The arguments are:
memoize
if True, the execution result will be cached in self.memoize
timeout
will allocate a predefined time interval for the execution
If arguments are provided as a list, they are considered to have
this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
handler_, memoize, timeout = self._extract(handler)
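        # keys have the form '<priority>.<md5 hash>' so that fire() can
        # natural-sort them and queue handlers in priority order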
self.handlers['%s.%s' % (priority, self.hash(handler_))] = (handler_, memoize, timeout)
return self
def unhandle(self, handler):
""" Unregisters a handler """
handler_, memoize, timeout = self._extract(handler)
        # handlers are keyed as '<priority>.<hash>', so match on the hash suffix
        hash_ = self.hash(handler_)
        keys = [k for k in self.handlers if k.endswith('.%s' % hash_)]
        if not keys:
            raise ValueError('Handler "%s" was not found' % str(handler_))
        for key in keys:
            del self.handlers[key]
return self
def fire(self, *args, **kwargs):
""" Stores all registered handlers in a queue for processing """
self.queue = Queue()
result = {}
if self.handlers:
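            # an order lock serializes handlers, so one worker thread suffices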
max_threads = 1 if kwargs.get('event_order_lock') else self._threads()
# Set global result
def add_to(key, value):
result[key] = value
kwargs['event_add_to_result'] = add_to
for i in range(max_threads):
t = threading.Thread(target = self._execute,
args = args, kwargs = kwargs)
t.daemon = True
t.start()
handler_keys = self.handlers.keys()
handler_keys.sort(key = natsortKey)
for handler in handler_keys:
self.queue.put(handler)
if self.asynchronous:
handler_, memoize, timeout = self.handlers[handler]
result[handler] = (None, None, handler_)
if not self.asynchronous:
self.queue.join()
return result
def count(self):
""" Returns the count of registered handlers """
return len(self.handlers)
def clear(self):
""" Discards all registered handlers and cached results """
self.handlers.clear()
self.memoize.clear()
def _execute(self, *args, **kwargs):
        """ Executes all handlers stored in the queue """
        # pop the control arguments so they are not forwarded to handlers
        add_to_result = kwargs.pop('event_add_to_result')
        order_lock = kwargs.pop('event_order_lock', None)
        return_on_result = kwargs.pop('event_return_on_result', None)
        got_results = False
while True:
try:
h_ = self.queue.get(timeout = 2)
handler, memoize, timeout = self.handlers[h_]
if return_on_result and got_results:
if not self.asynchronous:
self.queue.task_done()
continue
if order_lock:
order_lock.acquire()
try:
r = self._memoize(memoize, timeout, handler, *args, **kwargs)
if not self.asynchronous:
if not return_on_result or (return_on_result and r[1] is not None):
add_to_result(h_, tuple(r))
got_results = True
except Exception:
if not self.asynchronous:
add_to_result(h_, (False, self._error(sys.exc_info()),
handler))
else:
self.error_handler(sys.exc_info())
finally:
if order_lock:
order_lock.release()
if not self.asynchronous:
self.queue.task_done()
if self.queue.empty():
raise Empty
except Empty:
break
def _extract(self, queue_item):
""" Extracts a handler and handler's arguments that can be provided
as list or dictionary. If arguments are provided as list, they are
considered to have this sequence: (handler, memoize, timeout)
Examples:
event += handler
event += (handler, True, 1.5)
event += {'handler':handler, 'memoize':True, 'timeout':1.5}
"""
assert queue_item, 'Invalid list of arguments'
handler = None
memoize = False
timeout = 0
if not isinstance(queue_item, (list, tuple, dict)):
handler = queue_item
        elif isinstance(queue_item, (list, tuple)):
            if len(queue_item) == 3:
                handler, memoize, timeout = queue_item
            elif len(queue_item) == 2:
                handler, memoize = queue_item
            elif len(queue_item) == 1:
                handler = queue_item[0]
elif isinstance(queue_item, dict):
handler = queue_item.get('handler')
memoize = queue_item.get('memoize', False)
timeout = queue_item.get('timeout', 0)
return (handler, bool(memoize), float(timeout))
def _memoize(self, memoize, timeout, handler, *args, **kwargs):
""" Caches the execution result of successful executions
hash = self.hash(handler)
memoize = {
hash : ((args, kwargs, result), (args, kwargs, result), ...),
hash : ((args, kwargs, result), ...), ...
}
"""
if not isinstance(handler, Event) and self.sender is not None:
args = list(args)[:]
args.insert(0, self.sender)
if not memoize:
if timeout <= 0: #no time restriction
result = [True, handler(*args, **kwargs), handler]
return result
result = self._timeout(timeout, handler, *args, **kwargs)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): #error occurred
return [False, self._error(result), handler]
return [True, result, handler]
else:
hash_ = self.hash(handler)
if hash_ in self.memoize:
for args_, kwargs_, result in self.memoize[hash_]:
if args_ == args and kwargs_ == kwargs:
return [True, result, handler]
if timeout <= 0: #no time restriction
result = handler(*args, **kwargs)
else:
result = self._timeout(timeout, handler, *args, **kwargs)
if isinstance(result, tuple) and len(result) == 3:
if isinstance(result[1], Exception): #error occurred
return [False, self._error(result), handler]
            # a lock created here would be local to this call and guard
            # nothing; use the shared instance lock instead
            lock = self._memoize_lock
            lock.acquire()
try:
if hash_ not in self.memoize:
self.memoize[hash_] = []
self.memoize[hash_].append((args, kwargs, result))
return [True, result, handler]
finally:
lock.release()
def _timeout(self, timeout, handler, *args, **kwargs):
""" Controls the time allocated for the execution of a method """
t = spawn_thread(target = handler, args = args, kwargs = kwargs)
t.daemon = True
t.start()
t.join(timeout)
if not t.is_alive():
if t.exc_info:
return t.exc_info
return t.result
else:
try:
msg = '[%s] Execution was forcefully terminated'
raise RuntimeError(msg % t.name)
except:
return sys.exc_info()
def _threads(self):
""" Calculates maximum number of threads that will be started """
if self.threads < len(self.handlers):
return self.threads
return len(self.handlers)
def _error(self, exc_info):
""" Retrieves the error info """
if self.exc_info:
if self.traceback:
return exc_info
return exc_info[:2]
return exc_info[1]
__iadd__ = handle
__isub__ = unhandle
__call__ = fire
__len__ = count
class spawn_thread(threading.Thread):
""" Spawns a new thread and returns the execution result """
    def __init__(self, target, args = (), kwargs = None, default = None):
        threading.Thread.__init__(self)
        self._target = target
        self._args = args
        # avoid a shared mutable default argument for kwargs
        self._kwargs = kwargs if kwargs is not None else {}
self.result = default
self.exc_info = None
def run(self):
try:
self.result = self._target(*self._args, **self._kwargs)
except:
self.exc_info = sys.exc_info()
finally:
del self._target, self._args, self._kwargs
| gpl-3.0 | -4,642,282,576,360,396,000 | 33.808399 | 95 | 0.537928 | false |
ford-prefect/cerbero | test/test_packages_common.py | 31 | 3804 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.config import Platform, Distro, DistroVersion
from cerbero.packages import package
from cerbero.packages.packagesstore import PackagesStore
from test.test_build_common import create_cookbook
class Package1(package.Package):
name = 'gstreamer-test1'
shortdesc = 'GStreamer Test'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
deps = ['gstreamer-test2']
files = ['recipe1:misc:libs:bins']
platform_files = {
Platform.WINDOWS: ['recipe5:libs']
}
class Package2(package.Package):
name = 'gstreamer-test2'
shortdesc = 'GStreamer Test 2'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe2:misc']
class Package3(package.Package):
name = 'gstreamer-test3'
shortdesc = 'GStreamer Test 3'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
files = ['recipe3:misc']
class Package4(package.Package):
name = 'gstreamer-test-bindings'
shortdesc = 'GStreamer Bindings'
version = '1.0'
licences = ['LGPL']
uuid = '1'
vendor = 'GStreamer Project'
sys_deps = {Distro.DEBIAN: ['python'],
DistroVersion.FEDORA_16: ['python27']}
files = ['recipe4:misc']
class MetaPackage(package.MetaPackage):
name = "gstreamer-runtime"
shortdesc = "GStreamer runtime"
longdesc = "GStreamer runtime"
title = "GStreamer runtime"
url = "http://www.gstreamer.net"
version = '1.0'
uuid = '3ffe67b2-4565-411f-8287-e8faa892f853'
vendor = "GStreamer Project"
org = 'net.gstreamer'
packages = [
('gstreamer-test1', True, True),
('gstreamer-test3', False, True),
('gstreamer-test-bindings', False, False)]
platform_packages = {
Platform.LINUX: [('gstreamer-test2', False, False)]}
icon = "gstreamer.ico"
class App(package.App):
name = "gstreamer-app"
shortdesc = "GStreamer sample app"
longdesc = "GStreamer sample app"
title = "GStreamer sample app"
url = "http://www.gstreamer.net"
version = '1.0'
uuid = '3ffe67b2-4565-411f-8287-e8faa892f853'
vendor = "GStreamer Project"
org = 'net.gstreamer'
app_recipe = 'recipe3'
deps = ['gstreamer-test1']
icon = "share/images/gstreamer.png"
embed_deps = True
class DummyConfig(object):
pass
def create_store(config):
cookbook = create_cookbook(config)
store = PackagesStore(config, False)
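    # concrete packages and the App take the cookbook to resolve their
    # recipes; MetaPackage only aggregates other packages, so it takes none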
    for klass in [Package1, Package2, Package3, Package4, App]:
        pkg = klass(config, store, cookbook)
        pkg.__file__ = 'test/test_packages_common.py'
        store.add_package(pkg)
    for klass in [MetaPackage]:
        pkg = klass(config, store)
        pkg.__file__ = 'test/test_packages_common.py'
        store.add_package(pkg)
return store
| lgpl-2.1 | -5,708,435,954,015,954,000 | 27.818182 | 67 | 0.660358 | false |
mozts2005/OuterSpace | oslauncher/oslauncher/version.py | 3 | 1031 | #
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
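# versiondata is presumably generated at build time; fall back to a
# work-in-progress placeholder when it is absent.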
try:
import versiondata
version = versiondata.version
versionString = "%d.%d.%d" % version
except ImportError:
version = (0, 0, 0)
versionString = "[Work In Progress]"
| gpl-2.0 | 9,115,858,535,065,539,000 | 36.185185 | 77 | 0.704171 | false |
JesseLivezey/pylearn2 | pylearn2/datasets/tests/test_ocr.py | 43 | 3754 | """module for testing datasets.ocr"""
import unittest
import numpy as np
from pylearn2.datasets.ocr import OCR
from pylearn2.space import Conv2DSpace
from pylearn2.testing.skip import skip_if_no_data
class TestOCR(unittest.TestCase):
"""
Unit test of OCR dataset
Parameters
----------
None
"""
def setUp(self):
"""Load train, test, valid sets"""
skip_if_no_data()
self.train = OCR(which_set='train')
self.valid = OCR(which_set='valid')
self.test = OCR(which_set='test')
def test_topo(self):
"""Tests that a topological batch has 4 dimensions"""
topo = self.train.get_batch_topo(1)
assert topo.ndim == 4
def test_topo_c01b(self):
"""
Tests that a topological batch with axes ('c',0,1,'b')
can be dimshuffled back to match the standard ('b',0,1,'c')
format.
"""
batch_size = 100
c01b_test = OCR(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size, :]
c01b = c01b_test.get_topological_view(c01b_X)
assert c01b.shape == (1, 16, 8, batch_size)
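        # transpose(3, 1, 2, 0) maps ('c', 0, 1, 'b') back to ('b', 0, 1, 'c')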
b01c = c01b.transpose(3, 1, 2, 0)
b01c_X = self.test.X[0:batch_size, :]
assert c01b_X.shape == b01c_X.shape
assert np.all(c01b_X == b01c_X)
b01c_direct = self.test.get_topological_view(b01c_X)
assert b01c_direct.shape == b01c.shape
assert np.all(b01c_direct == b01c)
def test_iterator(self):
"""
Tests that batches returned by an iterator with topological
data_specs are the same as the ones returned by calling
get_topological_view on the dataset with the corresponding order
"""
batch_size = 100
b01c_X = self.test.X[0:batch_size, :]
b01c_topo = self.test.get_topological_view(b01c_X)
b01c_b01c_it = self.test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('b', 0, 1, 'c')),
'features'))
b01c_b01c = b01c_b01c_it.next()
assert np.all(b01c_topo == b01c_b01c)
c01b_test = OCR(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size, :]
c01b_topo = c01b_test.get_topological_view(c01b_X)
c01b_c01b_it = c01b_test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('c', 0, 1, 'b')),
'features'))
c01b_c01b = c01b_c01b_it.next()
assert np.all(c01b_topo == c01b_c01b)
# Also check that samples from iterators with the same data_specs
# with Conv2DSpace do not depend on the axes of the dataset
b01c_c01b_it = self.test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('c', 0, 1, 'b')),
'features'))
b01c_c01b = b01c_c01b_it.next()
assert np.all(b01c_c01b == c01b_c01b)
c01b_b01c_it = c01b_test.iterator(
mode='sequential',
batch_size=batch_size,
data_specs=(Conv2DSpace(shape=(16, 8),
num_channels=1,
axes=('b', 0, 1, 'c')),
'features'))
c01b_b01c = c01b_b01c_it.next()
assert np.all(c01b_b01c == b01c_b01c)
| bsd-3-clause | -5,110,563,710,441,589,000 | 36.54 | 73 | 0.520511 | false |