repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
vipins/ccccms | env/Lib/site-packages/setuptools/tests/server.py | 452 | 2651 | """Basic http server for tests to simulate PyPI or custom indexes
"""
import sys
import time
import threading
from setuptools.compat import BaseHTTPRequestHandler
from setuptools.compat import (urllib2, URLError, HTTPServer,
SimpleHTTPRequestHandler)
class IndexServer(HTTPServer):
"""Basic single-threaded http server simulating a package index
You can use this server in unittest like this::
s = IndexServer()
s.start()
index_url = s.base_url() + 'mytestindex'
# do some test requests to the index
# The index files should be located in setuptools/tests/indexes
s.stop()
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=SimpleHTTPRequestHandler):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self._run = True
def serve(self):
while self._run:
self.handle_request()
def start(self):
self.thread = threading.Thread(target=self.serve)
self.thread.start()
def stop(self):
"Stop the server"
# Let the server finish the last request and wait for a new one.
time.sleep(0.1)
# self.shutdown is not supported on python < 2.6, so just
# set _run to false, and make a request, causing it to
# terminate.
self._run = False
url = 'http://127.0.0.1:%(server_port)s/' % vars(self)
try:
if sys.version_info >= (2, 6):
urllib2.urlopen(url, timeout=5)
else:
urllib2.urlopen(url)
except URLError:
# ignore any errors; all that's important is the request
pass
self.thread.join()
self.socket.close()
def base_url(self):
port = self.server_port
return 'http://127.0.0.1:%s/setuptools/tests/indexes/' % port
class RequestRecorder(BaseHTTPRequestHandler):
def do_GET(self):
requests = vars(self.server).setdefault('requests', [])
requests.append(self)
self.send_response(200, 'OK')
class MockServer(HTTPServer, threading.Thread):
"""
A simple HTTP Server that records the requests made to it.
"""
def __init__(self, server_address=('', 0),
RequestHandlerClass=RequestRecorder):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
threading.Thread.__init__(self)
self.setDaemon(True)
self.requests = []
def run(self):
self.serve_forever()
def url(self):
return 'http://localhost:%(server_port)s/' % vars(self)
url = property(url)
| bsd-3-clause |
jvs/sourcer | sourcer/expressions/str.py | 1 | 1444 | from outsourcer import Code
from . import utils
from .base import Expression
from .constants import POS, RESULT, STATUS, TEXT
class Str(Expression):
is_commented = False
def __init__(self, value):
if not isinstance(value, (bytes, str)):
raise TypeError(f'Expected bytes or str. Received: {type(value)}.')
self.value = value
self.skip_ignored = False
self.num_blocks = 0 if not self.value else 1
def __str__(self):
return repr(self.value)
def always_succeeds(self):
return not self.value
def can_partially_succeed(self):
return False
def argumentize(self, out):
wrap = Code('_wrap_string_literal')
value = Expression.argumentize(self, out)
return out.var('arg', wrap(self.value, value))
def _compile(self, out):
if not self.value:
out += STATUS << True
out += RESULT << ''
return
value = out.var('value', self.value)
end = out.var('end', POS + len(self.value))
with out.IF(TEXT[POS : end] == value):
out += RESULT << value
out += POS << (utils.skip_ignored(end) if self.skip_ignored else end)
out += STATUS << True
with out.ELSE():
out += RESULT << self.error_func()
out += STATUS << False
def complain(self):
return f'Expected to match the string {self.value!r}'
| mit |
rhiever/bokeh | sphinx/source/docs/tutorials/exercises/unemployment.py | 23 | 2160 | import numpy as np
from bokeh.models import HoverTool
from bokeh.plotting import ColumnDataSource, figure, output_file, show
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
# EXERCISE: create a `ColumnDataSource` with columns: month, year, color, rate
source = ColumnDataSource(
data=dict(
month=month,
year=year,
color=color,
rate=rate,
)
)
# EXERCISE: output to static HTML file
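# One possible solution (sketch; the output filename is only an assumption):
output_file('unemployment.html')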
# create a new figure
p = figure(title="US Unemployment (1948 - 2013)", tools="resize,hover",
x_range=years, y_range=list(reversed(months)),
plot_width=900, plot_height=400, x_axis_location="above")
# EXERCISE: use the `rect renderer with the following attributes:
# - x_range is years, y_range is months (reversed)
# - fill color for the rectangles is the 'color' field
# - line_color for the rectangles is None
# - tools are resize and hover tools
# - add a nice title, and set the plot_width and plot_height
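# One possible renderer call (sketch; the attribute values follow the comments above):
p.rect('year', 'month', 1, 1, source=source,
color='color', line_color=None)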
# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled
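# One possible styling block (sketch; the exact font size and angle are assumptions):
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = '5pt'
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi / 3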
# EXERCISE: configure the hover tool to display the month, year and rate
hover = p.select(dict(type=HoverTool))
hover.tooltips = [
# fill me in
]
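# A possible set of tooltips (illustrative; field names follow the ColumnDataSource above):
# hover.tooltips = [
# ('date', '@month @year'),
# ('rate', '@rate'),
# ]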
show(p)
| bsd-3-clause |
heiher/libreoffice-core | scripting/examples/python/HelloWorld.py | 12 | 1539 | # HelloWorld python script for the scripting framework
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
def HelloWorldPython( ):
"""Prints the string 'Hello World(in Python)' into the current document"""
#get the doc from the scripting context which is made available to all scripts
desktop = XSCRIPTCONTEXT.getDesktop()
model = desktop.getCurrentComponent()
#check whether there's already an opened document. Otherwise, create a new one
if not hasattr(model, "Text"):
model = desktop.loadComponentFromURL(
"private:factory/swriter","_blank", 0, () )
#get the XText interface
text = model.Text
#create an XTextRange at the end of the document
tRange = text.End
#and set the string
tRange.String = "Hello World (in Python)"
return None
| gpl-3.0 |
abrowning80/solutions-geoevent-java | data/packages/geoprocessing/visibility/Toolshare/scripts/makefeature.py | 6 | 1382 | #-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: patr5136
#
# Created: 23/08/2013
# Copyright: (c) patr5136 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy
import os, json
def makeFeature(geo, wkid):
sr = arcpy.SpatialReference(wkid)
arcpy.CreateFeatureclass_management('in_memory', 'tmpPoly', 'POLYGON', '#', '#', '#', sr)
fc = os.path.join('in_memory', 'tmpPoly')
fields = ["SHAPE@"]
insert = arcpy.da.InsertCursor(fc, fields)
insert.insertRow([geo])
return fc
def makePolygon(json_str):
jsonPoly = json.loads(json_str)
rings=arcpy.Array()
for ring in jsonPoly['rings']:
points = arcpy.Array();
for coord in ring:
x=coord[0]
y=coord[1]
z=None
if len(coord)>2:
z=coord[2]
#z=coord[3]
p=arcpy.Point()
p.X=x
p.Y=y
if z:
p.Z=z
points.add(p)
rings.add(points)
return arcpy.Polygon(rings)
if __name__ == '__main__':
jsonPolygon = arcpy.GetParameterAsText(0)
wkid = arcpy.GetParameter(1)
polygon = makePolygon(jsonPolygon)
fc = makeFeature(polygon, wkid)
arcpy.SetParameter(2, fc)
| apache-2.0 |
ccarouge/cwsl-mas | cwsl/core/file_creator.py | 4 | 11088 | """
Authors: Tim Bedin, Tim Erwin
Copyright 2014 CSIRO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contains the FileCreator class.
"""
import os
import re
import itertools
import logging
from cwsl.core.dataset import DataSet
from cwsl.core.constraint import Constraint
from cwsl.core.metafile import MetaFile
module_logger = logging.getLogger('cwsl.core.file_creator')
class FileCreator(DataSet):
''' This class is a DataSet that creates the output MockClimateFiles
objects, given an output pattern and a set of Constraints.
A FileCreator has a 'output_pattern' attribute which defines
what the filename of any created output files should be.
The ArgumentCreator class compares these possible created files
and throws away any that cannot be created as they do not have
matching files in the input DataSet.
The output pattern has a particular syntax. The pattern is given as a
string with attribute names surrounded by % signs.
eg:
"/projects/ua6/CAWCR_CVC_processed/%variable%_%modeling_realm%_%model%.nc"
This class will raise an error if it is instantiated with a pattern with
empty constraints - it does not make sense to have a file creator that
has 'empty' or 'all' constraints - they must be in canonical form.
It will also raise an error if instantiated with 'extra' constraints
that do not match those from its output pattern.
'''
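# For illustration, the pattern shown in the docstring above yields one (initially
# empty) Constraint per %name% placeholder, via the helper defined later in this class:
# FileCreator.constraints_from_pattern(
#     "%variable%_%modeling_realm%_%model%.nc")
# -> constraints keyed by 'variable', 'modeling_realm' and 'model'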
def __init__(self, output_pattern, extra_constraints):
''' This constructor sets up the FileCreator from the
pattern of its output and 'extra' constraints that set the value of
its attributes.
'''
self.output_pattern = output_pattern
# Construct the initial constraints from the output pattern.
self.constraints = FileCreator.constraints_from_pattern(output_pattern)
# Add the extra constraints to the self.constraints, strip out any that
# are not part of the output pattern.
self.merge_constraints(extra_constraints)
# This object must create files, so after merging all constraints must
# be in canonical form.
# "extra" and "info" are keywords for non-compulsory constraints that
# are replaced by a placeholder value.
for constraint in self.constraints:
if not constraint.values:
split_key = constraint.key.split('_')
if 'extra' in split_key:
constraint.values = set(['noextras'])
elif 'info' in split_key:
constraint.values = set(['orig'+split_key[0]])
else:
module_logger.error("Constraint {0} is empty - should be in canonical form!"
.format(constraint))
raise EmptyConstraintError("Constraint {0} is empty - should be in canonical form!"
.format(constraint))
# A set to hold all the valid combinations of attributes.
self.valid_combinations = set()
# One to hold the valid_hashes
self.valid_hashes = set()
self.cons_names = [cons.key for cons in self.constraints]
def get_files(self, att_dict, check=False, update=True):
""" This method returns all possible MockClimateFiles from the
FileCreator that match an input attribute dictionary.
If check is True, then we check that the hash of the for the
file is in the - 'valid_hashes' hash list. This is used when using
the FileCreator as an input, we only want to give files that
actually exists.
"""
# Get the keys of the input dictionary.
search_keys = [att for att in att_dict.keys()]
cons_names = [cons.key for cons in self.constraints]
to_loop = []
# We do this for every constraint in the FileCreator
for key in cons_names:
if key not in search_keys:
# If a key is not in the att_dict, grab the existing constraint.
existing_cons = self.get_constraint(key)
to_loop.append((existing_cons.key, existing_cons.values))
assert isinstance(existing_cons.values, set)
else:
new_cons = Constraint(key, [att_dict[key]])
to_loop.append((new_cons.key, new_cons.values))
keys = [cons[0] for cons in to_loop]
values = [cons[1] for cons in to_loop]
new_iter = itertools.product(*values)
outfiles = []
for combination in new_iter:
new_file = self.climate_file_from_combination(keys, combination,
check=check, update=update)
if new_file:
outfiles.append(new_file)
return outfiles
@property
def files(self):
""" This property returns all the real files
that exist in this file_creator.
"""
huge_iterator = itertools.product(*[cons.values
for cons in self.constraints])
cons_names = [cons.key for cons in self.constraints]
for combination in huge_iterator:
# Create a set of constraints for this combination.
climate_file = self.climate_file_from_combination(cons_names, combination,
check=True, update=False)
if climate_file:
yield climate_file
def get_constraint(self, attribute_name):
""" Get a particular constraint by name."""
for constraint in self.constraints:
if constraint.key == attribute_name:
return constraint
# If it can't be found, return None.
return None
def merge_constraints(self, new_constraints):
""" This function adds the constraint values to the constraints from
a pattern.
"""
existing_cons_names = [cons.key for cons in self.constraints]
# Now add the constraints - only if they are in the pattern!
for cons in new_constraints:
if cons.key in existing_cons_names:
self.constraints.add(cons)
attribute_names = [cons.key for cons in self.constraints]
repeated_atts = []
for name in attribute_names:
if attribute_names.count(name) > 1:
repeated_atts.append(name)
to_remove = [cons for cons in self.constraints
if cons.key in repeated_atts]
new_cons_dict = {}
for cons in to_remove:
new_cons_dict[cons.key] = set([])
for cons in to_remove:
new_cons_dict[cons.key] = new_cons_dict[cons.key].union(cons.values)
self.constraints.remove(cons)
for key in new_cons_dict:
self.constraints.add(Constraint(key, new_cons_dict[key]))
def climate_file_from_combination(self, keys, next_combination,
check, update):
""" Make a possible output MetaFile object from
a combination of attributes.
"""
# Turn the combination tuple into a dictionary with
# attribute names.
sub_dict = {}
cons_list = []
for key, value in zip(keys, next_combination):
sub_dict[key] = value
cons_list.append(Constraint(key, [value]))
new_file = self.output_pattern
for key in sub_dict:
att_sub = "%" + key + "%"
new_file = re.sub(att_sub, sub_dict[key], new_file)
new_path = os.path.dirname(new_file)
file_name = os.path.basename(new_file)
new_climate_file = MetaFile(path_dir=new_path,
filename=file_name,
all_atts=sub_dict)
if check:
# Check that this combination is valid for the FileCreator
# If it is not, return None.
module_logger.debug("Checking cons_list: {}".format(cons_list))
if frozenset(cons_list) not in self.valid_combinations:
module_logger.debug("This combination: {0} is not found in {1}"
.format(cons_list, self.valid_combinations))
return None
if update:
# Add the hash to the 'valid_hashes' set.
file_hash = hash(new_climate_file)
self.valid_hashes.add(file_hash)
self.valid_combinations.add(frozenset(cons_list))
module_logger.debug("Returning climate file: {}".format(new_climate_file))
return new_climate_file
@staticmethod
def default_pattern(out_constraints, temp=False):
""" Creates a default pattern from a set of constraints.
Mostly for testing - we could extend this to use real patterns.
"""
out_pattern = ''
for cons in out_constraints:
out_pattern += '%' + cons.key + '%_'
output = out_pattern[:-1]
if temp:
# Try some different temp directories.
if "TMPDIR" in os.environ:
output = os.path.join(os.environ["TMPDIR"], output)
elif "TEMP" in os.environ:
output = os.path.join(os.environ["TEMP"], output)
elif "TMP" in os.environ:
output = os.path.join(os.environ["TMP"], output)
else:
output = os.path.join("/tmp", output)
return output
@staticmethod
def constraints_from_pattern(pattern_string):
""" This function builds a set of constraint objects from
an output pattern.
"""
regex_pattern = r"%(\S+?)%"
attribute_names = re.findall(regex_pattern, pattern_string)
constraint_list = [Constraint(att_name, [])
for att_name in attribute_names]
return set(constraint_list)
class EmptyConstraintError(Exception):
def __init__(self, constraint):
self.constraint = constraint
module_logger.error("Constraint {} is empty but must contain values"
.format(self.constraint))
def __repr__(self):
return repr(self.constraint)
class ExtraConstraintError(Exception):
def __init__(self, constraint):
self.constraint = constraint
module_logger.error("Constraint {} passed to FileCreator is not found in the output pattern!"
.format(self.constraint))
def __repr__(self):
return repr(self.constraint)
| apache-2.0 |
knxd/PyKNyX | tests/core/dptXlator/dptXlator2ByteFloat.py | 2 | 2328 | # -*- coding: utf-8 -*-
from pyknyx.core.dptXlator.dptXlator2ByteFloat import *
import unittest
# Mute logger
from pyknyx.services.logger import logging
logger = logging.getLogger(__name__)
logging.getLogger("pyknyx").setLevel(logging.ERROR)
class DPTXlator2ByteFloatTestCase(unittest.TestCase):
def setUp(self):
self.testTable = (
( 0., 0x0000, b"\x00\x00"),
( 0.01, 0x0001, b"\x00\x01"),
( -0.01, 0x87ff, b"\x87\xff"),
( -1., 0x879c, b"\x87\x9c"),
( 1., 0x0064, b"\x00\x64"),
( -272.96, 0xa156, b"\xa1\x56"),
(670760.96, 0x7fff, b"\x7f\xff"),
)
self.dptXlator = DPTXlator2ByteFloat("9.xxx")
def tearDown(self):
pass
#def test_constructor(self):
#print self.dptXlator.handledDPT
def test_typeSize(self):
self.assertEqual(self.dptXlator.typeSize, 2)
def testcheckValue(self):
with self.assertRaises(DPTXlatorValueError):
self.dptXlator.checkValue(self.dptXlator._dpt.limits[1] + 1)
def test_dataToValue(self):
for value, data, frame in self.testTable:
value_ = self.dptXlator.dataToValue(data)
self.assertEqual(value_, value, "Conversion failed (converted value for %s is %.2f, should be %.2f)" %
(hex(data), value_, value))
def test_valueToData(self):
for value, data, frame in self.testTable:
data_ = self.dptXlator.valueToData(value)
self.assertEqual(data_, data, "Conversion failed (converted data for %.2f is %s, should be %s)" %
(value, hex(data_), hex(data)))
def test_dataToFrame(self):
for value, data, frame in self.testTable:
frame_ = self.dptXlator.dataToFrame(data)
self.assertEqual(frame_, frame, "Conversion failed (converted frame for %s is %r, should be %r)" %
(hex(data), frame_, frame))
def test_frameToData(self):
for value, data, frame in self.testTable:
data_ = self.dptXlator.frameToData(frame)
self.assertEqual(data_, data, "Conversion failed (converted data for %r is %s, should be %s)" %
(frame, hex(data_), hex(data)))
| gpl-3.0 |
wweiradio/django | django/conf/locale/zh_Hant/formats.py | 1008 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 14:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
'%Y年%n月%j日 %H:%M:%S.%f', # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
| bsd-3-clause |
ycl2045/nova-master | nova/tests/integrated/v3/test_security_groups.py | 29 | 2233 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.network.security_group import neutron_driver
from nova.tests.integrated.v3 import test_servers
def fake_get(*args, **kwargs):
nova_group = {}
nova_group['id'] = 'fake'
nova_group['description'] = ''
nova_group['name'] = 'test'
nova_group['project_id'] = 'fake'
nova_group['rules'] = []
return nova_group
def fake_get_instances_security_groups_bindings(self, context, servers):
result = {}
for s in servers:
result[s.get('id')] = [{'name': 'test'}]
return result
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-security-groups'
def setUp(self):
self.flags(security_group_api=('neutron'))
super(SecurityGroupsJsonTest, self).setUp()
self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
fake_get_instances_security_groups_bindings)
def test_server_create(self):
self._post_server()
def test_server_get(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_server_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
| apache-2.0 |
dannyboi104/SickRage | sickbeard/providers/t411.py | 2 | 9980 | # -*- coding: latin-1 -*-
# Author: djoole <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import traceback
import re
import datetime
import time
from requests.auth import AuthBase
import sickbeard
import generic
import requests
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import show_name_helpers
from sickbeard import db
from sickbeard import helpers
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
from sickbeard.exceptions import ex
class T411Provider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "T411")
self.supportsBacklog = True
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.token = None
self.tokenLastUpdate = None
self.cache = T411Cache(self)
self.urls = {'base_url': 'http://www.t411.io/',
'search': 'https://api.t411.io/torrents/search/%s?cid=%s&limit=100',
'login_page': 'https://api.t411.io/auth',
'download': 'https://api.t411.io/torrents/download/%s',
}
self.url = self.urls['base_url']
self.subcategories = [433, 637, 455, 639]
def isEnabled(self):
return self.enabled
def imageName(self):
return 't411.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _doLogin(self):
if self.token is not None:
if time.time() < (self.tokenLastUpdate + 30 * 60):
logger.log('T411 Authentication token is still valid', logger.DEBUG)
return True
login_params = {'username': self.username,
'password': self.password}
logger.log('Performing authentication to T411', logger.DEBUG)
response = helpers.getURL(self.urls['login_page'], post_data=login_params, timeout=30, json=True)
if not response:
logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.WARNING)
return False
if response and 'token' in response:
self.token = response['token']
self.tokenLastUpdate = time.time()
self.uid = response['uid'].encode('ascii', 'ignore')
self.session.auth = T411Auth(self.token)
logger.log('Using T411 Authorization token : ' + self.token, logger.DEBUG)
return True
else:
logger.log('T411 token not found in authentication response', logger.WARNING)
return False
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
if not ep_obj:
return [search_string]
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + '.S%02d' % int(ep_obj.scene_season) # 1) showName.SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return [search_string]
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + '.'
if self.show.air_by_date:
ep_string += str(ep_obj.airdate).replace('-', '|')
elif self.show.sports:
ep_string += str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
elif self.show.anime:
ep_string += "%i" % int(ep_obj.scene_absolute_number)
else:
ep_string += sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode}
if add_string:
ep_string += ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', '.', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
logger.log(u"_doSearch started with ..." + str(search_params), logger.DEBUG)
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_params.keys():
for search_string in search_params[mode]:
for sc in self.subcategories:
searchURL = self.urls['search'] % (search_string, sc)
logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL, json=True)
if not data:
continue
try:
if 'torrents' not in data:
logger.log(
u"The Data returned from " + self.name + " do not contains any torrent : " + str(data),
logger.DEBUG)
continue
torrents = data['torrents']
if not torrents:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.WARNING)
continue
for torrent in torrents:
try:
torrent_name = torrent['name']
torrent_id = torrent['id']
torrent_download_url = (self.urls['download'] % torrent_id).encode('utf8')
if not torrent_name or not torrent_download_url:
continue
item = torrent_name, torrent_download_url
logger.log(u"Found result: " + torrent_name + " (" + torrent_download_url + ")",
logger.DEBUG)
items[mode].append(item)
except Exception as e:
logger.log(u"Invalid torrent data, skipping results: {0}".format(str(torrent)), logger.DEBUG)
continue
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(),
logger.ERROR)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url = item
if title:
title = self._clean_title_from_provider(title)
if url:
url = str(url).replace('&amp;', '&')
return title, url
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
searchResults = self._doSearch(searchString[0])
for item in searchResults:
title, url = self._get_title_and_url(item)
if title and url:
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class T411Auth(AuthBase):
"""Attaches HTTP Authentication to the given Request object."""
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = self.token
return r
class T411Cache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# Only poll T411 every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = T411Provider()
| gpl-3.0 |
444thLiao/VarappX | tests/data_models/test_users.py | 2 | 3481 | #!/usr/bin/env python3
import unittest
import django.test
from varapp.data_models.users import *
from varapp.models.users import *
from django.conf import settings
class TestUser(unittest.TestCase):
def test_user_constructor(self):
s = User('A', 'a@a', 'code', '', 1, Person(firstname='A'), Role('guest'))
self.assertEqual(s.username, 'A')
self.assertEqual(s.person.firstname, 'A')
self.assertEqual(s.role.name, 'guest')
def test_expose(self):
u = User('A', 'a@a', 'code', '', 1, Person(firstname='A'), Role('guest'))
self.assertIsInstance(u.expose(), dict)
self.assertEqual(u.expose()['username'], 'A')
class TestDatabase(unittest.TestCase):
def test_database_constructor(self):
d = Database('db', 'path', 'filename', 'sha1', 'desc', 1, 'size', ['A','B'])
self.assertEqual(d.name, 'db')
self.assertEqual(d.users[1], 'B')
def test_expose(self):
d = Database('db', 'path', 'filename', 'sha1', 'desc', 1, 'size', ['A','B'])
self.assertIsInstance(d.expose(), dict)
self.assertEqual(d.expose()['users'][0], 'A')
class TestFactories(django.test.TestCase):
def test_role_factory(self):
R = Roles(rank=6)
r = role_factory(R)
self.assertIsInstance(r, Role)
self.assertEqual(r.rank, R.rank)
def test_person_factory(self):
P = People(firstname='asdf')
p = person_factory(P)
self.assertIsInstance(p, Person)
self.assertEqual(p.firstname, P.firstname)
def test_database_factory(self):
D = VariantsDb.objects.get(filename=settings.DB_TEST)
d = database_factory(D)
self.assertIsInstance(d, Database)
self.assertEqual(d.name, D.name)
self.assertGreaterEqual(len(d.users), 1)
def test_user_factory(self):
R = Roles.objects.create(rank=6)
P = People.objects.create(firstname='asdf')
U = Users.objects.create(username='adsf', role=R, person=P, is_active=1)
D = VariantsDb.objects.get(filename=settings.DB_TEST)
u = user_factory(U)
self.assertIsInstance(u, User)
self.assertEqual(u.username, U.username)
self.assertGreaterEqual(len(u.databases), 0)
# Add access to test db - it should reflect in User.databases
DbAccess.objects.create(user=U, variants_db=D, is_active=1)
u = user_factory(U)
self.assertGreaterEqual(len(u.databases), 1)
# Make the db inactive - it should get ignored again
D.is_active = 0
D.save()
u = user_factory(U)
self.assertGreaterEqual(len(u.databases), 0)
class TestLists(unittest.TestCase):
def test_users_list_from_users_db(self):
L = users_list_from_users_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], User)
def test_roles_list_from_users_db(self):
L = roles_list_from_users_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], Role)
def test_persons_list_from_db(self):
L = persons_list_from_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], Person)
def test_databases_list_from_users_db(self):
L = databases_list_from_users_db()
self.assertGreaterEqual(len(L), 1)
self.assertIsInstance(L[0], Database)
self.assertEqual(L[0].filename, settings.DB_TEST)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
bmerry/mlsgpu | utils/simulate.py | 1 | 12858 | #!/usr/bin/env python
# mlsgpu: surface reconstruction from point clouds
# Copyright (C) 2013 University of Cape Town
#
# This file is part of mlsgpu.
#
# mlsgpu is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
import sys
import heapq
import timeplot
from optparse import OptionParser
class QItem(object):
def __init__(self, parent, parent_get, parent_push):
self.parent = parent
self.size = 1
self.finish = 0.0
self.parent_get = parent_get
self.parent_push = parent_push
self.children = []
def total_time(self):
ans = self.finish
for x in self.children:
ans += x.parent_get
ans += x.parent_push
return ans
class EndQItem(object):
def __init__(self):
pass
def process_worker(worker, pq):
pqid = 0
item = None
cq = []
get_size = None
for action in worker.actions:
if action.name in ['bbox', 'pop']:
if pqid == len(pq):
break
item = pq[pqid]
pqid += 1
base = action.stop
elif action.name == 'get':
parent_get = action.start - base
base = action.stop
get_size = action.value
elif action.name == 'push':
parent_push = action.start - base
base = action.stop
child = QItem(item, parent_get, parent_push)
if get_size is not None:
child.size = get_size
get_size = None
item.children.append(child)
cq.append(child)
item.finish = 0.0
elif action.name in ['compute', 'load', 'write']:
if worker.name != 'main' or action.name != 'write':
# Want to exclude phase 3
item.finish += action.stop - action.start
elif action.name in ['init']:
pass
else:
raise ValueError('Unhandled action "' + action.name + '"')
if pqid != len(pq):
raise ValueError('Parent queue was not exhausted')
return cq
def get_worker(group, name):
for worker in group:
if worker.name == name:
return worker
return None
class SimPool(object):
def __init__(self, simulator, size, inorder = True):
self._size = size
self._waiters = []
self._watchers = []
self._allocs = []
self._spare = size
self._inorder = inorder
self._simulator = simulator
def spare(self):
return self._spare
def _biggest(self):
"""Maximum possible allocation without blocking"""
if not self._inorder:
return self._spare
elif not self._allocs:
return self._size
else:
start = self._allocs[0][0]
end = self._allocs[-1][1]
if end > start:
return max(self._size - end, start)
else:
return start - end
def get(self, worker, size):
if not self._inorder:
size = 1
assert size > 0
assert size <= self._size
self._waiters.append((worker, size))
self._do_wakeups()
def can_get(self, size):
if not self._inorder:
size = 1
return size <= self._biggest()
def watch(self, worker):
'''Request to be woken up when free space increases'''
self._watchers.append(worker)
def unwatch(self, worker):
'''Cancel a previous watch request'''
self._watchers.remove(worker)
def _do_wakeups(self):
while self._waiters:
(w, size) = self._waiters[0]
if size > self._biggest():
break
elif not self._allocs:
start = 0
elif not self._inorder:
start = self._allocs[-1][1]
else:
cur_start = self._allocs[0][0]
cur_end = self._allocs[-1][1]
cur_limit = self._size
if cur_end <= cur_start:
cur_limit = cur_start
if cur_limit - cur_end >= size:
start = cur_end
else:
start = 0
a = (start, start + size)
self._allocs.append(a)
self._spare -= size
del self._waiters[0]
self._simulator.wakeup(w, value = a)
while self._watchers:
w = self._watchers.pop(0)
self._simulator.wakeup(w)
def done(self, alloc):
self._allocs.remove(alloc)
self._spare += alloc[1] - alloc[0]
self._do_wakeups()
class SimSimpleQueue(object):
"""
Queue without associated pool. Just accepts objects and provides
a blocking pop.
"""
def __init__(self, simulator):
self._queue = []
self._waiters = []
self._simulator = simulator
self._running = True
def _do_wakeups(self):
while self._waiters and self._queue:
item = self._queue.pop(0)
worker = self._waiters.pop(0)
self._simulator.wakeup(worker, value = item)
while self._waiters and not self._running:
worker = self._waiters.pop(0)
self._simulator.wakeup(worker, value = EndQItem())
def pop(self, worker):
self._waiters.append(worker)
self._do_wakeups()
def push(self, item):
self._queue.append(item)
self._do_wakeups()
def stop(self):
self._running = False
self._do_wakeups()
class SimQueue(object):
def __init__(self, simulator, pool_size, inorder = True):
self._pool = SimPool(simulator, pool_size, inorder)
self._queue = SimSimpleQueue(simulator)
def spare(self):
return self._pool.spare()
def pop(self, worker):
self._queue.pop(worker)
def get(self, worker, size):
self._pool.get(worker, size)
def can_get(self, size):
return self._pool.can_get(size)
def watch(self, worker):
self._pool.watch(worker)
def unwatch(self, worker):
self._pool.unwatch(worker)
def push(self, item, alloc):
self._queue.push(item)
def done(self, alloc):
self._pool.done(alloc)
def stop(self):
self._queue.stop()
class SimWorker(object):
def __init__(self, simulator, name, inq, outqs, options):
self.simulator = simulator
self.name = name
self.inq = inq
self.outqs = outqs
self.generator = self.run()
def best_queue(self, size):
if len(self.outqs) > 1:
valid_queues = [q for q in self.outqs if q.can_get(size)]
if valid_queues:
return max(valid_queues, key = lambda x: x.spare())
else:
return None
else:
return self.outqs[0]
def run(self):
yield
while True:
self.inq.pop(self)
item = yield
if isinstance(item, EndQItem):
if self.simulator.count_running_workers(self.name) == 1:
# We are the last worker from the set
for q in self.outqs:
q.stop()
break
print(self.name, self.simulator.time, item.total_time())
for child in item.children:
size = child.size
yield child.parent_get
while True:
outq = self.best_queue(size)
if outq is not None:
break
for q in self.outqs:
q.watch(self)
yield
for q in self.outqs:
q.unwatch(self)
outq.get(self, size)
child.alloc = yield
yield child.parent_push
outq.push(child, child.alloc)
if item.finish > 0:
yield item.finish
if hasattr(item, 'alloc'):
self.inq.done(item.alloc)
class Simulator(object):
def __init__(self):
self.workers = []
self.wakeup_queue = []
self.time = 0.0
self.running = set()
def add_worker(self, worker):
self.workers.append(worker)
worker.generator.send(None)
self.wakeup(worker)
def wakeup(self, worker, time = None, value = None):
if time is None:
time = self.time
assert time >= self.time
for (t, w, v) in self.wakeup_queue:
assert w != worker
heapq.heappush(self.wakeup_queue, (time, worker, value))
def count_running_workers(self, name):
ans = 0
for w in self.running:
if w.name == name:
ans += 1
return ans
def run(self):
self.time = 0.0
self.running = set(self.workers)
while self.wakeup_queue:
(self.time, worker, value) = heapq.heappop(self.wakeup_queue)
assert worker in self.running
try:
compute_time = worker.generator.send(value)
if compute_time is not None:
assert compute_time >= 0
self.wakeup(worker, self.time + compute_time)
except StopIteration:
self.running.remove(worker)
if self.running:
print("Workers still running: possible deadlock", file = sys.stderr)
for w in self.running:
print(" " + w.name, file = sys.stderr)
sys.exit(1)
def load_items(group):
copy_worker = get_worker(group, 'bucket.fine.0')
if copy_worker is None:
copy_worker = get_worker(group, 'copy.0')
all_queue = [QItem(None, 0.0, 0.0)]
coarse_queue = process_worker(get_worker(group, 'main'), all_queue)
copy_queue = process_worker(copy_worker, coarse_queue)
mesh_queue = process_worker(get_worker(group, 'device.0'), copy_queue)
process_worker(get_worker(group, 'mesher.0'), mesh_queue)
return all_queue[0]
def simulate(root, options):
simulator = Simulator()
gpus = options.gpus
if options.infinite:
big = 10**30
coarse_cap = big
copy_cap = big
mesher_cap = big
else:
coarse_cap = options.coarse_cap * 1024 * 1024
copy_cap = 2
mesher_cap = options.mesher_cap * 1024 * 1024
all_queue = SimQueue(simulator, 1)
coarse_queue = SimQueue(simulator, coarse_cap)
copy_queues = [SimQueue(simulator, copy_cap, inorder = False) for i in range(gpus)]
mesh_queue = SimQueue(simulator, mesher_cap)
simulator.add_worker(SimWorker(simulator, 'coarse', all_queue, [coarse_queue], options))
simulator.add_worker(SimWorker(simulator, 'copy', coarse_queue, copy_queues, options))
for i in range(gpus):
simulator.add_worker(SimWorker(simulator, 'device', copy_queues[i], [mesh_queue], options))
simulator.add_worker(SimWorker(simulator, 'mesher', mesh_queue, [], options))
all_queue.push(root, None)
all_queue.stop()
simulator.run()
print(simulator.time)
def main():
parser = OptionParser()
parser.add_option('--infinite', action = 'store_true')
parser.add_option('--gpus', type = 'int', default = 1)
parser.add_option('--coarse-cap', type = 'int', metavar = 'MiB', default = 512)
parser.add_option('--bucket-cap', type = 'int', metavar = 'MiB', default = 128)
parser.add_option('--mesher-cap', type = 'int', metavar = 'MiB', default = 512)
(options, args) = parser.parse_args()
groups = []
if args:
for fname in args:
with open(fname, 'r') as f:
groups.append(timeplot.load_data(f))
else:
groups.append(timeplot.load_data(sys.stdin))
if len(groups) != 1:
print("Only one group is supported", file = sys.stderr)
sys.exit(1)
group = groups[0]
for worker in group:
if worker.name.endswith('.1'):
print("Only one worker of each type is supported", file = sys.stderr)
sys.exit(1)
root = load_items(group)
simulate(root, options)
if __name__ == '__main__':
main()
| gpl-3.0 |
kc4271/batch_downloader | requests/packages/urllib3/contrib/pyopenssl.py | 304 | 15086 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
encryption in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
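# For example (an illustrative snippet, not part of the original module), the cipher
# list mentioned in the docstring can be overridden before injection; the value shown
# here is only an assumption:
#
# import urllib3.contrib.pyopenssl as pyopenssl
# pyopenssl.DEFAULT_SSL_CIPHER_LIST = 'ECDH+AESGCM:DH+AESGCM:!aNULL:!MD5'
# pyopenssl.inject_into_urllib3()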
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from cStringIO import StringIO
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN certificates a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class fileobject(_fileobject):
def _wait_for_sock(self):
rd, wd, ed = select.select([self._sock], [], [],
self._sock.gettimeout())
if not rd:
raise timeout()
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, connection, socket):
self.connection = connection
self.socket = socket
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
return fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def sendall(self, data):
return self.connection.sendall(data)
def close(self):
return self.connection.shutdown()
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
# Disable TLS compression to migitate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
| mit |
elahejalalpour/ELRyu | ryu/services/protocols/bgp/operator/commands/show/memory.py | 27 | 2971 | import gc
import sys
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.command import STATUS_OK
class Memory(Command):
help_msg = 'show memory information'
command = 'memory'
def __init__(self, *args, **kwargs):
super(Memory, self).__init__(*args, **kwargs)
self.subcommands = {
'summary': self.Summary}
class Summary(Command):
help_msg = 'shows total memory used and how it is getting used'
command = 'summary'
def action(self, params):
count = {}
size = {}
total_size = 0
unreachable = gc.collect()
for obj in gc.get_objects():
inst_name = type(obj).__name__
c = count.get(inst_name, None)
if not c:
count[inst_name] = 0
s = size.get(inst_name, None)
if not s:
size[inst_name] = 0
count[inst_name] += 1
s = sys.getsizeof(obj)
size[inst_name] += s
total_size += s
# Total size in MB
total_size = total_size // 1000000
ret = {
'unreachable': unreachable,
'total': total_size,
'summary': []}
for class_name, s in size.items():
# Calculate size in MB
size_mb = s // 1000000
# We are only interested in class which take-up more than a MB
if size_mb > 0:
ret['summary'].append(
{
'class': class_name,
'instances': count.get(class_name, None),
'size': size_mb
}
)
return CommandsResponse(STATUS_OK, ret)
@classmethod
def cli_resp_formatter(cls, resp):
if resp.status == STATUS_ERROR:
return Command.cli_resp_formatter(resp)
val = resp.value
ret = 'Unreachable objects: {0}\n'.format(
val.get('unreachable', None)
)
ret += 'Total memory used (MB): {0}\n'.format(
val.get('total', None)
)
ret += 'Classes with instances that take-up more than one MB:\n'
ret += '{0:<20s} {1:>16s} {2:>16s}\n'.format(
'Class',
'#Instance',
'Size(MB)'
)
for s in val.get('summary', []):
ret += '{0:<20s} {1:>16d} {2:>16d}\n'.format(
s.get('class', None), s.get('instances', None),
s.get('size', None)
)
return ret
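        # Illustrative shape of the CLI-formatted response (numbers are placeholders):
        #   Unreachable objects: 0
        #   Total memory used (MB): 42
        #   Classes with instances that take-up more than one MB:
        #   Class                       #Instance         Size(MB)
        #   dict                            12345               17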
| apache-2.0 |
drunken-economist/euler-project | 008_largest_product_in_series.py | 1 | 2731 | # The four adjacent digits in the 1000-digit number that have the greatest product are 9 x 9 x 8 x 9 = 5832
# 73167176531330624919225119674426574742355349194934
# 96983520312774506326239578318016984801869478851843
# 85861560789112949495459501737958331952853208805511
# 12540698747158523863050715693290963295227443043557
# 66896648950445244523161731856403098711121722383113
# 62229893423380308135336276614282806444486645238749
# 30358907296290491560440772390713810515859307960866
# 70172427121883998797908792274921901699720888093776
# 65727333001053367881220235421809751254540594752243
# 52584907711670556013604839586446706324415722155397
# 53697817977846174064955149290862569321978468622482
# 83972241375657056057490261407972968652414535100474
# 82166370484403199890008895243450658541227588666881
# 16427171479924442928230863465674813919123162824586
# 17866458359124566529476545682848912883142607690042
# 24219022671055626321111109370544217506941658960408
# 07198403850962455444362981230987879927244284909188
# 84580156166097919133875499200524063689912560717606
# 05886116467109405077541002256983155200055935729725
# 71636269561882670428252483600823257530420752963450
# Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
def max_consec_product(numOfDigits, bigAssNum):
bigAssArray = map(int,str(bigAssNum))
maxProduct = 1
    for startNum in xrange(0, len(bigAssArray) - numOfDigits + 1):  # +1 so the final window is included
        thisProduct = 1
        for i in xrange(0, numOfDigits):  # xrange avoids building a throwaway list on every pass
thisProduct *= bigAssArray[startNum+i]
if thisProduct > maxProduct:
maxProduct = thisProduct
return maxProduct
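# Sanity check implied by the problem statement above: the best 4-digit window is
# 9 x 9 x 8 x 9 = 5832, so max_consec_product(4, <the same 1000-digit number>)
# should return 5832.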
print max_consec_product(13, 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450) | mit |
mastizada/pontoon | pontoon/base/migrations/0049_create_translation_memory_entries.py | 3 | 1418 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_translation_memory_entries(apps, schema):
Translation = apps.get_model('base', 'Translation')
TranslationMemoryEntry = apps.get_model('base', 'TranslationMemoryEntry')
def get_memory_entry(translation):
return TranslationMemoryEntry(
entity_id=translation['entity_id'],
source=translation['entity__string'],
target=translation['string'],
locale_id=translation['locale_id'],
translation_id=translation['pk'],
)
translations = (
Translation.objects.filter(approved=True, fuzzy=False)
.filter(models.Q(plural_form__isnull=True) | models.Q(plural_form=0))
.prefetch_related('entity')
.values('pk', 'entity_id', 'entity__string', 'string', 'locale_id')
)
TranslationMemoryEntry.objects.bulk_create(map(get_memory_entry, translations), 1000)
def remove_translation_memory_entries(apps, schema):
TranslationMemoryEntry = apps.get_model('base', 'TranslationMemoryEntry')
TranslationMemoryEntry.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('base', '0048_translationmemoryentry'),
]
operations = [
migrations.RunPython(create_translation_memory_entries, remove_translation_memory_entries)
]
| bsd-3-clause |
kosgroup/odoo | addons/note/models/note.py | 2 | 5954 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.tools import html2plaintext
class Stage(models.Model):
_name = "note.stage"
_description = "Note Stage"
_order = 'sequence'
name = fields.Char('Stage Name', translate=True, required=True)
sequence = fields.Integer(help="Used to order the note stages", default=1)
user_id = fields.Many2one('res.users', string='Owner', required=True, ondelete='cascade', default=lambda self: self.env.uid, help="Owner of the note stage")
fold = fields.Boolean('Folded by Default')
class Tag(models.Model):
_name = "note.tag"
_description = "Note Tag"
name = fields.Char('Tag Name', required=True)
color = fields.Integer('Color Index')
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class Note(models.Model):
_name = 'note.note'
_inherit = ['mail.thread']
_description = "Note"
_order = 'sequence'
def _get_default_stage_id(self):
return self.env['note.stage'].search([('user_id', '=', self.env.uid)], limit=1)
name = fields.Text(compute='_compute_name', string='Note Summary', store=True)
user_id = fields.Many2one('res.users', string='Owner', default=lambda self: self.env.uid)
memo = fields.Html('Note Content')
sequence = fields.Integer('Sequence')
stage_id = fields.Many2one('note.stage', compute='_compute_stage_id',
inverse='_inverse_stage_id', string='Stage')
stage_ids = fields.Many2many('note.stage', 'note_stage_rel', 'note_id', 'stage_id',
string='Stages of Users', default=_get_default_stage_id)
open = fields.Boolean(string='Active', track_visibility='onchange', default=True)
date_done = fields.Date('Date done')
color = fields.Integer(string='Color Index')
tag_ids = fields.Many2many('note.tag', 'note_tags_rel', 'note_id', 'tag_id', string='Tags')
@api.depends('memo')
def _compute_name(self):
""" Read the first line of the memo to determine the note name """
for note in self:
text = html2plaintext(note.memo) if note.memo else ''
note.name = text.strip().replace('*', '').split("\n")[0]
@api.multi
def _compute_stage_id(self):
for note in self:
for stage in note.stage_ids.filtered(lambda stage: stage.user_id == self.env.user):
note.stage_id = stage
@api.multi
def _inverse_stage_id(self):
for note in self.filtered('stage_id'):
note.stage_ids = note.stage_id + note.stage_ids.filtered(lambda stage: stage.user_id != self.env.user)
@api.model
def name_create(self, name):
return self.create({'memo': name}).name_get()[0]
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
if groupby and groupby[0] == "stage_id":
stages = self.env['note.stage'].search([('user_id', '=', self.env.uid)])
if stages: # if the user has some stages
result = [{ # notes by stage for stages user
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('stage_ids.id', '=', stage.id)],
'stage_id': (stage.id, stage.name),
'stage_id_count': self.search_count(domain + [('stage_ids', '=', stage.id)]),
'__fold': stage.fold,
} for stage in stages]
# note without user's stage
nb_notes_ws = self.search_count(domain + [('stage_ids', 'not in', stages.ids)])
if nb_notes_ws:
# add note to the first column if it's the first stage
dom_not_in = ('stage_ids', 'not in', stages.ids)
if result and result[0]['stage_id'][0] == stages[0].id:
dom_in = result[0]['__domain'].pop()
result[0]['__domain'] = domain + ['|', dom_in, dom_not_in]
result[0]['stage_id_count'] += nb_notes_ws
else:
# add the first stage column
result = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [dom_not_in],
'stage_id': (stages[0].id, stages[0].name),
'stage_id_count': nb_notes_ws,
                            '__fold': stages[0].fold,
}] + result
else: # if stage_ids is empty, get note without user's stage
nb_notes_ws = self.search_count(domain)
if nb_notes_ws:
result = [{ # notes for unknown stage
'__context': {'group_by': groupby[1:]},
'__domain': domain,
'stage_id': False,
'stage_id_count': nb_notes_ws
}]
else:
result = []
return result
return super(Note, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)
@api.multi
def _notification_recipients(self, message, groups):
""" All users can create a new note. """
groups = super(Note, self)._notification_recipients(message, groups)
new_action_id = self.env['ir.model.data'].xmlid_to_res_id('note.action_note_note')
new_action = self._notification_link_helper('new', action_id=new_action_id)
groups['user']['actions'] = [{'url': new_action, 'title': _('New Note')}]
return groups
@api.multi
def action_close(self):
return self.write({'open': False, 'date_done': fields.date.today()})
@api.multi
def action_open(self):
return self.write({'open': True})
| gpl-3.0 |
magicrub/MissionPlanner | Lib/distutils/version.py | 59 | 11732 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# __cmp__ (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE)
def parse (self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError, "invalid version number '%s'" % vstring
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(string.atoi, [major, minor, patch]))
else:
self.version = tuple(map(string.atoi, [major, minor]) + [0])
if prerelease:
self.prerelease = (prerelease[0], string.atoi(prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = string.join(map(str, self.version[0:2]), '.')
else:
vstring = string.join(map(str, self.version), '.')
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def __cmp__ (self, other):
if isinstance(other, StringType):
other = StrictVersion(other)
compare = cmp(self.version, other.version)
if (compare == 0): # have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
return cmp(self.prerelease, other.prerelease)
else: # numeric versions don't match --
return compare # prerelease stuff doesn't matter
# end class StrictVersion
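# Illustrative comparisons implied by the docstring above:
#   StrictVersion('0.4') == StrictVersion('0.4.0')   # True (equivalent forms)
#   StrictVersion('0.5a1') < StrictVersion('0.5')    # True (pre-release sorts earlier)
#   StrictVersion('1.3pl1')                          # raises ValueError (invalid form)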
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = filter(lambda x: x and x != '.',
self.component_re.split(vstring))
for i in range(len(components)):
try:
components[i] = int(components[i])
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str(self)
def __cmp__ (self, other):
if isinstance(other, StringType):
other = LooseVersion(other)
return cmp(self.version, other.version)
# end class LooseVersion
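# Illustrative parses and comparisons, consistent with the notes above:
#   LooseVersion('1.5.2b2').version                  # [1, 5, 2, 'b', 2]
#   LooseVersion('1.5.1') < LooseVersion('1.5.2b2')  # True
#   LooseVersion('1.5.2a2') > LooseVersion('1.5.2')  # True (the caveat discussed above)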
| gpl-3.0 |
ukanga/SickRage | sickbeard/search.py | 3 | 30233 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
import os
import re
import threading
import traceback
import sickbeard
from sickbeard import clients, common, db, failed_history, helpers, history, logger, notifiers, nzbSplitter, nzbget, \
sab, show_name_helpers, ui
from sickbeard.common import MULTI_EP_RESULT, Quality, SEASON_RESULT, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import AuthException, ex
from sickrage.providers.GenericProvider import GenericProvider
def _downloadResult(result):
"""
Downloads a result to the appropriate black hole folder.
:param result: SearchResult instance to download.
:return: boolean, True on success
"""
resProvider = result.provider
if resProvider is None:
logger.log("Invalid provider name - this is a coding error, report it please", logger.ERROR)
return False
# nzbs with an URL can just be downloaded from the provider
if result.resultType == "nzb":
newResult = resProvider.download_result(result)
# if it's an nzb data result
elif result.resultType == "nzbdata":
# get the final file path to the nzb
fileName = ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
logger.log("Saving NZB to " + fileName)
newResult = True
# save the data to disk
try:
with ek(open, fileName, 'w') as fileOut:
fileOut.write(result.extraInfo[0])
helpers.chmodAsParent(fileName)
except EnvironmentError as e:
logger.log("Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
newResult = False
elif result.resultType == "torrent":
newResult = resProvider.download_result(result)
else:
logger.log("Invalid provider type - this is a coding error, report it please", logger.ERROR)
newResult = False
return newResult
def snatchEpisode(result, endStatus=SNATCHED): # pylint: disable=too-many-branches, too-many-statements
"""
Contains the internal logic necessary to actually "snatch" a result that
has been found.
:param result: SearchResult instance to be snatched.
:param endStatus: the episode status that should be used for the episode object once it's snatched.
:return: boolean, True on success
"""
if result is None:
return False
result.priority = 0 # -1 = low, 0 = normal, 1 = high
if sickbeard.ALLOW_HIGH_PRIORITY:
# if it aired recently make it high priority
for curEp in result.episodes:
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
result.priority = 1
endStatus = SNATCHED_PROPER if re.search(r'\b(proper|repack|real)\b', result.name, re.I) else endStatus
if result.url.startswith('magnet') or result.url.endswith('torrent'):
result.resultType = 'torrent'
# NZBs can be sent straight to SAB or saved to disk
if result.resultType in ("nzb", "nzbdata"):
if sickbeard.NZB_METHOD == "blackhole":
dlResult = _downloadResult(result)
elif sickbeard.NZB_METHOD == "sabnzbd":
dlResult = sab.sendNZB(result)
elif sickbeard.NZB_METHOD == "nzbget":
is_proper = True if endStatus == SNATCHED_PROPER else False
dlResult = nzbget.sendNZB(result, is_proper)
elif sickbeard.NZB_METHOD == "download_station":
client = clients.getClientInstance(sickbeard.NZB_METHOD)(
sickbeard.SYNOLOGY_DSM_HOST, sickbeard.SYNOLOGY_DSM_USERNAME, sickbeard.SYNOLOGY_DSM_PASSWORD)
dlResult = client.sendNZB(result)
else:
logger.log("Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
dlResult = False
# Torrents can be sent to clients or saved to disk
elif result.resultType == "torrent":
# torrents are saved to disk when blackhole mode
if sickbeard.TORRENT_METHOD == "blackhole":
dlResult = _downloadResult(result)
else:
if not result.content and not result.url.startswith('magnet'):
if result.provider.login():
result.content = result.provider.get_url(result.url, returns='content')
if result.content or result.url.startswith('magnet'):
client = clients.getClientInstance(sickbeard.TORRENT_METHOD)()
dlResult = client.sendTORRENT(result)
else:
logger.log("Torrent file content is empty", logger.WARNING)
dlResult = False
else:
logger.log("Unknown result type, unable to download it ({0!r})".format(result.resultType), logger.ERROR)
dlResult = False
if not dlResult:
return False
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.logSnatch(result)
ui.notifications.message('Episode snatched', result.name)
history.logSnatch(result)
# don't notify when we re-download an episode
sql_l = []
trakt_data = []
for curEpObj in result.episodes:
with curEpObj.lock:
if isFirstBestMatch(result):
curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
else:
curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
sql_l.append(curEpObj.get_sql())
if curEpObj.status not in Quality.DOWNLOADED:
try:
notifiers.notify_snatch("{0} from {1}".format(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN'), result.provider.name)) # pylint: disable=protected-access
except Exception:
                    # Without this, when a notification fails, it crashes the snatch thread and SR will
# keep snatching until notification is sent
logger.log("Failed to send snatch notification", logger.DEBUG)
trakt_data.append((curEpObj.season, curEpObj.episode))
data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
logger.log("Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
if data:
notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
return True
def pickBestResult(results, show): # pylint: disable=too-many-branches
"""
Find the best result out of a list of search results for a show
:param results: list of result objects
:param show: Shows we check for
:return: best result object
"""
results = results if isinstance(results, list) else [results]
logger.log("Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
bestResult = None
# find the best result for the current episode
for cur_result in results:
if show and cur_result.show is not show:
continue
        # build the black and white list
if show.is_anime:
if not show.release_groups.is_valid(cur_result):
continue
logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
anyQualities, bestQualities = Quality.splitQuality(show.quality)
if cur_result.quality not in anyQualities + bestQualities:
logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
continue
if not show_name_helpers.filter_bad_releases(cur_result.name, parse=False, show=show):
continue
if hasattr(cur_result, 'size'):
if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
cur_result.provider.name):
logger.log(cur_result.name + " has previously failed, rejecting it")
continue
if not bestResult:
bestResult = cur_result
elif cur_result.quality in bestQualities and (bestResult.quality < cur_result.quality or bestResult.quality not in bestQualities):
bestResult = cur_result
elif cur_result.quality in anyQualities and bestResult.quality not in bestQualities and bestResult.quality < cur_result.quality:
bestResult = cur_result
elif bestResult.quality == cur_result.quality:
if "proper" in cur_result.name.lower() or "real" in cur_result.name.lower() or "repack" in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (repack/proper/real over nuked)")
bestResult = cur_result
elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (normal instead of internal)")
bestResult = cur_result
elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (x264 over xvid)")
bestResult = cur_result
if bestResult:
logger.log("Picked " + bestResult.name + " as the best", logger.DEBUG)
else:
logger.log("No result picked.", logger.DEBUG)
return bestResult
def isFinalResult(result):
"""
Checks if the given result is good enough quality that we can stop searching for other ones.
:param result: quality to check
:return: True if the result is the highest quality in both the any/best quality lists else False
"""
logger.log("Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
# if there is a re-download that's higher than this then we definitely need to keep looking
if best_qualities and result.quality < max(best_qualities):
return False
    # if it does not match the show's black and white list it's no good
    elif show_obj.is_anime and not show_obj.release_groups.is_valid(result):
return False
# if there's no re-download that's higher (above) and this is the highest initial download then we're good
elif any_qualities and result.quality in any_qualities:
return True
elif best_qualities and result.quality == max(best_qualities):
return True
    # if we got here then it's either not on the lists, they're empty, or it's lower than the highest required
else:
return False
def isFirstBestMatch(result):
"""
Checks if the given result is a best quality match and if we want to stop searching providers here.
:param result: to check
:return: True if the result is the best quality match else False
"""
logger.log("Checking if we should stop searching for a better quality for for episode " + result.name,
logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities_, best_qualities = Quality.splitQuality(show_obj.quality)
return result.quality in best_qualities if best_qualities else False
def wantedEpisodes(show, fromDate):
"""
Get a list of episodes that we want to download
:param show: Show these episodes are from
:param fromDate: Search from a certain date
:return: list of wanted episodes
"""
wanted = []
if show.paused:
logger.log("Not checking for episodes of {0} because the show is paused".format(show.name), logger.DEBUG)
return wanted
allowed_qualities, preferred_qualities = common.Quality.splitQuality(show.quality)
all_qualities = list(set(allowed_qualities + preferred_qualities))
logger.log("Seeing if we need anything from " + show.name, logger.DEBUG)
con = db.DBConnection()
sql_results = con.select(
"SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
[show.indexerid, fromDate.toordinal()]
)
# check through the list of statuses to see if we want any
for result in sql_results:
cur_status, cur_quality = common.Quality.splitCompositeStatus(int(result[b"status"] or -1))
if cur_status not in {common.WANTED, common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER}:
continue
if cur_status != common.WANTED:
if preferred_qualities:
if cur_quality in preferred_qualities:
continue
elif cur_quality in allowed_qualities:
continue
epObj = show.getEpisode(result[b"season"], result[b"episode"])
epObj.wantedQuality = [i for i in all_qualities if i > cur_quality and i != common.Quality.UNKNOWN]
wanted.append(epObj)
return wanted
def searchForNeededEpisodes():
"""
Check providers for details on wanted episodes
:return: episodes we have a search hit for
"""
foundResults = {}
didSearch = False
show_list = sickbeard.showList
fromDate = datetime.date.fromordinal(1)
episodes = []
for curShow in show_list:
if not curShow.paused:
sickbeard.name_cache.buildNameCache(curShow)
episodes.extend(wantedEpisodes(curShow, fromDate))
if not episodes:
        # nothing wanted so early out, i.e. avoid whatever arbitrarily
# complex thing a provider cache update entails, for example,
# reading rss feeds
logger.log("No episodes needed.", logger.INFO)
return foundResults.values()
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active() and x.enable_daily]
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curProvider.cache.update_cache()
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curFoundResults = {}
try:
curFoundResults = curProvider.search_rss(episodes)
except AuthException as e:
logger.log("Authentication error: " + ex(e), logger.WARNING)
continue
except Exception as e:
logger.log("Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
didSearch = True
# pick a single result for each episode, respecting existing results
for curEp in curFoundResults:
if not curEp.show or curEp.show.paused:
logger.log("Skipping {0} because the show is paused ".format(curEp.prettyName()), logger.DEBUG)
continue
bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
# if all results were rejected move on to the next episode
if not bestResult:
logger.log("All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
continue
# if it's already in the list (from another provider) and the newly found quality is no better then skip it
if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
continue
foundResults[curEp] = bestResult
threading.currentThread().name = origThreadName
if not didSearch:
logger.log(
"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
logger.INFO)
return foundResults.values()
def searchProviders(show, episodes, manualSearch=False, downCurQuality=False): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
"""
Walk providers for information on shows
:param show: Show we are looking for
:param episodes: Episodes we hope to find
:param manualSearch: Boolean, is this a manual search?
:param downCurQuality: Boolean, should we re-download currently available quality file
:return: results for search
"""
foundResults = {}
finalResults = []
didSearch = False
# build name cache for show
sickbeard.name_cache.buildNameCache(show)
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active() and x.enable_backlog]
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curProvider.cache.update_cache()
threading.currentThread().name = origThreadName
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
if curProvider.anime_only and not show.is_anime:
logger.log("" + str(show.name) + " is not an anime, skipping", logger.DEBUG)
continue
foundResults[curProvider.name] = {}
searchCount = 0
search_mode = curProvider.search_mode
# Always search for episode when manually searching when in sponly
if search_mode == 'sponly' and manualSearch is True:
search_mode = 'eponly'
while True:
searchCount += 1
if search_mode == 'eponly':
logger.log("Performing episode search for " + show.name)
else:
logger.log("Performing season pack search for " + show.name)
try:
searchResults = curProvider.find_search_results(show, episodes, search_mode, manualSearch, downCurQuality)
except AuthException as error:
logger.log("Authentication error: {0!r}".format(error), logger.WARNING)
break
except Exception as error:
logger.log("Exception while searching {0}. Error: {1!r}".format(curProvider.name, error), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
break
didSearch = True
if len(searchResults):
# make a list of all the results for this provider
for curEp in searchResults:
if curEp in foundResults[curProvider.name]:
foundResults[curProvider.name][curEp] += searchResults[curEp]
else:
foundResults[curProvider.name][curEp] = searchResults[curEp]
break
elif not curProvider.search_fallback or searchCount == 2:
break
if search_mode == 'sponly':
logger.log("Fallback episode search initiated", logger.DEBUG)
search_mode = 'eponly'
else:
logger.log("Fallback season pack search initiate", logger.DEBUG)
search_mode = 'sponly'
# skip to next provider if we have no results to process
if not foundResults[curProvider.name]:
continue
# pick the best season NZB
bestSeasonResult = None
if SEASON_RESULT in foundResults[curProvider.name]:
bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show)
highest_quality_overall = 0
for cur_episode in foundResults[curProvider.name]:
for cur_result in foundResults[curProvider.name][cur_episode]:
if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
highest_quality_overall = cur_result.quality
logger.log("The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
logger.DEBUG)
# see if every episode is wanted
if bestSeasonResult:
searchedSeasons = {str(x.season) for x in episodes}
# get the quality of the season nzb
seasonQual = bestSeasonResult.quality
logger.log(
"The quality of the season " + bestSeasonResult.provider.provider_type + " is " + Quality.qualityStrings[
seasonQual], logger.DEBUG)
main_db_con = db.DBConnection()
allEps = [int(x[b"episode"])
for x in main_db_con.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
[show.indexerid])]
logger.log(
"Executed query: [SELECT episode FROM tv_episodes WHERE showid = {0} AND season in {1}]".format(show.indexerid, ','.join(searchedSeasons)))
logger.log("Episode list: " + str(allEps), logger.DEBUG)
allWanted = True
anyWanted = False
for curEpNum in allEps:
for season in {x.season for x in episodes}:
if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
allWanted = False
else:
anyWanted = True
# if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
if allWanted and bestSeasonResult.quality == highest_quality_overall:
logger.log(
"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.provider_type + " " + bestSeasonResult.name)
epObjs = []
for curEpNum in allEps:
for season in {x.season for x in episodes}:
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
# Remove provider from thread name before return results
threading.currentThread().name = origThreadName
return [bestSeasonResult]
elif not anyWanted:
logger.log(
"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
logger.DEBUG)
else:
if bestSeasonResult.provider.provider_type == GenericProvider.NZB:
logger.log("Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
# if not, break it apart and add them as the lowest priority results
individualResults = nzbSplitter.split_result(bestSeasonResult)
for curResult in individualResults:
if len(curResult.episodes) == 1:
epNum = curResult.episodes[0].episode
elif len(curResult.episodes) > 1:
epNum = MULTI_EP_RESULT
if epNum in foundResults[curProvider.name]:
foundResults[curProvider.name][epNum].append(curResult)
else:
foundResults[curProvider.name][epNum] = [curResult]
                # If this is a torrent all we can do is leech the entire torrent, user will have to select which eps not to download in their torrent client
else:
# Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
logger.log(
"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
epObjs = []
for curEpNum in allEps:
for season in {x.season for x in episodes}:
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
if MULTI_EP_RESULT in foundResults[curProvider.name]:
foundResults[curProvider.name][MULTI_EP_RESULT].append(bestSeasonResult)
else:
foundResults[curProvider.name][MULTI_EP_RESULT] = [bestSeasonResult]
# go through multi-ep results and see if we really want them or not, get rid of the rest
multiResults = {}
if MULTI_EP_RESULT in foundResults[curProvider.name]:
for _multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
logger.log("Seeing if we want to bother with multi-episode result " + _multiResult.name, logger.DEBUG)
# Filter result by ignore/required/whitelist/blacklist/quality, etc
multiResult = pickBestResult(_multiResult, show)
if not multiResult:
continue
# see how many of the eps that this result covers aren't covered by single results
neededEps = []
notNeededEps = []
for epObj in multiResult.episodes:
# if we have results for the episode
if epObj.episode in foundResults[curProvider.name] and len(foundResults[curProvider.name][epObj.episode]) > 0:
notNeededEps.append(epObj.episode)
else:
neededEps.append(epObj.episode)
logger.log(
"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
logger.DEBUG)
if not neededEps:
logger.log("All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
continue
# check if these eps are already covered by another multi-result
multiNeededEps = []
multiNotNeededEps = []
for epObj in multiResult.episodes:
if epObj.episode in multiResults:
multiNotNeededEps.append(epObj.episode)
else:
multiNeededEps.append(epObj.episode)
logger.log(
"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
multiNotNeededEps), logger.DEBUG)
if not multiNeededEps:
logger.log(
"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
logger.DEBUG)
continue
# don't bother with the single result if we're going to get it with a multi result
for epObj in multiResult.episodes:
multiResults[epObj.episode] = multiResult
if epObj.episode in foundResults[curProvider.name]:
logger.log(
"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
epObj.episode) + ", removing the single-episode results from the list", logger.DEBUG)
del foundResults[curProvider.name][epObj.episode]
# of all the single ep results narrow it down to the best one for each episode
finalResults += set(multiResults.values())
for curEp in foundResults[curProvider.name]:
if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
continue
if not foundResults[curProvider.name][curEp]:
continue
# if all results were rejected move on to the next episode
bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
if not bestResult:
continue
            # add the result if it's not a duplicate
found = False
for i, result in enumerate(finalResults):
for bestResultEp in bestResult.episodes:
if bestResultEp in result.episodes:
if result.quality < bestResult.quality:
finalResults.pop(i)
else:
found = True
if not found:
finalResults += [bestResult]
# check that we got all the episodes we wanted first before doing a match and snatch
wantedEpCount = 0
for wantedEp in episodes:
for result in finalResults:
if wantedEp in result.episodes and isFinalResult(result):
wantedEpCount += 1
# make sure we search every provider for results unless we found everything we wanted
if wantedEpCount == len(episodes):
break
if not didSearch:
logger.log("No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
logger.INFO)
# Remove provider from thread name before return results
threading.currentThread().name = origThreadName
return finalResults
| gpl-3.0 |
ghchinoy/tensorflow | tensorflow/python/tools/freeze_graph_test.py | 4 | 13446 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
class FreezeGraphTest(test_util.TensorFlowTestCase):
def _testFreezeGraph(self, saver_write_version):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# We'll create an input graph that has a single variable containing 1.0,
# and that then multiplies it by 2.
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver(write_version=saver_write_version)
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# We save out the graph to disk, and then call the const conversion
# routine.
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
input_saver_def_path = ""
input_binary = False
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
clear_devices = False
freeze_graph.freeze_graph(
input_graph_path,
input_saver_def_path,
input_binary,
checkpoint_path,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph_path,
clear_devices,
"",
"",
"",
checkpoint_version=saver_write_version)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def _createTFExampleString(self, feature_name, feature_value):
"""Create a serialized tensorflow example."""
example = example_pb2.Example()
example.features.feature[feature_name].float_list.value.extend([
feature_value])
return example.SerializeToString()
def _writeDummySavedModel(self, path, feature_name):
"""Writes a classifier with two input features to the given path."""
with ops.Graph().as_default():
examples = array_ops.placeholder(dtypes.string, name="input_node")
feature_configs = {
feature_name: parsing_ops.FixedLenFeature(shape=[],
dtype=dtypes.float32),
}
features = parsing_ops.parse_example(examples, feature_configs)
feature = features[feature_name]
variable_node = variables.VariableV1(1.0, name="variable_node")
scores = math_ops.multiply(variable_node, feature, name="output_node")
class_feature = array_ops.fill(array_ops.shape(feature),
"class_%s" % feature_name)
classes = array_ops.transpose(class_feature)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
signature = (
signature_def_utils.classification_signature_def(
examples=examples,
classes=classes,
scores=scores,))
builder = saved_model_builder.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
sess,
[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature,
},)
builder.save(as_text=True)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV1(self):
self._testFreezeGraph(saver_pb2.SaverDef.V1)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV2(self):
self._testFreezeGraph(saver_pb2.SaverDef.V2)
def testFreezeMetaGraph(self):
tmp_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
checkpoint_state_name = "checkpoint_state"
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
input_saver_def_path = ""
input_binary = True
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
clear_devices = False
input_meta_graph = checkpoint_path + ".meta"
freeze_graph.freeze_graph(
"", input_saver_def_path, input_binary, checkpoint_path,
output_node_names, restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "", input_meta_graph)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def testFreezeSavedModel(self):
tmp_dir = self.get_temp_dir()
saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
feature_name = "feature"
self._writeDummySavedModel(saved_model_dir, feature_name)
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
input_saved_model_dir = saved_model_dir
output_node_names = "output_node"
input_binary = False
input_saver_def_path = False
restore_op_name = None
filename_tensor_name = None
clear_devices = False
input_meta_graph = False
checkpoint_path = None
input_graph_filename = None
saved_model_tags = tag_constants.SERVING
freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
input_binary, checkpoint_path, output_node_names,
restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "",
input_meta_graph, input_saved_model_dir,
saved_model_tags)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(8, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
feature_value = 2.0
example = self._createTFExampleString(feature_name, feature_value)
with session.Session() as sess:
input_node = sess.graph.get_tensor_by_name("input_node:0")
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node, feed_dict={input_node: [example]})
self.assertNear(feature_value, output, 0.00001)
def testSinglePartitionedVariable(self):
"""Ensures partitioned variables fail cleanly with freeze graph."""
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# Create a graph with partition variables. When weights are partitioned into
    # a single partition, the weights variable is followed by an identity ->
# identity (an additional identity node).
partitioner = partitioned_variables.fixed_size_partitioner(1)
with ops.Graph().as_default():
with variable_scope.variable_scope("part", partitioner=partitioner):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros(
(batch_size, height, width, depth), name="input1")
input2 = array_ops.zeros(
(batch_size, height, width, depth), name="input2")
num_nodes = depth
filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
conv = nn.conv2d(
input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
node = math_ops.add(conv, input2, name="test/add")
node = nn.relu6(node, name="test/relu6")
# Save graph and checkpoints.
sess = session.Session()
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# Ensure this graph has partition variables.
self.assertTrue([
tensor.name.split(":")[0]
for op in sess.graph.get_operations()
for tensor in op.values()
if re.search(r"/part_\d+/", tensor.name)
])
# Test freezing graph doesn't make it crash.
output_node_names = "save/restore_all"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
with self.assertRaises(ValueError):
freeze_graph.freeze_graph_with_def_protos(
input_graph_def=sess.graph_def,
input_saver_def=None,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name="save/restore_all", # default value
filename_tensor_name="save/Const:0", # default value
output_graph=output_graph_path,
clear_devices=False,
initializer_nodes="")
if __name__ == "__main__":
test.main()
| apache-2.0 |
2014cdbg4/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/types.py | 756 | 3167 | """
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)
del sys, _f, _g, _C, # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
"""Create a class object dynamically using the appropriate metaclass."""
meta, ns, kwds = prepare_class(name, bases, kwds)
if exec_body is not None:
exec_body(ns)
return meta(name, bases, ns, **kwds)
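# A minimal illustrative use (Meta and x are placeholder names):
#   class Meta(type): pass
#   C = new_class("C", (object,), {"metaclass": Meta}, lambda ns: ns.update(x=1))
#   assert type(C) is Meta and C.x == 1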
def prepare_class(name, bases=(), kwds=None):
"""Call the __prepare__ method of the appropriate metaclass.
Returns (metaclass, namespace, kwds) as a 3-tuple
*metaclass* is the appropriate metaclass
*namespace* is the prepared class namespace
*kwds* is an updated copy of the passed in kwds argument with any
'metaclass' entry removed. If no kwds argument is passed in, this will
be an empty dict.
"""
if kwds is None:
kwds = {}
else:
kwds = dict(kwds) # Don't alter the provided mapping
if 'metaclass' in kwds:
meta = kwds.pop('metaclass')
else:
if bases:
meta = type(bases[0])
else:
meta = type
if isinstance(meta, type):
# when meta is a type, we first determine the most-derived metaclass
# instead of invoking the initial candidate directly
meta = _calculate_meta(meta, bases)
if hasattr(meta, '__prepare__'):
ns = meta.__prepare__(name, bases, **kwds)
else:
ns = {}
return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
| gpl-2.0 |
BeZazz/lamebench | nb_third_party/dns/rdtypes/IN/WKS.py | 248 | 4116 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import socket
import struct
import dns.ipv4
import dns.rdata
_proto_tcp = socket.getprotobyname('tcp')
_proto_udp = socket.getprotobyname('udp')
class WKS(dns.rdata.Rdata):
"""WKS record
@ivar address: the address
@type address: string
@ivar protocol: the protocol
@type protocol: int
@ivar bitmap: the bitmap
@type bitmap: string
@see: RFC 1035"""
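    # In zone-file text form a WKS rdata looks roughly like (illustrative values):
    #   192.0.2.1 6 25 80    -> address, protocol number, then the open ports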
__slots__ = ['address', 'protocol', 'bitmap']
def __init__(self, rdclass, rdtype, address, protocol, bitmap):
super(WKS, self).__init__(rdclass, rdtype)
self.address = address
self.protocol = protocol
self.bitmap = bitmap
def to_text(self, origin=None, relativize=True, **kw):
bits = []
for i in xrange(0, len(self.bitmap)):
byte = ord(self.bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(str(i * 8 + j))
text = ' '.join(bits)
return '%s %d %s' % (self.address, self.protocol, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_string()
protocol = tok.get_string()
if protocol.isdigit():
protocol = int(protocol)
else:
protocol = socket.getprotobyname(protocol)
bitmap = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if token.value.isdigit():
serv = int(token.value)
else:
if protocol != _proto_udp and protocol != _proto_tcp:
raise NotImplementedError("protocol must be TCP or UDP")
if protocol == _proto_udp:
protocol_text = "udp"
else:
protocol_text = "tcp"
serv = socket.getservbyname(token.value, protocol_text)
i = serv // 8
l = len(bitmap)
if l < i + 1:
for j in xrange(l, i + 1):
bitmap.append('\x00')
bitmap[i] = chr(ord(bitmap[i]) | (0x80 >> (serv % 8)))
bitmap = dns.rdata._truncate_bitmap(bitmap)
return cls(rdclass, rdtype, address, protocol, bitmap)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(dns.ipv4.inet_aton(self.address))
protocol = struct.pack('!B', self.protocol)
file.write(protocol)
file.write(self.bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = dns.ipv4.inet_ntoa(wire[current : current + 4])
protocol, = struct.unpack('!B', wire[current + 4 : current + 5])
current += 5
rdlen -= 5
bitmap = wire[current : current + rdlen]
return cls(rdclass, rdtype, address, protocol, bitmap)
from_wire = classmethod(from_wire)
def _cmp(self, other):
sa = dns.ipv4.inet_aton(self.address)
oa = dns.ipv4.inet_aton(other.address)
v = cmp(sa, oa)
if v == 0:
sp = struct.pack('!B', self.protocol)
op = struct.pack('!B', other.protocol)
v = cmp(sp, op)
if v == 0:
v = cmp(self.bitmap, other.bitmap)
return v
| apache-2.0 |
kod3r/neon | neon/backends/tests/test_batched_dot.py | 10 | 3638 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# pylint: skip-file
"""
Test batched_dot behaviors of the NervanaCPU and NervanaGPU backends
against numpy.
NervanaGPU supports N as either the inner or the outer dimension.
NervanaCPU only supports N as the inner dimension, since this is what we use.
"""
import numpy as np
from neon.backends.nervanagpu import NervanaGPU
from neon.backends.nervanacpu import NervanaCPU
from neon.backends.tests.utils import assert_tensors_allclose
size = 32 # size input for GPU - 32, 64, 128, None=auto
def setup_test_data(X, N, C, K, dtype):
dimW = (K, C)
dimI = (X, C, N)
dimO = (X, K, N)
cpuI = np.random.uniform(-1.0, 1.0, dimI).astype(dtype)
cpuE = np.random.uniform(-1.0, 1.0, dimO).astype(dtype)
cpuW = np.random.uniform(-1.0, 1.0, dimW).astype(dtype)
# set_trace()
return cpuI, cpuE, cpuW
def run_batched_dot(lib, I, E, W, X, dtype):
devI = lib.array(I, dtype=dtype)
devE = lib.array(E, dtype=dtype)
devW = lib.array(W, dtype=dtype)
devO = lib.zeros(E.shape, dtype=dtype)
devB = lib.zeros(I.shape, dtype=dtype)
devU = lib.zeros(W.shape, dtype=dtype)
if isinstance(lib, NervanaCPU):
lib.batched_dot(devW, devI, devO) # fprop
lib.batched_dot(devW.T, devE, devB) # bprop
lib.batched_dot(devE, devI.T, devU) # update
elif isinstance(lib, NervanaGPU):
lib.batched_dot(devW, devI, devO, size=size) # fprop
lib.batched_dot(devW.T, devE, devB, size=size) # bprop
lib.batched_dot(devE, devI.T, devU, size=size) # update
else:
# set_trace()
for i in range(X):
devO[i] = np.dot(W, I[i]) # fprop
devB[i] = np.dot(W.T, E[i]) # bprop
devU += np.dot(E[i], I[i].T) # update
return devO, devB, devU
def test_batched_dot():
np.set_printoptions(threshold=8192 * 4, linewidth=600,
formatter={'int': lambda x: "%2d" % x, 'float': lambda x: "%2.0f" % x})
ng = NervanaGPU(stochastic_round=False, bench=1)
nc = NervanaCPU()
dtype = np.float32 # np.float16 or np.float32
X = 100 # Batch Size
N = 32 # Minibatch Size
C = 1536 # Input Features
K = 768 # Output Features
cpuI, cpuE, cpuW = setup_test_data(X, N, C, K, dtype)
ngO, ngB, ngU = run_batched_dot(ng, cpuI, cpuE, cpuW, X, dtype)
ncO, ncB, ncU = run_batched_dot(nc, cpuI, cpuE, cpuW, X, dtype)
npO, npB, npU = run_batched_dot(np, cpuI, cpuE, cpuW, X, dtype)
# set_trace()
assert_tensors_allclose(npO, ngO, rtol=0, atol=1e-3)
assert_tensors_allclose(npB, ngB, rtol=0, atol=1e-3)
assert_tensors_allclose(npU, ngU, rtol=0, atol=1e-3)
assert_tensors_allclose(npO, ncO, rtol=0, atol=1e-3)
assert_tensors_allclose(npB, ncB, rtol=0, atol=1e-3)
assert_tensors_allclose(npU, ncU, rtol=0, atol=1e-3)
ng.ctx.detach()
del(ng)
| apache-2.0 |
cntnboys/410Lab6 | build/django/build/lib.linux-x86_64-2.7/django/db/backends/mysql/validation.py | 65 | 1311 | from django.core import checks
from django.db.backends import BaseDatabaseValidation
class DatabaseValidation(BaseDatabaseValidation):
def check_field(self, field, **kwargs):
"""
MySQL has the following field length restriction:
No character (varchar) fields can have a length exceeding 255
characters if they have a unique index on them.
"""
from django.db import connection
errors = super(DatabaseValidation, self).check_field(field, **kwargs)
# Ignore any related fields.
if getattr(field, 'rel', None) is None:
field_type = field.db_type(connection)
# Ignore any non-concrete fields
if field_type is None:
return errors
if (field_type.startswith('varchar') # Look for CharFields...
and field.unique # ... that are unique
and (field.max_length is None or int(field.max_length) > 255)):
errors.append(
checks.Error(
('MySQL does not allow unique CharFields to have a max_length > 255.'),
hint=None,
obj=field,
id='mysql.E001',
)
)
return errors
| apache-2.0 |
jostep/tensorflow | tensorflow/python/kernel_tests/string_to_number_op_test.py | 104 | 4041 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToNumber op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
_ERROR_MESSAGE = "StringToNumberOp could not correctly convert string: "
class StringToNumberOpTest(test.TestCase):
def _test(self, tf_type, good_pairs, bad_pairs):
with self.test_session():
# Build a small testing graph.
input_string = array_ops.placeholder(dtypes.string)
output = parsing_ops.string_to_number(
input_string, out_type=tf_type)
# Check all the good input/output pairs.
for instr, outnum in good_pairs:
result, = output.eval(feed_dict={input_string: [instr]})
self.assertAllClose([outnum], [result])
# Check that the bad inputs produce the right errors.
for instr, outstr in bad_pairs:
with self.assertRaisesOpError(outstr):
output.eval(feed_dict={input_string: [instr]})
def testToFloat(self):
self._test(dtypes.float32,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", float("INF")),
("-3.40283e+38", float("-INF")),
# Less than min value of float.
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToDouble(self):
self._test(dtypes.float64,
[("0", 0), ("3", 3), ("-1", -1),
("1.12", 1.12), ("0xF", 15), (" -10.5", -10.5),
("3.40282e+38", 3.40282e+38),
# Greater than max value of float.
("3.40283e+38", 3.40283e+38),
# Less than min value of float.
("-3.40283e+38", -3.40283e+38),
("NAN", float("NAN")),
("INF", float("INF"))],
[("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToInt32(self):
self._test(dtypes.int32,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647)],
[ # Less than min value of int32.
("-2147483649", _ERROR_MESSAGE + "-2147483649"),
# Greater than max value of int32.
("2147483648", _ERROR_MESSAGE + "2147483648"),
("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
def testToInt64(self):
self._test(dtypes.int64,
[("0", 0), ("3", 3), ("-1", -1),
(" -10", -10),
("-2147483648", -2147483648),
("2147483647", 2147483647),
("-2147483649", -2147483649), # Less than min value of int32.
("2147483648", 2147483648)], # Greater than max value of int32.
[("2.9", _ERROR_MESSAGE + "2.9"),
("10foobar", _ERROR_MESSAGE + "10foobar")])
if __name__ == "__main__":
test.main()
| apache-2.0 |
zhwei/cabric | cabric/perm.py | 1 | 2522 | # -*- coding: utf-8 -*-
from fabric.api import *
from cabric.cmd import cmd_expanduser,cmd_su
import os
def put_public_key(path=None, user=None):
"""
    Upload a public key to the remote server.
    Limit: standard OpenSSH key; it must be commented with the user's email address.
    :param path: local path
    :param user: remote username
:return:
"""
if os.path.exists(os.path.expanduser(path)) is False:
abort("public key not exist")
else:
        # Determine whether the key already exists by reading its trailing
        # comment; a key without a comment is treated as invalid.
fp = open(os.path.expanduser(path))
pub_key = fp.read()
pos = pub_key.rfind(" ")
mail = pub_key[pos + 1:].strip()
if mail.find('@') == -1:
abort('please add comment WHO YOU ARE.')
if user:
user_path = cmd_expanduser(user)
else:
user_path = '~'
remote_root = '%s/.ssh' % user_path
remote_path = '%s/authorized_keys' % remote_root
with settings(warn_only=True):
if run('test -d %s' % remote_root).failed:
cmd_su('mkdir %s' % remote_root, user)
if user:
run('chown %s.%s %s' % (user, user, remote_root))
put(path, '/tmp/tmp.pub', mode=0644)
cmd_su('grep %s %s | cat /tmp/tmp.pub >> %s' % (mail, remote_path, remote_path),user)
if user:
run('chown %s.%s %s' % (user, user, remote_path))
pass
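# Usage sketch (hypothetical host and user; assumes this module is imported
# from a fabfile so fabric can invoke it as a task):
#   fab -H deploy.example.com put_public_key:path=~/.ssh/id_rsa.pub,user=deploy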
def put_private_key(path=None, user=None):
"""
Upload private key to remote server
    Limit: must be a standard key generated by ssh-keygen.
    :param path: local path
    :param user: remote username
"""
if os.path.exists(os.path.expanduser(path)) is False:
abort("private key not exist")
else:
        # detect the key type (DSA vs RSA) from the header line
fp = open(os.path.expanduser(path))
private_key = fp.read()
pos = private_key.find("\n")
if private_key[0:pos].find('DSA') > -1:
dsa = True
else:
dsa = False
user_path = cmd_expanduser(user)
remote_root = '%s/.ssh' % user_path
if dsa:
remote_path = '%s/id_dsa' % remote_root
else:
remote_path = '%s/id_rsa' % remote_root
with settings(warn_only=True):
if run('test -d %s' % remote_root).failed:
if user:
run('chown -Rf %s.%s %s' % (user, user, user_path))
cmd_su('mkdir %s' % remote_root,user)
put(path, remote_path, mode=0600)
if user:
run('chown %s.%s %s' % (user, user, remote_path))
pass
| mit |
vvv1559/intellij-community | python/lib/Lib/site-packages/django/utils/_os.py | 71 | 2011 | import os
from os.path import join, normcase, normpath, abspath, isabs, sep
from django.utils.encoding import force_unicode
# Define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII
# characters in it. This isn't necessary on Windows since the
# Windows version of abspath handles this correctly. The Windows
# abspath also handles drive letters differently than the pure
# Python implementation, so it's best not to replace it.
if os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
# We need to use normcase to ensure we don't false-negative on case
# insensitive operating systems (like Windows).
base = force_unicode(base)
paths = [force_unicode(p) for p in paths]
final_path = normcase(abspathu(join(base, *paths)))
base_path = normcase(abspathu(base))
base_path_len = len(base_path)
# Ensure final_path starts with base_path and that the next character after
# the final path is os.sep (or nothing, in which case final_path must be
# equal to base_path).
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', sep):
raise ValueError('The joined path (%s) is located outside of the base '
'path component (%s)' % (final_path, base_path))
return final_path
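# Example behaviour (an illustrative sketch with hypothetical paths):
#   safe_join('/srv/site', 'media', 'logo.png')  -> u'/srv/site/media/logo.png'
#   safe_join('/srv/site', '../etc/passwd')      -> raises ValueError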
| apache-2.0 |
sparkslabs/kamaelia_ | Sketches/DK/Kamaelia-Paint/App/XYPad.py | 3 | 22735 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
# * Convert to vectors?
"""
=============
XY Pad Widget
=============
An XY pad widget with a draggable, bouncing puck. Pick up data on the
"outbox" outbox to receive the position of the puck and messages indicating
when it has touched one of the sides.
Example Usage
-------------
Create an XY pad which redraws 60 times per second:
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
clock = Clock(float(1)/60).activate()
xyPad = XYPad().activate()
clock.link((clock, "outbox"), (xyPad, "newframe"))
How Does it Work?
-----------------
The component requests a display surface from the Pygame Display service
component. This is used as the surface of the XY pad. It binds listeners for
mouse click and motion to the service.
The component works in one of two different modes, bouncing and non-bouncing.
This is specified upon initialization by the bouncingPuck argument.
In the bouncing mode the puck will continue to move once it has been set into
motion by a mouse drag. If the mouse button remains down for longer than 0.1
seconds it is deemed to be a drag. In the bouncing mode the component sends a
(message, 1) tuple to the "outbox" outbox each time the puck collides with one
of the sides. The messages can be changed using the collisionMsg argument.
They default to "Top", "Right", "Bottom", "Left".
In the non-bouncing mode the puck remains stationary after it has been dragged.
Both modes send a (positionMsg, (x, y)) tuple to the "outbox" outbox if the
puck moves.
If the editable argument to the constructor is set to be false the pad will not
respond to mouse presses.
As well as being controlled by the mouse an XY pad can be controlled externally,
for example by a second XY pad. Position and velocity messages received on the
"remoteChanges" inbox are used to change the motion of the puck. Position
messages are of the form ("Position", (xPos, yPos)), and velocity messages are
of the form ("Velocity", (xVel, yVel)).
In order to allow communication between two XY pads the component outputs
position and velocity messages to the "localChanges" outbox. By connecting the
"localChanges" outbox of one XY pad to the "remoteChanges" inbox of another,
the second pad can duplicate the motion of the first.
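For example, a second pad can be made to mirror the first (a sketch; the
position argument is arbitrary and both components still need activating):
xyPadA = XYPad().activate()
xyPadB = XYPad(position=(120, 0)).activate()
xyPadA.link((xyPadA, "localChanges"), (xyPadB, "remoteChanges"))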
The XY pad only redraws the surface and updates the puck position when it
receives a message on its "newframe" inbox. Note that although providing
messages more frequently here will lead to more frequent updates, it will also
lead to higher CPU usage.
The visual appearance of the pad can be specified by arguments to the
constructor. The size, position and colours are all adjustable.
If a producerFinished or shutdownMicroprocess message is received on its
"control" inbox, it is passed on out of its "signal" outbox and the component
terminates.
"""
import time
import pygame
import Axon
from Axon.Ipc import producerFinished, WaitComplete
from Kamaelia.UI.Pygame.Display import PygameDisplay
from Kamaelia.UI.Pygame.Button import Button
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
class XYPad(Axon.Component.component):
"""\
XYPad([bouncingPuck, position, bgcolour, fgcolour, positionMsg,
collisionMsg, size]) -> new XYPad component.
Create an XY pad widget using the Pygame Display service. Sends messages
for position and direction changes out of its "outbox" outbox.
Keyword arguments (all optional):
bouncingPuck -- whether the puck will continue to move after it has been
dragged (default=True)
position -- (x,y) position of top left corner in pixels
bgcolour -- (r,g,b) fill colour (default=(255,255,255))
    fgcolour -- (r, g, b) colour of the puck and border
messagePrefix -- string to be prepended to all messages
positionMsg -- sent as the first element of a (positionMsg, 1) tuple when
the puck moves
collisionMsg -- (t, r, b, l) sent as the first element of a
(collisionMsg[i], 1) tuple when the puck hits a side
(default = ("top", "right", "bottom", "left"))
size -- (w,h) in pixels (default=(100, 100))
"""
Inboxes = {"inbox" : "Receive events from Pygame Display",
"remoteChanges" : "Receive messages to alter the state of the XY pad",
"control" : "For shutdown messages",
"callback" : "Receive callbacks from Pygame Display",
"newframe" : "Recieve messages indicating a new frame is to be drawn",
"buttons" : "Recieve interrupts from the buttons"
}
Outboxes = {"outbox" : "XY positions emitted here",
"localChanges" : "Messages indicating change in the state of the XY pad emitted here",
"signal" : "For shutdown messages",
"display_signal" : "Outbox used for communicating to the display surface"
}
def __init__(self, bouncingPuck=True, position=None,
bgcolour=(255, 255, 255), fgcolour=(0, 0, 0),
messagePrefix = "",
positionMsg="Position",
colours="RG",
selectedColour = (0,0,0),
saturator=False,
slider = False,
alpha = False,
colourSelector = False,
collisionMsg = ("Top", "Right", "Bottom", "Left"),
size=(100, 100), editable=True):
"""
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
"""
super(XYPad, self).__init__()
self.size = size
# Does the puck bounce around
self.bouncingPuck = bouncingPuck
# Is the puck currently bouncing around
self.isBouncing = False
self.selectedColour = selectedColour
self.mouseDown = False
self.clickTime = None
self.mousePositions = []
self.lastMousePos = (0, 0)
self.colourSelector = colourSelector
self.saturator = saturator
self.puckRadius = 10
self.puckPos = [self.size[0]/2, self.size[1]/2]
self.puckVel = [0, 0]
self.alpha = alpha
self.selectedAlpha = 255
self.slider = slider
self.selectedSize = 3
self.borderWidth = 5
self.bgcolour = bgcolour
self.fgcolour = fgcolour
self.colours = colours
self.messagePrefix = messagePrefix
self.positionMsg = positionMsg
self.collisionMsg = collisionMsg
self.editable = editable
self.dispRequest = {"DISPLAYREQUEST" : True,
"callback" : (self,"callback"),
"events" : (self, "inbox"),
"size": self.size,
}
if position:
self.dispRequest["position"] = position
def waitBox(self, boxName):
"""Wait for a message on boxName inbox"""
while 1:
if self.dataReady(boxName):
return
else:
yield 1
def main(self):
"""Main loop."""
# pgd = PygameDisplay( width=300, height=550 ).activate()
# PygameDisplay.setDisplayService(pgd)
displayservice = PygameDisplay.getDisplayService()
self.link((self,"display_signal"), displayservice)
self.send( self.dispRequest,
"display_signal")
for _ in self.waitBox("callback"): yield 1
self.display = self.recv("callback")
# colour buttons
if self.colourSelector:
rgbutton = Button(caption="Red/Green",position=(10,170), msg = ("Colour", "RG")).activate()
rbbutton = Button(caption="Red/Blue",position=(80,170), msg = ("Colour", "RB")).activate()
gbbutton = Button(caption="Green/Blue",position=(145,170), msg = ("Colour", "GB")).activate()
self.link( (rgbutton,"outbox"), (self,"buttons") )
self.link( (rbbutton,"outbox"), (self,"buttons") )
self.link( (gbbutton,"outbox"), (self,"buttons") )
# tool buttons
circleb = Button(caption="Circle",position=(10,10), msg = (("Tool", "Circle"),)).activate()
eraseb = Button(caption="Eraser",position=(100,10), msg = (("Tool", "Eraser"),)).activate()
lineb = Button(caption="Line",position=(10,50), msg = (("Tool", "Line"),)).activate()
bucketb = Button(caption="Bucket",position=(10,90), msg = (("Tool", "Bucket"),)).activate()
eyeb = Button(caption="Eyedropper",position=(10,130), msg = (("Tool", "Eyedropper"),)).activate()
addlayerb = Button(caption="Add Layer",position=(10,540), msg = (("Layer", "Add"),)).activate()
prevlayerb = Button(caption="<-",position=(80,540), msg = (("Layer", "Prev"),)).activate()
nextlayerb = Button(caption="->",position=(110,540), msg = (("Layer", "Next"),)).activate()
dellayerb = Button(caption="Delete",position=(140,540), msg = (("Layer", "Delete"),)).activate()
self.link( (circleb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (eraseb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (lineb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (bucketb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (eyeb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (addlayerb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (prevlayerb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (nextlayerb,"outbox"), (self,"outbox"), passthrough = 2 )
self.link( (dellayerb,"outbox"), (self,"outbox"), passthrough = 2 )
SizePicker = XYPad(size=(255, 50), bouncingPuck = False, position = (10, 480),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255), slider = True).activate()
self.link( (SizePicker,"outbox"), (self,"outbox"), passthrough = 2 )
AlphaPicker = XYPad(size=(255, 20), bouncingPuck = False, position = (10, 575),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255), slider = True, alpha = True).activate()
self.link( (AlphaPicker,"outbox"), (self,"outbox"), passthrough = 2 )
#clock - don't really need this
FPS = 60
clock = Clock(float(1)/FPS).activate()
self.link((clock, "outbox"), (self, "newframe"))
# Initial render so we don't see a blank screen
self.drawBG()
# self.render()
if self.editable:
self.send({"ADDLISTENEVENT" : pygame.MOUSEBUTTONDOWN,
"surface" : self.display},
"display_signal")
self.send({"ADDLISTENEVENT" : pygame.MOUSEBUTTONUP,
"surface" : self.display},
"display_signal")
self.send({"ADDLISTENEVENT" : pygame.MOUSEMOTION,
"surface" : self.display},
"display_signal")
done = False
while not done:
if not self.anyReady():
self.pause()
yield 1
while self.dataReady("buttons"):
bmsg = self.recv("buttons")
if bmsg[0]=="Colour":
self.colours = bmsg[1]
self.drawBG()
while self.dataReady("control"):
cmsg = self.recv("control")
if (isinstance(cmsg, producerFinished)):
self.send(cmsg, "signal")
done = True
while self.dataReady("inbox"):
for event in self.recv("inbox"):
if event.type == pygame.MOUSEBUTTONDOWN:
self.clickTime = time.time()
if self.slider:
self.sliderPos = event.pos[0]
self.drawBG()
if self.display.get_rect().collidepoint(*event.pos):
self.mouseDown = True
self.isBouncing = False
self.mousePositions = []
self.puckVel = [0, 0]
self.puckPos = list(event.pos)
self.lastMousePos = event.pos
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])),
"localChanges")
self.send((self.messagePrefix + "Velocity",
self.puckVel), "localChanges")
if event.type == pygame.MOUSEBUTTONUP:
if self.mouseDown:
if self.slider:
self.sliderPos = event.pos[0]
self.drawBG()
if (self.bouncingPuck and
time.time() - self.clickTime > 0.1):
# Click and drag
self.isBouncing = True
if len(self.mousePositions):
for i in xrange(2):
# Use the average of the last 50
# relative mouse positions
positions = [x[i] for x in self.mousePositions]
self.puckVel[i] = sum(positions)
self.puckVel[i] /= float(len(positions))
else:
# Just a click
self.puckVel = [0, 0]
self.render()
self.send((self.messagePrefix + "Velocity",
self.puckVel), "localChanges")
self.mouseDown = False
if event.type == pygame.MOUSEMOTION and self.mouseDown:
if self.slider:
self.sliderPos = event.pos[0]
self.drawBG()
if self.display.get_rect().collidepoint(*event.pos):
# We are dragging inside the display
# Keep a buffer of 50 mouse positions
if len(self.mousePositions) > 50:
del self.mousePositions[0]
relPos = []
for i in xrange(2):
relPos.append(event.pos[i] -
self.lastMousePos[i])
self.mousePositions.append(relPos)
# Move the puck to where the mouse is and remember
# where it is
self.puckPos = list(event.pos)
self.lastMousePos = event.pos
self.send((self.messagePrefix + self.positionMsg,
(float(self.puckPos[0])/self.size[0],
float(self.puckPos[1])/self.size[1])),
"localChanges")
self.render()
if self.dataReady("remoteChanges"):
bundle = self.recv("remoteChanges")
# The action to take is given by the last section of the
# OSC address - this should maybe be done by a component and
# we just listen for ("Velocity", (xVel, yVel)) tuples
action = bundle[0].split("/")[-1]
if action == "Velocity":
if self.bouncingPuck:
self.puckVel = bundle[1]
self.isBouncing = 1
elif action == "Position":
for i in xrange(2):
self.puckPos[i] = self.size[i] * bundle[1][i]
self.render()
if self.dataReady("newframe"):
# Time to render a new frame
# Clear any backlog of render messages
while self.dataReady("newframe"):
self.recv("newframe")
# Change the direction of the puck if it hits a wall
if self.isBouncing:
self.processCollisions()
if self.isBouncing:
# Update the position
for i in xrange(2):
self.puckPos[i] += self.puckVel[i]
self.render()
def processCollisions(self):
"""
Detect whether the puck has collided with a wall, and change its
direction appropriately
"""
if self.puckPos[0] <= 0:
# Left wall
self.puckVel[0] *= -1
self.send((self.messagePrefix + self.collisionMsg[3], 1), "outbox")
if self.puckPos[0] >= self.size[0]:
# Right wall
self.puckVel[0] *= -1
self.send((self.messagePrefix + self.collisionMsg[1], 1), "outbox")
if self.puckPos[1] <= 0:
# Top wall
self.puckVel[1] *= -1
self.send((self.messagePrefix + self.collisionMsg[0], 1), "outbox")
if self.puckPos[1] >= self.size[1]:
# Bottom wall
self.puckVel[1] *= -1
self.send((self.messagePrefix + self.collisionMsg[2], 1), "outbox")
def drawBG(self):
if self.slider:
self.display.fill( (255,255,255) )
pygame.draw.rect(self.display, (0,0,0),
self.display.get_rect(), 2)
elif self.saturator:
for y in range(0, self.size[0], self.size[0]/25):
box = pygame.Rect(self.size[0]/2, y, 10, 10)
pygame.draw.rect(self.display, (self.selectedColour[0],self.selectedColour[1],self.selectedColour[2],y), box, 0)
elif self.colourSelector:
if (self.colours == "RG"):
for y in range(0, self.size[0], self.size[0]/25):
for x in range(0, self.size[1], self.size[1]/25):
box = pygame.Rect(x, y, 10, 10)
pygame.draw.rect(self.display, (x,y,0), box, 0)
elif (self.colours == "RB"):
for y in range(0, self.size[0], self.size[0]/25):
for x in range(0, self.size[1], self.size[1]/25):
box = pygame.Rect(x, y, 10, 10)
pygame.draw.rect(self.display, (x,0,y), box, 0)
elif (self.colours == "GB"):
for y in range(0, self.size[0], self.size[0]/25):
for x in range(0, self.size[1], self.size[1]/25):
box = pygame.Rect(x, y, 10, 10)
pygame.draw.rect(self.display, (0,x,y), box, 0)
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
def render(self):
"""Draw the border and puck onto the surface"""
# self.display.get_rect(), self.borderWidth)
if self.colourSelector:
if (self.colours == "RG"):
self.selectedColour = (self.puckPos[0], self.puckPos[1], 0)
elif (self.colours == "RB"):
self.selectedColour = (self.puckPos[0], 0, self.puckPos[1])
elif (self.colours == "GB"):
self.selectedColour = (0, self.puckPos[0], self.puckPos[1])
pygame.draw.rect(self.display, self.selectedColour,
self.display.get_rect(), self.borderWidth)
self.send((("colour",self.selectedColour),), "outbox")
if self.slider and not self.alpha:
# print float(self.size[1])/float(self.size[0])*self.sliderPos
self.selectedSize = float(self.size[1])/float(self.size[0])*self.sliderPos
self.send((("Size",self.selectedSize),), "outbox")
box = pygame.Rect(self.sliderPos, 0, 5, self.selectedSize)
pygame.draw.rect(self.display, (0,0,0),
box, 0)
if self.slider and self.alpha:
# print self.sliderPos
self.selectedAlpha = self.sliderPos
self.send((("Alpha",self.selectedAlpha),), "outbox")
box = pygame.Rect(self.sliderPos, 0, 5, 20)
pygame.draw.rect(self.display, (0,0,0),
box, 0)
# Puck
# pygame.draw.circle(self.display, self.fgcolour,
# [int(x) for x in self.puckPos], self.puckRadius)
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
if __name__ == "__main__":
from Kamaelia.Util.Clock import CheapAndCheerfulClock as Clock
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
# xyPad = XYPad().activate()
xyPad2 = XYPad(size=(255, 255), bouncingPuck = False, position = (70, 0),
bgcolour=(0, 0, 0), fgcolour=(255, 255, 255),
positionMsg="p2").activate()
ce = ConsoleEchoer().activate()
# clock.link((clock, "outbox"), (xyPad, "newframe"))
# xyPad.link((xyPad, "outbox"), (ce,"inbox"))
xyPad2.link((xyPad2, "outbox"), (ce,"inbox"))
Axon.Scheduler.scheduler.run.runThreads()
# Licensed to the BBC under a Contributor Agreement: JT/DK
| apache-2.0 |
fhaoquan/kbengine | kbe/src/lib/python/Lib/test/list_tests.py | 106 | 17676 | """
Tests common to list and UserList.UserList
"""
import sys
import os
from functools import cmp_to_key
from test import support, seq_tests
class CommonTest(seq_tests.CommonTest):
def test_init(self):
# Iterable arg is optional
self.assertEqual(self.type2test([]), self.type2test())
# Init clears previous values
a = self.type2test([1, 2, 3])
a.__init__()
self.assertEqual(a, self.type2test([]))
# Init overwrites previous values
a = self.type2test([1, 2, 3])
a.__init__([4, 5, 6])
self.assertEqual(a, self.type2test([4, 5, 6]))
# Mutables always return a new object
b = self.type2test(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_repr(self):
l0 = []
l2 = [0, 1, 2]
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), str(l0))
self.assertEqual(repr(a0), repr(l0))
self.assertEqual(repr(a2), repr(l2))
self.assertEqual(str(a2), "[0, 1, 2]")
self.assertEqual(repr(a2), "[0, 1, 2]")
a2.append(a2)
a2.append(3)
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
l0 = []
for i in range(sys.getrecursionlimit() + 100):
l0 = [l0]
self.assertRaises(RuntimeError, repr, l0)
def test_print(self):
d = self.type2test(range(200))
d.append(d)
d.extend(range(200,400))
d.append(d)
d.append(400)
try:
with open(support.TESTFN, "w") as fo:
fo.write(str(d))
with open(support.TESTFN, "r") as fo:
self.assertEqual(fo.read(), repr(d))
finally:
os.remove(support.TESTFN)
def test_set_subscript(self):
a = self.type2test(range(20))
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
self.assertRaises(TypeError, a.__getitem__, 'x', 1)
a[slice(2,10,3)] = [1,2,3]
self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(range(20))
r = reversed(a)
self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
self.assertRaises(StopIteration, next, r)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
# Bug 3689: make sure list-reversed-iterator doesn't have __len__
self.assertRaises(TypeError, len, reversed([1,2,3]))
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0] = 1
a[1] = 2
a[2] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2] = 88
a[-1] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(range(10)))
self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))
self.assertRaises(TypeError, a.__setitem__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
class BadCmp2:
def __eq__(self, other):
raise BadExc()
d = self.type2test('abcdefghcij')
d.remove('c')
self.assertEqual(d, self.type2test('abdefghcij'))
d.remove('c')
self.assertEqual(d, self.type2test('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, self.type2test('abdefghij'))
# Handle comparison errors
d = self.type2test(['a', 'b', BadCmp2(), 'c'])
e = self.type2test(d)
self.assertRaises(BadExc, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertIs(x, y)
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in range(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_clear(self):
u = self.type2test([2, 3, 4])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.clear()
self.assertEqual(u, [])
u = self.type2test([])
u.append(1)
u.clear()
u.append(2)
self.assertEqual(u, [2])
self.assertRaises(TypeError, u.clear, None)
def test_copy(self):
u = self.type2test([1, 2, 3])
v = u.copy()
self.assertEqual(v, [1, 2, 3])
u = self.type2test([])
v = u.copy()
self.assertEqual(v, [])
# test that it's indeed a copy and not a reference
u = self.type2test(['a', 'b'])
v = u.copy()
v.append('i')
self.assertEqual(u, ['a', 'b'])
self.assertEqual(v, u + ['i'])
# test that it's a shallow, not a deep copy
u = self.type2test([1, 2, [3, 4], 5])
v = u.copy()
self.assertEqual(u, v)
self.assertIs(v[3], u[3])
self.assertRaises(TypeError, u.copy, None)
def test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
def revcmp(a, b):
if a == b:
return 0
elif a < b:
return 1
else: # a > b
return -1
u.sort(key=cmp_to_key(revcmp))
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
xmod, ymod = x%3, y%7
if xmod == ymod:
return 0
elif xmod < ymod:
return -1
else: # xmod > ymod
return 1
z = self.type2test(range(12))
z.sort(key=cmp_to_key(myComparison))
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
if x == y:
return 0
elif x < y:
return -1
else: # x > y
return 1
self.assertRaises(ValueError, z.sort,
key=cmp_to_key(selfmodifyingComparison))
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super().test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assertIs(u, u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
self.assertRaises(TypeError, u.__iadd__, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(range(5))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(range(5))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(range(10))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(range(10))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(range(10))
a[::-4] = [10]*3
self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
a = self.type2test(range(4))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(range(10))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(range(10))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
# test issue7788
a = self.type2test(range(10))
del a[9::1<<333]
def test_constructor_exception_handling(self):
# Bug #1242657
class F(object):
def __iter__(self):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, list, F())
| lgpl-3.0 |
jakesyl/pychess | testing/draw.py | 21 | 1665 | import unittest
from pychess.Savers import pgn
from pychess.Utils.lutils import ldraw
class DrawTestCase(unittest.TestCase):
def setUp(self):
with open('gamefiles/3fold.pgn') as f1:
self.PgnFile1 = pgn.load(f1)
with open('gamefiles/bilbao.pgn') as f2:
self.PgnFile2 = pgn.load(f2)
with open('gamefiles/material.pgn') as f3:
self.PgnFile3 = pgn.load(f3)
def test1(self):
"""Testing the same position, for the third time"""
for i, game in enumerate(self.PgnFile1.games):
model = self.PgnFile1.loadToModel(i)
lboard = model.boards[-2].board
self.assertTrue(lboard.repetitionCount() < 3)
lboard = model.boards[-1].board
self.assertEqual(lboard.repetitionCount(), 3)
def test2(self):
"""Testing the 50 move rule"""
for i, game in enumerate(self.PgnFile2.games):
model = self.PgnFile2.loadToModel(i)
lboard = model.boards[-2].board
self.assertEqual(ldraw.testFifty(lboard), False)
lboard = model.boards[-1].board
self.assertEqual(ldraw.testFifty(lboard), True)
def test3(self):
"""Testing too few material"""
for i, game in enumerate(self.PgnFile3.games):
model = self.PgnFile3.loadToModel(i)
lboard = model.boards[-2].board
self.assertEqual(ldraw.testMaterial(lboard), False)
lboard = model.boards[-1].board
self.assertEqual(ldraw.testMaterial(lboard), True)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
takis/django | django/template/__init__.py | 198 | 2022 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However,
keeping them together made the implementation of Multiple Template Engines
less disruptive.
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
# Django Template Language
# Public exceptions
from .base import VariableDoesNotExist # NOQA isort:skip
from .context import ContextPopException # NOQA isort:skip
from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip
# Template parts
from .base import ( # NOQA isort:skip
Context, Node, NodeList, Origin, RequestContext, Template, Variable,
)
# Deprecated in Django 1.8, will be removed in Django 1.10.
from .base import resolve_variable # NOQA isort:skip
# Library management
from .library import Library # NOQA isort:skip
__all__ += ('Template', 'Context', 'RequestContext')
| bsd-3-clause |
chirilo/kitsune | kitsune/search/tests/test_plugin.py | 15 | 1279 | from django.contrib.sites.models import Site
import mock
from nose.tools import eq_
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
class OpenSearchTestCase(TestCase):
"""Test the SUMO OpenSearch plugin."""
@mock.patch.object(Site.objects, 'get_current')
def test_plugin(self, get_current):
"""The plugin loads with the correct mimetype."""
get_current.return_value.domain = 'testserver'
response = self.client.get(reverse('search.plugin',
locale='en-US'))
eq_(200, response.status_code)
assert 'expires' in response
eq_('application/opensearchdescription+xml', response['content-type'])
@mock.patch.object(Site.objects, 'get_current')
def test_localized_plugin(self, get_current):
"""Every locale gets its own plugin!"""
get_current.return_value.domain = 'testserver'
response = self.client.get(reverse('search.plugin',
locale='en-US'))
assert '/en-US/search' in response.content
response = self.client.get(reverse('search.plugin',
locale='fr'))
assert '/fr/search' in response.content
| bsd-3-clause |
CognetTestbed/COGNET_CODE | LIB_NETLINK/libnl-3-android-nogit/python/netlink/route/links/vlan.py | 13 | 1943 | #
# Copyright (c) 2011 Thomas Graf <[email protected]>
#
"""VLAN network link
"""
from __future__ import absolute_import
from ... import core as netlink
from .. import capi as capi
class VLANLink(object):
def __init__(self, link):
self._link = link
@property
@netlink.nlattr(type=int)
def id(self):
"""vlan identifier"""
return capi.rtnl_link_vlan_get_id(self._link)
@id.setter
def id(self, value):
capi.rtnl_link_vlan_set_id(self._link, int(value))
@property
@netlink.nlattr(type=str)
def flags(self):
""" VLAN flags
        Setting this property will *not* reset the existing flags; it only adds
        or removes the flags you supply.
Examples:
link.flags = '+xxx' # add xxx flag
link.flags = 'xxx' # exactly the same
link.flags = '-xxx' # remove xxx flag
link.flags = [ '+xxx', '-yyy' ] # list operation
"""
flags = capi.rtnl_link_vlan_get_flags(self._link)
return capi.rtnl_link_vlan_flags2str(flags, 256)[0].split(',')
def _set_flag(self, flag):
if flag.startswith('-'):
i = capi.rtnl_link_vlan_str2flags(flag[1:])
capi.rtnl_link_vlan_unset_flags(self._link, i)
elif flag.startswith('+'):
i = capi.rtnl_link_vlan_str2flags(flag[1:])
capi.rtnl_link_vlan_set_flags(self._link, i)
else:
i = capi.rtnl_link_vlan_str2flags(flag)
capi.rtnl_link_vlan_set_flags(self._link, i)
@flags.setter
def flags(self, value):
if type(value) is list:
for flag in value:
self._set_flag(flag)
else:
self._set_flag(value)
###################################################################
# TODO:
# - ingress map
# - egress map
def brief(self):
return 'vlan-id {0}'.format(self.id)
def init(link):
link.vlan = VLANLink(link._link)
return link.vlan
| gpl-3.0 |
pixelrebel/st2 | st2actions/st2actions/resultstracker/resultstracker.py | 5 | 4285 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
import six
from collections import defaultdict
from kombu import Connection
from st2common.query.base import QueryContext
from st2common import log as logging
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.persistence.executionstate import ActionExecutionState
from st2common.transport import actionexecutionstate, consumers, publishers
from st2common.transport import utils as transport_utils
from st2common.util.loader import register_query_module
LOG = logging.getLogger(__name__)
ACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work',
routing_key=publishers.CREATE_RK)
class ResultsTracker(consumers.MessageHandler):
message_type = ActionExecutionStateDB
def __init__(self, connection, queues):
super(ResultsTracker, self).__init__(connection, queues)
self._queriers = {}
self._query_threads = []
self._failed_imports = set()
def start(self, wait=False):
self._bootstrap()
super(ResultsTracker, self).start(wait=wait)
def wait(self):
super(ResultsTracker, self).wait()
for thread in self._query_threads:
thread.wait()
def shutdown(self):
super(ResultsTracker, self).shutdown()
LOG.info('Stats from queriers:')
self._print_stats()
def _print_stats(self):
for _, querier in six.iteritems(self._queriers):
if querier:
querier.print_stats()
def _bootstrap(self):
all_states = ActionExecutionState.get_all()
LOG.info('Found %d pending states in db.' % len(all_states))
query_contexts_dict = defaultdict(list)
for state_db in all_states:
try:
context = QueryContext.from_model(state_db)
except:
LOG.exception('Invalid state object: %s', state_db)
continue
query_module_name = state_db.query_module
querier = self.get_querier(query_module_name)
if querier is not None:
query_contexts_dict[querier].append(context)
for querier, contexts in six.iteritems(query_contexts_dict):
LOG.info('Found %d pending actions for query module %s', len(contexts), querier)
querier.add_queries(query_contexts=contexts)
def process(self, query_context):
querier = self.get_querier(query_context.query_module)
context = QueryContext.from_model(query_context)
querier.add_queries(query_contexts=[context])
return
def get_querier(self, query_module_name):
if (query_module_name not in self._queriers and
query_module_name not in self._failed_imports):
try:
query_module = register_query_module(query_module_name)
except:
LOG.exception('Failed importing query module: %s', query_module_name)
self._failed_imports.add(query_module_name)
self._queriers[query_module_name] = None
else:
querier = query_module.get_instance()
self._queriers[query_module_name] = querier
self._query_threads.append(eventlet.spawn(querier.start))
return self._queriers[query_module_name]
def get_tracker():
with Connection(transport_utils.get_messaging_urls()) as conn:
return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])
| apache-2.0 |
HLFH/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/walla.py | 27 | 2976 | # coding: utf-8
from __future__ import unicode_literals
import re
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
xpath_text,
int_or_none,
)
class WallaIE(SubtitlesInfoExtractor):
_VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
_TEST = {
'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
'info_dict': {
'id': '2642630',
'display_id': 'one-direction-all-for-one',
'ext': 'flv',
'title': 'וואן דיירקשן: ההיסטריה',
'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
}
}
_SUBTITLE_LANGS = {
'עברית': 'heb',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
video = self._download_xml(
'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
display_id)
item = video.find('./items/item')
title = xpath_text(item, './title', 'title')
description = xpath_text(item, './synopsis', 'description')
thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
duration = int_or_none(xpath_text(item, './duration', 'duration'))
subtitles = {}
for subtitle in item.findall('./subtitles/subtitle'):
lang = xpath_text(subtitle, './title')
subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = xpath_text(subtitle, './src')
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
formats = []
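        # Build one RTMP format per listed quality; the video height is parsed from titles such as '480p'.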
for quality in item.findall('./qualities/quality'):
format_id = xpath_text(quality, './title')
fmt = {
'url': 'rtmp://wafla.walla.co.il/vod',
'play_path': xpath_text(quality, './src'),
'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
'page_url': url,
'ext': 'flv',
                'format_id': format_id,
}
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 |
wrouesnel/ansible | lib/ansible/plugins/callback/full_skip.py | 25 | 2290 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: full_skip
type: stdout
short_description: suppresses tasks if all hosts skipped
description:
      - Use this plugin when you don't care about any output for tasks that were completely skipped
version_added: "2.4"
extends_documentation_fragment:
- default_callback
requirements:
  - set as stdout in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
    A stdout callback based on the default plugin; it buffers task output
    and prints nothing for tasks in which every host was skipped.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'full_skip'
def v2_runner_on_skipped(self, result):
self.outlines = []
def v2_playbook_item_on_skipped(self, result):
self.outlines = []
def v2_runner_item_on_skipped(self, result):
self.outlines = []
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display()
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_playbook_on_task_start(self, task, is_conditional):
self.outlines = []
self.outlines.append("TASK [%s]" % task.get_name().strip())
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self.outlines.append("task path: %s" % path)
def v2_playbook_item_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_playbook_item_on_ok(result)
def v2_runner_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_runner_on_ok(result)
def display(self):
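        # Flush the buffered task banner and lines; skipped results empty the buffer, so fully skipped tasks print nothing.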
if len(self.outlines) == 0:
return
(first, rest) = self.outlines[0], self.outlines[1:]
self._display.banner(first)
for line in rest:
self._display.display(line)
self.outlines = []
| gpl-3.0 |
Sergiojimenez/criticas_del_doctor_Mabuse | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | 3662 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
TRYBOTS = [
'linux_try',
'mac_try',
'win_try',
]
def GetPreferredTryMasters(_, change):
return {
'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
}
| mit |
jithinbp/pslab-desktop-apps | psl_res/GUI/Z_SCHOOL_LEVEL/A_voltage_fundamentals/K_LDR.py | 2 | 3951 | #!/usr/bin/python
"""
::
	This experiment is used to study a Light Dependent Resistor (LDR).
"""
from __future__ import print_function
from PSL_Apps.utilitiesClass import utilitiesClass
from PSL_Apps.templates import ui_template_graph_nofft as template_graph_nofft
import numpy as np
from PyQt4 import QtGui,QtCore
import pyqtgraph as pg
import sys,functools,time
params = {
'image' : 'ldr.png',
'name':"Light Dependent\nResistor",
'hint':'''
	Observe the workings of a Light Dependent Resistor.<br>
	Use it to study the 50 Hz fluctuation of fluorescent lamps.
'''
}
class AppWindow(QtGui.QMainWindow, template_graph_nofft.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
from PSL.analyticsClass import analyticsClass
self.math = analyticsClass()
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
self.plot=self.add2DPlot(self.plot_area,enableMenu=False)
self.enableCrossHairs(self.plot)
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot.setLabel('left','Resistance', units=u"\u03A9",**labelStyle)
self.plot.setLabel('bottom','Time', units='S',**labelStyle)
self.tg=30.
self.max_samples=1000
self.samples = self.max_samples
self.plot.setLimits(yMax=50e3,yMin=0,xMin=0,xMax=1e-6*self.tg*self.samples)
self.plot.setYRange(1e3,30e3)
self.timer = self.newTimer()
self.legend = self.plot.addLegend(offset=(-10,30))
self.curve1 = self.addCurve(self.plot,'RESISTANCE (SEN)')
self.WidgetLayout.setAlignment(QtCore.Qt.AlignLeft)
#Control widgets
self.sqr = self.dialIcon(TITLE='SQR1',MIN=10,MAX=300,FUNC=self.I.sqr1,UNITS='Hz',TOOLTIP='Frequency of square wave generator #1\n0 for switched off, Max for On state')
self.WidgetLayout.addWidget(self.sqr)
self.voltmeter = self.displayIcon(TITLE = 'Average Resistance',UNITS=u"\u03A9",TOOLTIP='')
self.WidgetLayout.addWidget(self.voltmeter)
self.addPauseButton(self.bottomLayout,self.pause)
self.running=True
self.paused=False
self.timer.singleShot(100,self.run)
def pause(self,v):
self.paused = v
def run(self):
if not self.running: return
if self.paused:
self.timer.singleShot(100,self.run)
return
try:
self.I.capture_traces(1,self.samples,self.tg,'SEN',trigger=False)
if self.running:self.timer.singleShot(self.samples*self.I.timebase*1e-3+10,self.plotData)
except:
pass
def plotData(self):
if not self.running: return
try:
n=0
while(not self.I.oscilloscope_progress()[0]):
time.sleep(0.1)
n+=1
if n>10:
self.timer.singleShot(100,self.run)
return
self.I.__fetch_channel__(1)
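			# The LDR and a 5.1 kOhm series resistor form a voltage divider from 3.3 V; derive the LDR resistance from the measured voltage.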
V = np.clip(self.I.achans[0].get_yaxis(),0,3.2)
I = (3.3-V)/5.1e3
R = V/I
self.curve1.setData(self.I.achans[0].get_xaxis()*1e-6,R,connect='finite')
self.voltmeter.setValue(self.math.RMS(R))
self.displayCrossHairData(self.plot,False,self.samples,self.I.timebase,[V],[(0,255,0)])
if self.running:self.timer.singleShot(100,self.run)
except Exception as e:
print (e)
def crossHairEvent(self,plot,evt):
pos = evt[0].scenePos() ## using signal proxy turns original arguments into a tuple
if plot.sceneBoundingRect().contains(pos):
plot.mousePoint = plot.getPlotItem().vb.mapSceneToView(pos)
plot.vLine.setPos(plot.mousePoint.x())
plot.hLine.setPos(plot.mousePoint.y())
self.displayCrossHairData(plot,False,self.samples,self.I.timebase,[self.I.achans[0].get_yaxis()],[(0,255,0)])
def saveData(self):
self.saveDataWindow([self.curve1],self.plot)
def closeEvent(self, event):
self.running=False
self.timer.stop()
self.finished=True
def __del__(self):
self.timer.stop()
print('bye')
if __name__ == "__main__":
from PSL import sciencelab
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=sciencelab.connect())
myapp.show()
sys.exit(app.exec_())
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/Crypto/PublicKey/pubkey.py | 125 | 8221 | #
# pubkey.py : Internal functions for public key operations
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling, Paul Swartz, and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#
__revision__ = "$Id$"
import types, warnings
from Crypto.Util.number import *
# Basic public key class
class pubkey:
"""An abstract class for a public key object.
:undocumented: __getstate__, __setstate__, __eq__, __ne__, validate
"""
def __init__(self):
pass
def __getstate__(self):
"""To keep key objects platform-independent, the key data is
converted to standard Python long integers before being
written out. It will then be reconverted as necessary on
restoration."""
d=self.__dict__
for key in self.keydata:
if d.has_key(key): d[key]=long(d[key])
return d
def __setstate__(self, d):
"""On unpickling a key object, the key data is converted to the big
number representation being used, whether that is Python long
integers, MPZ objects, or whatever."""
for key in self.keydata:
if d.has_key(key): self.__dict__[key]=bignum(d[key])
def encrypt(self, plaintext, K):
"""Encrypt a piece of data.
:Parameter plaintext: The piece of data to encrypt.
:Type plaintext: byte string or long
:Parameter K: A random parameter required by some algorithms
:Type K: byte string or long
:Return: A tuple with two items. Each item is of the same type as the
plaintext (string or long).
"""
wasString=0
if isinstance(plaintext, types.StringType):
plaintext=bytes_to_long(plaintext) ; wasString=1
if isinstance(K, types.StringType):
K=bytes_to_long(K)
ciphertext=self._encrypt(plaintext, K)
if wasString: return tuple(map(long_to_bytes, ciphertext))
else: return ciphertext
def decrypt(self, ciphertext):
"""Decrypt a piece of data.
:Parameter ciphertext: The piece of data to decrypt.
:Type ciphertext: byte string, long or a 2-item tuple as returned by `encrypt`
:Return: A byte string if ciphertext was a byte string or a tuple
of byte strings. A long otherwise.
"""
wasString=0
if not isinstance(ciphertext, types.TupleType):
ciphertext=(ciphertext,)
if isinstance(ciphertext[0], types.StringType):
ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1
plaintext=self._decrypt(ciphertext)
if wasString: return long_to_bytes(plaintext)
else: return plaintext
def sign(self, M, K):
"""Sign a piece of data.
:Parameter M: The piece of data to encrypt.
:Type M: byte string or long
:Parameter K: A random parameter required by some algorithms
:Type K: byte string or long
:Return: A tuple with two items.
"""
if (not self.has_private()):
raise TypeError('Private key not available in this object')
if isinstance(M, types.StringType): M=bytes_to_long(M)
if isinstance(K, types.StringType): K=bytes_to_long(K)
return self._sign(M, K)
def verify (self, M, signature):
"""Verify the validity of a signature.
:Parameter M: The expected message.
:Type M: byte string or long
:Parameter signature: The signature to verify.
:Type signature: tuple with two items, as return by `sign`
:Return: True if the signature is correct, False otherwise.
"""
if isinstance(M, types.StringType): M=bytes_to_long(M)
return self._verify(M, signature)
# alias to compensate for the old validate() name
def validate (self, M, signature):
        warnings.warn("validate() method name is obsolete; use verify()",
                      DeprecationWarning)
        return self.verify(M, signature)
def blind(self, M, B):
"""Blind a message to prevent certain side-channel attacks.
:Parameter M: The message to blind.
:Type M: byte string or long
:Parameter B: Blinding factor.
:Type B: byte string or long
:Return: A byte string if M was so. A long otherwise.
"""
wasString=0
if isinstance(M, types.StringType):
M=bytes_to_long(M) ; wasString=1
if isinstance(B, types.StringType): B=bytes_to_long(B)
blindedmessage=self._blind(M, B)
if wasString: return long_to_bytes(blindedmessage)
else: return blindedmessage
def unblind(self, M, B):
"""Unblind a message after cryptographic processing.
:Parameter M: The encoded message to unblind.
:Type M: byte string or long
:Parameter B: Blinding factor.
:Type B: byte string or long
"""
wasString=0
if isinstance(M, types.StringType):
M=bytes_to_long(M) ; wasString=1
if isinstance(B, types.StringType): B=bytes_to_long(B)
unblindedmessage=self._unblind(M, B)
if wasString: return long_to_bytes(unblindedmessage)
else: return unblindedmessage
# The following methods will usually be left alone, except for
# signature-only algorithms. They both return Boolean values
# recording whether this key's algorithm can sign and encrypt.
def can_sign (self):
"""Tell if the algorithm can deal with cryptographic signatures.
This property concerns the *algorithm*, not the key itself.
It may happen that this particular key object hasn't got
the private information required to generate a signature.
:Return: boolean
"""
return 1
def can_encrypt (self):
"""Tell if the algorithm can deal with data encryption.
This property concerns the *algorithm*, not the key itself.
It may happen that this particular key object hasn't got
the private information required to decrypt data.
:Return: boolean
"""
return 1
def can_blind (self):
"""Tell if the algorithm can deal with data blinding.
This property concerns the *algorithm*, not the key itself.
It may happen that this particular key object hasn't got
        the private information required to carry out blinding.
:Return: boolean
"""
return 0
# The following methods will certainly be overridden by
# subclasses.
def size (self):
"""Tell the maximum number of bits that can be handled by this key.
:Return: int
"""
return 0
def has_private (self):
"""Tell if the key object contains private components.
:Return: bool
"""
return 0
def publickey (self):
"""Construct a new key carrying only the public information.
:Return: A new `pubkey` object.
"""
return self
def __eq__ (self, other):
"""__eq__(other): 0, 1
Compare us to other for equality.
"""
return self.__getstate__() == other.__getstate__()
def __ne__ (self, other):
"""__ne__(other): 0, 1
Compare us to other for inequality.
"""
return not self.__eq__(other)
| gpl-2.0 |
eMerzh/Diamond-1 | src/diamond/handler/g_metric.py | 52 | 2760 | # coding=utf-8
"""
Emulate a gmetric client for usage with
[Ganglia Monitoring System](http://ganglia.sourceforge.net/)
"""
from Handler import Handler
import logging
try:
import gmetric
except ImportError:
gmetric = None
class GmetricHandler(Handler):
"""
Implements the abstract Handler class, sending data the same way that
gmetric does.
"""
def __init__(self, config=None):
"""
Create a new instance of the GmetricHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
if gmetric is None:
logging.error("Failed to load gmetric module")
return
# Initialize Data
self.socket = None
# Initialize Options
self.host = self.config['host']
self.port = int(self.config['port'])
self.protocol = self.config['protocol']
if not self.protocol:
self.protocol = 'udp'
# Initialize
self.gmetric = gmetric.Gmetric(self.host, self.port, self.protocol)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GmetricHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'protocol': 'udp or tcp',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GmetricHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 8651,
'protocol': 'udp',
})
return config
def __del__(self):
"""
Destroy instance of the GmetricHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to a gmond instance
"""
# Just send the data as a string
self._send(metric)
def _send(self, metric):
"""
Send data to gmond.
"""
metric_name = self.get_name_from_path(metric.path)
tmax = "60"
dmax = "0"
slope = "both"
# FIXME: Badness, shouldn't *assume* double type
metric_type = "double"
units = ""
group = ""
self.gmetric.send(metric_name,
metric.value,
metric_type,
units,
slope,
tmax,
dmax,
group)
def _close(self):
"""
Close the connection
"""
self.gmetric = None
| mit |
groschovskiy/lerigos_music | Server/API/lib/bson/min_key.py | 55 | 1324 | # Copyright 2010-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation for the MongoDB internal MinKey type.
"""
class MinKey(object):
"""MongoDB internal MinKey type.
.. versionchanged:: 2.7
``MinKey`` now implements comparison operators.
"""
_type_marker = 255
def __eq__(self, other):
return isinstance(other, MinKey)
def __hash__(self):
return hash(self._type_marker)
def __ne__(self, other):
return not self == other
def __le__(self, dummy):
return True
def __lt__(self, other):
return not isinstance(other, MinKey)
def __ge__(self, other):
return isinstance(other, MinKey)
def __gt__(self, dummy):
return False
def __repr__(self):
return "MinKey()"
| apache-2.0 |
sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/AcceptanceTests/file_tests.py | 2 | 5671 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import io
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
cwd = dirname(realpath(__file__))
root = realpath(join(cwd , pardir, pardir, pardir, pardir, pardir))
sys.path.append(join(root, "ClientRuntimes" , "Python", "msrest"))
log_level = int(os.environ.get('PythonLogLevel', 30))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyFile"))
from msrest.exceptions import DeserializationError
from autorestswaggerbatfileservice import AutoRestSwaggerBATFileService
from autorestswaggerbatfileservice.models import ErrorException
class FileTests(unittest.TestCase):
def test_files(self):
client = AutoRestSwaggerBATFileService(base_url="http://localhost:3000")
client.config.connection.data_block_size = 1000
def test_callback(data, response, progress=[0]):
self.assertTrue(len(data) > 0)
self.assertIsNotNone(response)
self.assertFalse(response._content_consumed)
total = float(response.headers['Content-Length'])
if total < 4096:
progress[0] += len(data)
print("Downloading... {}%".format(int(progress[0]*100/total)))
file_length = 0
with io.BytesIO() as file_handle:
stream = client.files.get_file(callback=test_callback)
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertNotEqual(file_length, 0)
sample_file = realpath(
join(cwd, pardir, pardir, pardir, "NodeJS",
"NodeJS.Tests", "AcceptanceTests", "sample.png"))
with open(sample_file, 'rb') as data:
sample_data = hash(data.read())
self.assertEqual(sample_data, hash(file_handle.getvalue()))
client.config.connection.data_block_size = 4096
file_length = 0
with io.BytesIO() as file_handle:
stream = client.files.get_empty_file(callback=test_callback)
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertEqual(file_length, 0)
def add_headers(adapter, request, response, *args, **kwargs):
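            # Response hook that fakes a ~3 GB Content-Length so the large-file code path can be exercised without downloading real data.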
response.headers['Content-Length'] = str(3000 * 1024 * 1024)
file_length = 0
client._client.add_hook('response', add_headers)
stream = client.files.get_file_large(callback=test_callback)
#for data in stream:
# file_length += len(data)
#self.assertEqual(file_length, 3000 * 1024 * 1024)
def test_files_raw(self):
def test_callback(data, response, progress=[0]):
self.assertTrue(len(data) > 0)
self.assertIsNotNone(response)
self.assertFalse(response._content_consumed)
total = float(response.headers.get('Content-Length', 0))
if total:
progress[0] += len(data)
print("Downloading... {}%".format(int(progress[0]*100/total)))
client = AutoRestSwaggerBATFileService(base_url="http://localhost:3000")
file_length = 0
with io.BytesIO() as file_handle:
response = client.files.get_file(raw=True, callback=test_callback)
stream = response.output
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertNotEqual(file_length, 0)
sample_file = realpath(
join(cwd, pardir, pardir, pardir, "NodeJS",
"NodeJS.Tests", "AcceptanceTests", "sample.png"))
with open(sample_file, 'rb') as data:
sample_data = hash(data.read())
self.assertEqual(sample_data, hash(file_handle.getvalue()))
file_length = 0
with io.BytesIO() as file_handle:
response = client.files.get_empty_file(raw=True, callback=test_callback)
stream = response.output
for data in stream:
file_length += len(data)
file_handle.write(data)
self.assertEqual(file_length, 0)
if __name__ == '__main__':
unittest.main() | mit |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/src/class/oc_edit.py | 21 | 3384 | # pylint: skip-file
# flake8: noqa
class Edit(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
resource_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
super(Edit, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
self.separator = separator
def get(self):
'''return a secret by name '''
return self._get(self.kind, self.name)
def update(self, file_name, content, edits, force=False, content_type='yaml'):
        '''Run the update: when a file is given, apply the content/edits to it
        with Yedit and replace the object from the file; otherwise patch the
        live object content directly.'''
if file_name:
if content_type == 'yaml':
data = yaml.load(open(file_name))
elif content_type == 'json':
data = json.loads(open(file_name).read())
yed = Yedit(filename=file_name, content=data, separator=self.separator)
# Keep this for compatibility
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if not results['changed']:
return results
yed.write()
atexit.register(Utils.cleanup, [file_name])
return self._replace(file_name, force=force)
return self._replace_content(self.kind, self.name, content, edits, force=force, sep=self.separator)
@staticmethod
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
ocedit = Edit(params['kind'],
params['namespace'],
params['name'],
kubeconfig=params['kubeconfig'],
separator=params['separator'],
verbose=params['debug'])
api_rval = ocedit.get()
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
return {"failed": True, 'msg': api_rval}
########
# Update
########
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
api_rval = ocedit.update(params['file_name'],
params['content'],
params['edits'],
params['force'],
params['file_format'])
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
if 'updated' in api_rval and not api_rval['updated']:
return {"changed": False, 'results': api_rval, 'state': 'present'}
# return the created object
api_rval = ocedit.get()
if api_rval['returncode'] != 0:
return {"failed": True, 'msg': api_rval}
return {"changed": True, 'results': api_rval, 'state': 'present'}
| apache-2.0 |
FederatedAI/FATE | python/fate_test/fate_test/flow_test/flow_process.py | 1 | 17149 | import json
import os
import tarfile
import time
from contextlib import closing
from datetime import datetime
import requests
def get_dict_from_file(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
json_info = json.load(f)
return json_info
class Base(object):
def __init__(self, server_url, component_name):
self.config = None
self.dsl = None
self.guest_party_id = None
self.host_party_id = None
self.job_id = None
self.model_id = None
self.model_version = None
self.server_url = server_url
self.component_name = component_name
def set_config(self, guest_party_id, host_party_id, arbiter_party_id, path, work_mode):
self.config = get_dict_from_file(path)
self.config["initiator"]["party_id"] = guest_party_id[0]
self.config["role"]["guest"] = guest_party_id
self.config["role"]["host"] = host_party_id
if self.config["job_parameters"].get("common"):
self.config["job_parameters"]["common"]["work_mode"] = work_mode
else:
self.config["job_parameters"]["work_mode"] = work_mode
if "arbiter" in self.config["role"]:
self.config["role"]["arbiter"] = arbiter_party_id
self.guest_party_id = guest_party_id
self.host_party_id = host_party_id
return self.config
def set_dsl(self, path):
self.dsl = get_dict_from_file(path)
return self.dsl
def submit(self):
post_data = {'job_runtime_conf': self.config, 'job_dsl': self.dsl}
print(f"start submit job, data:{post_data}")
response = requests.post("/".join([self.server_url, "job", "submit"]), json=post_data)
if response.status_code == 200 and not response.json().get('retcode'):
self.job_id = response.json().get("jobId")
print(f"submit job success: {response.json()}")
self.model_id = response.json().get("data").get("model_info").get("model_id")
self.model_version = response.json().get("data").get("model_info").get("model_version")
return True
else:
print(f"submit job failed: {response.text}")
return False
def query_job(self):
post_data = {'job_id': self.job_id}
response = requests.post("/".join([self.server_url, "job", "query"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
return response.json().get("data")[0].get("f_status")
return False
def wait_success(self, timeout=60 * 10):
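        # Poll the job status every 10 seconds until it reaches success, a terminal failure state, or the timeout elapses.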
for i in range(timeout // 10):
time.sleep(10)
status = self.query_job()
print("job {} status is {}".format(self.job_id, status))
if status and status == "success":
return True
if status and status in ["canceled", "timeout", "failed"]:
return False
return False
def get_component_output_data(self, output_path=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
if not output_path:
output_path = './output/data'
os.makedirs(os.path.dirname(output_path), exist_ok=True)
tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(post_data['job_id'], post_data['component_name'],
post_data['role'], post_data['party_id'])
extract_dir = os.path.join(output_path, tar_file_name.replace('.tar.gz', ''))
        print("start getting component output data")
with closing(
requests.get("/".join([self.server_url, "tracking", "component/output/data/download"]), json=post_data,
stream=True)) as response:
if response.status_code == 200:
try:
download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
print(f'get component output path {extract_dir}')
                except Exception:
                    print("get component output data failed")
return False
def get_output_data_table(self):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
response = requests.post("/".join([self.server_url, "tracking", "component/output/data/table"]), json=post_data)
result = {}
try:
if response.status_code == 200:
result["name"] = response.json().get("data")[0].get("table_name")
result["namespace"] = response.json().get("data")[0].get("namespace")
except Exception as e:
raise RuntimeError(f"output data table error: {response}") from e
return result
def get_table_info(self, table_name):
post_data = {
"name": table_name['name'],
"namespace": table_name['namespace']
}
response = requests.post("/".join([self.server_url, "table", "table_info"]), json=post_data)
try:
if response.status_code == 200:
table_count = response.json().get("data").get("count")
else:
raise RuntimeError(f"get table info failed: {response}")
except Exception as e:
raise RuntimeError(f"get table count error: {response}") from e
return table_count
def get_auc(self):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": "evaluation_0"
}
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]), json=post_data)
try:
if response.status_code == 200:
auc = response.json().get("data").get("train").get(self.component_name).get("data")[0][1]
else:
raise RuntimeError(f"get metrics failed: {response}")
except Exception as e:
raise RuntimeError(f"get table count error: {response}") from e
return auc
class TrainLRModel(Base):
def get_component_metrics(self, metric_output_path, file=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": "evaluation_0"
}
response = requests.post("/".join([self.server_url, "tracking", "component/metric/all"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
if not file:
file = metric_output_path.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
print(f"save component metrics success, path is:{os.path.abspath(file)}")
else:
print(f"get component metrics:{response.json()}")
return False
def get_component_output_model(self, model_output_path, file=None):
post_data = {
"job_id": self.job_id,
"role": "guest",
"party_id": self.guest_party_id[0],
"component_name": self.component_name
}
print(f"request component output model: {post_data}")
response = requests.post("/".join([self.server_url, "tracking", "component/output/model"]), json=post_data)
if response.status_code == 200:
if response.json().get("data"):
if not file:
file = model_output_path.format(self.job_id)
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as fp:
json.dump(response.json().get("data"), fp)
print(f"save component output model success, path is:{os.path.abspath(file)}")
else:
print(f"get component output model:{response.json()}")
return False
class PredictLRMode(Base):
def set_predict(self, guest_party_id, host_party_id, arbiter_party_id, model_id, model_version, path, work_mode):
self.set_config(guest_party_id, host_party_id, arbiter_party_id, path, work_mode)
if self.config["job_parameters"].get("common"):
self.config["job_parameters"]["common"]["model_id"] = model_id
self.config["job_parameters"]["common"]["model_version"] = model_version
else:
self.config["job_parameters"]["model_id"] = model_id
self.config["job_parameters"]["model_version"] = model_version
def download_from_request(http_response, tar_file_name, extract_dir):
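    """Stream the HTTP response into a .tar.gz file, extract it into extract_dir, then delete the archive."""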
with open(tar_file_name, 'wb') as fw:
for chunk in http_response.iter_content(1024):
if chunk:
fw.write(chunk)
tar = tarfile.open(tar_file_name, "r:gz")
file_names = tar.getnames()
for file_name in file_names:
tar.extract(file_name, extract_dir)
tar.close()
os.remove(tar_file_name)
def train_job(guest_party_id, host_party_id, arbiter_party_id, train_conf_path, train_dsl_path, server_url, work_mode,
component_name, metric_output_path, model_output_path, constant_auc):
train = TrainLRModel(server_url, component_name)
train.set_config(guest_party_id, host_party_id, arbiter_party_id, train_conf_path, work_mode)
train.set_dsl(train_dsl_path)
status = train.submit()
if status:
is_success = train.wait_success(timeout=600)
if is_success:
train.get_component_metrics(metric_output_path)
train.get_component_output_model(model_output_path)
train.get_component_output_data()
train_auc = train.get_auc()
assert abs(constant_auc - train_auc) <= 1e-4, 'The training result is wrong, auc: {}'.format(train_auc)
train_data_count = train.get_table_info(train.get_output_data_table())
return train, train_data_count
return False
def predict_job(guest_party_id, host_party_id, arbiter_party_id, predict_conf_path, predict_dsl_path, model_id,
model_version, server_url, work_mode, component_name):
predict = PredictLRMode(server_url, component_name)
predict.set_predict(guest_party_id, host_party_id, arbiter_party_id, model_id, model_version, predict_conf_path,
work_mode)
predict.set_dsl(predict_dsl_path)
status = predict.submit()
if status:
is_success = predict.wait_success(timeout=600)
if is_success:
predict.get_component_output_data()
predict_data_count = predict.get_table_info(predict.get_output_data_table())
return predict, predict_data_count
return False
class UtilizeModel:
def __init__(self, model_id, model_version, server_url):
self.model_id = model_id
self.model_version = model_version
self.deployed_model_version = None
self.service_id = None
self.server_url = server_url
def deploy_model(self):
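        # Ask FATE Flow to deploy the trained model and record the deployed model version used by the later load/bind steps.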
post_data = {
"model_id": self.model_id,
"model_version": self.model_version
}
response = requests.post("/".join([self.server_url, "model", "deploy"]), json=post_data)
print(f'Request data of deploy model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response of model deploy request: {json.dumps(resp_data, indent=4)}')
if resp_data.get("retcode", 100) == 0:
self.deployed_model_version = resp_data.get("data", {}).get("model_version")
else:
raise Exception(f"Model {self.model_id} {self.model_version} deploy failed, "
f"details: {resp_data.get('retmsg')}")
else:
raise Exception(f"Request model deploy api failed, status code: {response.status_code}")
def load_model(self):
post_data = {
"job_id": self.deployed_model_version
}
response = requests.post("/".join([self.server_url, "model", "load"]), json=post_data)
print(f'Request data of load model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response of load model request: {json.dumps(resp_data, indent=4)}')
if not resp_data.get('retcode'):
return True
raise Exception(f"Load model {self.model_id} {self.deployed_model_version} failed, "
f"details: {resp_data.get('retmsg')}")
raise Exception(f"Request model load api failed, status code: {response.status_code}")
def bind_model(self):
post_data = {
"job_id": self.deployed_model_version,
"service_id": f"auto_test_{datetime.strftime(datetime.now(), '%Y%m%d%H%M%S')}"
}
response = requests.post("/".join([self.server_url, "model", "bind"]), json=post_data)
print(f'Request data of bind model request: {json.dumps(post_data, indent=4)}')
if response.status_code == 200:
resp_data = response.json()
print(f'Response data of bind model request: {json.dumps(resp_data, indent=4)}')
if not resp_data.get('retcode'):
self.service_id = post_data.get('service_id')
return True
raise Exception(f"Bind model {self.model_id} {self.deployed_model_version} failed, "
f"details: {resp_data.get('retmsg')}")
raise Exception(f"Request model bind api failed, status code: {response.status_code}")
def online_predict(self, online_serving):
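        # Post a sample feature payload to the FATE Serving inference endpoint using the previously bound service_id.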
serving_url = f"http://{online_serving}/federation/1.0/inference"
post_data = {
"head": {
"serviceId": self.service_id
},
"body": {
"featureData": {
"phone_num": "18576635456",
},
"sendToRemoteFeatureData": {
"device_type": "imei",
"phone_num": "18576635456",
"encrypt_type": "raw"
}
}
}
headers = {"Content-Type": "application/json"}
response = requests.post(serving_url, json=post_data, headers=headers)
print(f"Request data of online predict request: {json.dumps(post_data, indent=4)}")
if response.status_code == 200:
            print(f"Online predict succeeded, response: {json.dumps(response.json(), indent=4)}")
        else:
            print(f"Online predict failed, details: {response.text}")
def run_fate_flow_test(config_json):
guest_party_id = config_json['guest_party_id']
host_party_id = config_json['host_party_id']
arbiter_party_id = config_json['arbiter_party_id']
train_conf_path = config_json['train_conf_path']
train_dsl_path = config_json['train_dsl_path']
server_url = config_json['server_url']
online_serving = config_json['online_serving']
work_mode = config_json['work_mode']
constant_auc = config_json['train_auc']
component_name = config_json['component_name']
metric_output_path = config_json['metric_output_path']
model_output_path = config_json['model_output_path']
print('submit train job')
# train
train, train_count = train_job(guest_party_id, host_party_id, arbiter_party_id, train_conf_path, train_dsl_path,
server_url, work_mode, component_name, metric_output_path, model_output_path, constant_auc)
if not train:
print('train job run failed')
return False
print('train job success')
# deploy
print('start deploy model')
utilize = UtilizeModel(train.model_id, train.model_version, server_url)
utilize.deploy_model()
print('deploy model success')
# predict
predict_conf_path = config_json['predict_conf_path']
predict_dsl_path = config_json['predict_dsl_path']
model_id = train.model_id
model_version = utilize.deployed_model_version
print('start submit predict job')
predict, predict_count = predict_job(guest_party_id, host_party_id, arbiter_party_id, predict_conf_path,
predict_dsl_path, model_id, model_version, server_url, work_mode, component_name)
if not predict:
print('predict job run failed')
return False
if train_count != predict_count:
        print('Loss of forecast data: train and predict record counts differ')
return False
print('predict job success')
# load model
utilize.load_model()
# bind model
utilize.bind_model()
# online predict
utilize.online_predict(online_serving=online_serving)
| apache-2.0 |
Lambdanaut/crits | crits/campaigns/forms.py | 15 | 2173 | from django import forms
from django.forms.widgets import HiddenInput
from crits.campaigns.campaign import Campaign
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names
class AddCampaignForm(forms.Form):
"""
Django form for adding a new Campaign.
"""
error_css_class = 'error'
required_css_class = 'required'
campaign = forms.CharField(widget=forms.TextInput, required=True)
aliases = forms.CharField(widget=forms.TextInput, required=False)
description = forms.CharField(widget=forms.TextInput, required=False)
def __init__(self, *args, **kwargs):
super(AddCampaignForm, self).__init__(*args, **kwargs)
add_bucketlist_to_form(self)
add_ticket_to_form(self)
class TTPForm(forms.Form):
"""
Django form for adding/editing a Campaign TTP.
"""
error_css_class = 'error'
required_css_class = 'required'
ttp = forms.CharField(
widget=forms.Textarea(attrs={'cols': '35',
'rows': '5'}),
required=True)
class CampaignForm(forms.Form):
"""
Django form for attributing a Campaign to another object.
The list of names comes from :func:`get_item_names`.
Confidence can be one of "low", "medium", or "high".
"""
error_css_class = 'error'
required_css_class = 'required'
name = forms.ChoiceField(widget=forms.Select, required=True)
confidence = forms.ChoiceField(widget=forms.Select, required=True)
description = forms.CharField(widget=forms.Textarea(), required=False)
date = forms.CharField(widget=HiddenInput, required=False)
related = forms.BooleanField(
help_text="Apply to all first level related objects.",
initial=False,
required=False)
def __init__(self, *args, **kwargs):
super(CampaignForm, self).__init__(*args, **kwargs)
self.fields['confidence'].choices = [
('low', 'low'),
('medium', 'medium'),
('high', 'high'),
]
self.fields['name'].choices = [
(c.name, c.name) for c in get_item_names(Campaign, True)]
| mit |
monarch-initiative/monarch-app | tests/behave/steps/selenium-forms.py | 3 | 1359 | ####
#### Steps for operating on the various forms and their results.
####
from behave import *
###
### radio button click
###
@given('I click the "{id}" radio button')
def step_impl(context, id):
webelt = context.browser.find_element_by_id(id)
webelt.click()
###
### Submission.
###
## Submit analyze phenotype.
@when('I submit analyze phenotype')
def step_impl(context):
webelt = context.browser.find_element_by_id('analyze-submit')
webelt.click()
## Submit navbar search.
@given('I submit navbar search')
def step_impl(context):
#print(context.browser.title)
webelt = context.browser.find_element_by_id('search_form')
webelt.submit()
###
### Example for input for a possible text area form.
###
@given('I input "{text}" into the textarea "{eid}"')
def step_impl(context, text, eid):
webelt = context.browser.find_element_by_id(eid)
webelt.send_keys(text)
@given('I input the following text into the textarea "{eid}"')
def step_impl(context, eid):
input_box_text = context.text
webelt = context.browser.find_element_by_id(eid)
webelt.send_keys(input_box_text)
@when('I submit the form by clicking XPath "{xpath}"')
def step_impl(context, xpath):
## xpath like "/html/body/div[2]/div[4]/div/div/form/div[2]/button"
webelt = context.browser.find_element_by_xpath(xpath)
webelt.click()
| bsd-3-clause |
Distrotech/intellij-community | python/lib/Lib/site-packages/django/conf/locale/no/formats.py | 685 | 1657 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
'%Y-%m-%d', # '2006-10-25',
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 |
yousrabk/mne-python | mne/viz/tests/test_misc.py | 17 | 4858 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import (io, read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate)
from mne.datasets import testing
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate)
from mne.utils import requires_nibabel, run_tests_if_main, slow_test
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
return io.Raw(raw_fname, preload=True)
def _get_events():
return read_events(event_fname)
def test_plot_cov():
"""Test plotting of covariances
"""
raw = _get_raw()
cov = read_cov(cov_fname)
fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
"""Test plotting of BEM contours
"""
assert_raises(IOError, plot_bem, subject='bad-subject',
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_bem, subject='sample',
subjects_dir=subjects_dir, orientation='bad-ori')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[25, 50])
def test_plot_events():
"""Test plotting events
"""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
plot_events(events, raw.info['sfreq'], raw.first_samp)
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 1}, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 111}, color=color)
@testing.requires_testing_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram
"""
sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
assert_raises(ValueError, plot_source_spectrogram, [], [])
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
@slow_test
@testing.requires_testing_data
def test_plot_snr():
"""Test plotting SNR estimate
"""
inv = read_inverse_operator(inv_fname)
evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
"""Test plotting dipole amplitudes
"""
dipoles = read_dipole(dip_fname)
dipoles.plot_amplitudes(show=False)
run_tests_if_main()
| bsd-3-clause |
350dotorg/Django | django/contrib/gis/db/models/fields.py | 400 | 11157 | from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if not connection.alias in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if not srid in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
# Geodetic units.
geodetic_units = ('Decimal Degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
        # Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection) in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, basestring) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name):
super(GeometryField, self).contribute_to_class(cls, name)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class' : forms.GeometryField,
'null' : self.null,
'geom_type' : self.geom_type,
'srid' : self.srid,
}
defaults.update(kwargs)
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
if lookup_type in connection.ops.gis_terms:
# special case for isnull lookup
if lookup_type == 'isnull':
return []
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if lookup_type in connection.ops.distance_functions:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, SQLEvaluator):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if value is None:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
description = _("Geometry collection")
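# Illustrative usage sketch (not part of the original module): how these field
# classes are typically declared on a GeoDjango model and queried with a distance
# lookup, which flows through GeometryField.get_prep_value/get_db_prep_lookup
# above. The model, field names and point below are hypothetical assumptions.
#
#     from django.contrib.gis.db import models
#     from django.contrib.gis.geos import Point
#     from django.contrib.gis.measure import D
#
#     class Shop(models.Model):
#         name = models.CharField(max_length=100)
#         location = models.PointField(srid=4326, geography=True)
#         objects = models.GeoManager()
#
#     pnt = Point(-0.1276, 51.5072, srid=4326)
#     # D(km=1) is converted into the field's units by get_distance() above:
#     nearby = Shop.objects.filter(location__distance_lte=(pnt, D(km=1)))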
| bsd-3-clause |
osgcc/ryzom | nel/tools/build_gamedata/processes/clodbank/3_install.py | 3 | 1753 | #!/usr/bin/python
#
# \file 3_install.py
# \brief Install clodbank
# \date 2009-03-10 13:13GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Install clodbank
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Install clodbank")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
printLog(log, ">>> Install clodbank <<<")
srcDir = ExportBuildDirectory + "/" + ClodBankBuildDirectory
mkPath(log, srcDir)
destDir = InstallDirectory + "/" + ShapeInstallDirectory
mkPath(log, destDir)
copyFilesNoTreeIfNeeded(log, srcDir, destDir)
printLog(log, "")
log.close()
# end of file
| agpl-3.0 |
royc1/gpdb | gpMgmt/bin/gppylib/mainUtils.py | 19 | 22742 | # Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
mainUtils.py
------------
This file provides a rudimentary framework to support top-level option
parsing, initialization and cleanup logic common to multiple programs.
It also implements workarounds to make other modules we use like
GpCoverage() work properly.
The primary interface function is 'simple_main'. For an example of
how it is expected to be used, see gprecoverseg.
It is anticipated that the functionality of this file will grow as we
extend common functions of our gp utilities. Please keep this in mind
and try to avoid placing logic for a specific utility here.
"""
import os, sys, signal, errno, yaml
gProgramName = os.path.split(sys.argv[0])[-1]
if sys.version_info < (2, 5, 0):
sys.exit(
'''Error: %s is supported on Python versions 2.5 or greater
Please upgrade python installed on this machine.''' % gProgramName)
from gppylib import gplog
from gppylib.commands import gp, unix
from gppylib.commands.base import ExecutionError
from gppylib.system import configurationInterface, configurationImplGpdb, fileSystemInterface, \
fileSystemImplOs, osInterface, osImplNative, faultProberInterface, faultProberImplGpdb
from optparse import OptionGroup, OptionParser, SUPPRESS_HELP
from gppylib.gpcoverage import GpCoverage
from lockfile.pidlockfile import PIDLockFile, LockTimeout
def getProgramName():
"""
Return the name of the current top-level program from sys.argv[0]
or the programNameOverride option passed to simple_main via mainOptions.
"""
global gProgramName
return gProgramName
class SimpleMainLock:
"""
Tools like gprecoverseg prohibit running multiple instances at the same time
via a simple lock file created in the MASTER_DATA_DIRECTORY. This class takes
care of the work to manage this lock as appropriate based on the mainOptions
specified.
Note that in some cases, the utility may want to recursively invoke
itself (e.g. gprecoverseg -r). To handle this, the caller may specify
the name of an environment variable holding the pid already acquired by
the parent process.
"""
def __init__(self, mainOptions):
self.pidfilename = mainOptions.get('pidfilename', None) # the file we're using for locking
self.parentpidvar = mainOptions.get('parentpidvar', None) # environment variable holding parent pid
self.parentpid = None # parent pid which already has the lock
self.ppath = None # complete path to the lock file
self.pidlockfile = None # PIDLockFile object
self.pidfilepid = None # pid of the process which has the lock
self.locktorelease = None # PIDLockFile object we should release when done
if self.parentpidvar is not None and self.parentpidvar in os.environ:
self.parentpid = int(os.environ[self.parentpidvar])
if self.pidfilename is not None:
self.ppath = os.path.join(gp.get_masterdatadir(), self.pidfilename)
self.pidlockfile = PIDLockFile( self.ppath )
def acquire(self):
"""
Attempts to acquire the lock this process needs to proceed.
Returns None on successful acquisition of the lock or
the pid of the other process which already has the lock.
"""
        # nothing to do if utility requires no locking
if self.pidlockfile is None:
return None
# look for a lock file
self.pidfilepid = self.pidlockfile.read_pid()
if self.pidfilepid is not None:
# we found a lock file
# allow the process to proceed if the locker was our parent
if self.pidfilepid == self.parentpid:
return None
# cleanup stale locks
try:
os.kill(self.pidfilepid, signal.SIG_DFL)
except OSError, exc:
if exc.errno == errno.ESRCH:
self.pidlockfile.break_lock()
self.pidfilepid = None
# try and acquire the lock
try:
self.pidlockfile.acquire(1)
except LockTimeout:
self.pidfilepid = self.pidlockfile.read_pid()
return self.pidfilepid
# we have the lock
# prepare for a later call to release() and take good
# care of the process environment for the sake of our children
self.locktorelease = self.pidlockfile
self.pidfilepid = self.pidlockfile.read_pid()
if self.parentpidvar is not None:
os.environ[self.parentpidvar] = str(self.pidfilepid)
return None
def release(self):
"""
Releases the lock this process acquired.
"""
if self.locktorelease is not None:
self.locktorelease.release()
self.locktorelease = None
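# Illustrative usage sketch (not part of the original module): how a utility
# normally engages this lock through the mainOptions passed to simple_main.
# The lock file name and environment variable below are hypothetical.
#
#     main_options = {
#         'pidfilename': 'gpmytool.lock',   # created inside MASTER_DATA_DIRECTORY
#         'parentpidvar': 'GPMYTOOL_PID',   # lets a recursive invocation reuse the parent's lock
#     }
#     sml = SimpleMainLock(main_options)
#     otherpid = sml.acquire()
#     if otherpid is not None:
#         print 'another instance is already running (pid %s)' % otherpid
#     else:
#         try:
#             pass  # ... do the utility's work ...
#         finally:
#             sml.release()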
#
# exceptions we handle specially by the simple_main framework.
#
class ProgramArgumentValidationException(Exception):
"""
Throw this out to main to have the message possibly
printed with a help suggestion.
"""
def __init__(self, msg, shouldPrintHelp=False):
"init"
Exception.__init__(self)
self.__shouldPrintHelp = shouldPrintHelp
self.__msg = msg
def shouldPrintHelp(self):
"shouldPrintHelp"
return self.__shouldPrintHelp
def getMessage(self):
"getMessage"
return self.__msg
class ExceptionNoStackTraceNeeded(Exception):
"""
Our code throws this exception when we encounter a condition
we know can arise which demands immediate termination.
"""
pass
class UserAbortedException(Exception):
"""
UserAbortedException should be thrown when a user decides to stop the
program (at a y/n prompt, for example).
"""
pass
def simple_main( createOptionParserFn, createCommandFn, mainOptions=None) :
"""
createOptionParserFn : a function that takes no arguments and returns an OptParser
    createCommandFn : a function that takes two arguments (the options, and the args that are not processed into
                         options) and returns an object that has "run" and "cleanup" functions. Its "run" function must
                         run and return an exit code. "cleanup" will be called to clean up before the program exits;
                         this can be used, for example, to clean up a worker pool
mainOptions can include: forceQuietOutput (map to bool),
programNameOverride (map to string)
suppressStartupLogMessage (map to bool)
useHelperToolLogging (map to bool)
setNonuserOnToolLogger (map to bool, defaults to false)
pidfilename (string)
parentpidvar (string)
"""
coverage = GpCoverage()
coverage.start()
try:
simple_main_internal(createOptionParserFn, createCommandFn, mainOptions)
finally:
coverage.stop()
coverage.generate_report()
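# Illustrative usage sketch (not part of the original module): the minimal shape
# of a utility built on simple_main, using this module's OptionParser import and
# addStandardLoggingAndHelpOptions below. The command class, lock file name and
# environment variable are hypothetical.
#
#     def create_parser():
#         parser = OptionParser()
#         addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
#         return parser
#
#     class MyCommand:
#         def __init__(self, options, args):
#             self.options, self.args = options, args
#         def run(self):
#             return 0       # exit code reported by simple_main
#         def cleanup(self):
#             pass           # e.g. stop a worker pool
#
#     # the class itself works as createCommandFn since it is called with (options, args)
#     simple_main(create_parser, MyCommand,
#                 {'pidfilename': 'mytool.lock', 'parentpidvar': 'MYTOOL_PID'})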
def simple_main_internal(createOptionParserFn, createCommandFn, mainOptions):
"""
If caller specifies 'pidfilename' in mainOptions then we manage the
specified pid file within the MASTER_DATA_DIRECTORY before proceeding
to execute the specified program and we clean up the pid file when
we're done.
"""
sml = None
if mainOptions is not None and 'pidfilename' in mainOptions:
sml = SimpleMainLock(mainOptions)
otherpid = sml.acquire()
if otherpid is not None:
logger = gplog.get_default_logger()
logger.error("An instance of %s is already running (pid %s)" % (getProgramName(), otherpid))
return
# at this point we have whatever lock we require
try:
simple_main_locked(createOptionParserFn, createCommandFn, mainOptions)
finally:
if sml is not None:
sml.release()
def simple_main_locked(createOptionParserFn, createCommandFn, mainOptions):
"""
Not to be called externally -- use simple_main instead
"""
logger = gplog.get_default_logger()
configurationInterface.registerConfigurationProvider( configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog())
fileSystemInterface.registerFileSystemProvider( fileSystemImplOs.GpFileSystemProviderUsingOs())
osInterface.registerOsProvider( osImplNative.GpOsProviderUsingNative())
faultProberInterface.registerFaultProber( faultProberImplGpdb.GpFaultProberImplGpdb())
commandObject = None
parser = None
forceQuiet = mainOptions is not None and mainOptions.get("forceQuietOutput")
options = None
if mainOptions is not None and mainOptions.get("programNameOverride"):
global gProgramName
gProgramName = mainOptions.get("programNameOverride")
suppressStartupLogMessage = mainOptions is not None and mainOptions.get("suppressStartupLogMessage")
useHelperToolLogging = mainOptions is not None and mainOptions.get("useHelperToolLogging")
nonuser = True if mainOptions is not None and mainOptions.get("setNonuserOnToolLogger") else False
exit_status = 1
# NOTE: if this logic is changed then also change test_main in testUtils.py
try:
execname = getProgramName()
hostname = unix.getLocalHostname()
username = unix.getUserName()
parser = createOptionParserFn()
(options, args) = parser.parse_args()
if useHelperToolLogging:
gplog.setup_helper_tool_logging(execname, hostname, username)
else:
gplog.setup_tool_logging(execname, hostname, username,
logdir=options.ensure_value("logfileDirectory", None), nonuser=nonuser )
if forceQuiet:
gplog.quiet_stdout_logging()
else:
if options.ensure_value("verbose", False):
gplog.enable_verbose_logging()
if options.ensure_value("quiet", False):
gplog.quiet_stdout_logging()
if options.ensure_value("masterDataDirectory", None) is not None:
options.master_data_directory = os.path.abspath(options.masterDataDirectory)
if not suppressStartupLogMessage:
logger.info("Starting %s with args: %s" % (gProgramName, ' '.join(sys.argv[1:])))
commandObject = createCommandFn(options, args)
exitCode = commandObject.run()
exit_status = exitCode
except ProgramArgumentValidationException, e:
if e.shouldPrintHelp():
parser.print_help()
logger.error("%s: error: %s" %(gProgramName, e.getMessage()))
exit_status = 2
except ExceptionNoStackTraceNeeded, e:
logger.error( "%s error: %s" % (gProgramName, e))
exit_status = 2
except UserAbortedException, e:
logger.info("User abort requested, Exiting...")
exit_status = 4
except ExecutionError, e:
logger.fatal("Error occurred: %s\n Command was: '%s'\n"
"rc=%d, stdout='%s', stderr='%s'" %\
(e.summary,e.cmd.cmdStr, e.cmd.results.rc, e.cmd.results.stdout,
e.cmd.results.stderr ))
exit_status = 2
except Exception, e:
if options is None:
logger.exception("%s failed. exiting...", gProgramName)
else:
if options.ensure_value("verbose", False):
logger.exception("%s failed. exiting...", gProgramName)
else:
logger.fatal("%s failed. (Reason='%s') exiting..." % (gProgramName, e))
exit_status = 2
except KeyboardInterrupt:
exit_status = 2
finally:
if commandObject:
commandObject.cleanup()
sys.exit(exit_status)
def addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption, includeUsageOption=False):
"""
Add the standard options for help and logging
to the specified parser object.
"""
parser.set_usage('%prog [--help] [options] ')
parser.remove_option('-h')
addTo = parser
addTo.add_option('-h', '-?', '--help', action='help',
help='show this help message and exit')
if includeUsageOption:
parser.add_option('--usage', action="briefhelp")
addTo = OptionGroup(parser, "Logging Options")
parser.add_option_group(addTo)
addTo.add_option('-v', '--verbose', action='store_true',
help='debug output.')
addTo.add_option('-q', '--quiet', action='store_true',
help='suppress status messages')
addTo.add_option("-l", None, dest="logfileDirectory", metavar="<directory>", type="string",
help="Logfile directory")
if includeNonInteractiveOption:
addTo.add_option('-a', dest="interactive" , action='store_false', default=True,
help="quiet mode, do not require user input for confirmations")
def addMasterDirectoryOptionForSingleClusterProgram(addTo):
"""
Add the -d master directory option to the specified parser object
which is intended to provide the value of the master data directory.
For programs that operate on multiple clusters at once, this function/option
is not appropriate.
"""
addTo.add_option('-d', '--master_data_directory', type='string',
dest="masterDataDirectory",
metavar="<master data directory>",
                     help="Optional. The master host data directory. If not specified, the value set "\
                          "for $MASTER_DATA_DIRECTORY will be used.")
#
# YamlMain
#
def get_yaml(targetclass):
"get_yaml"
# doc - class's doc string
# pos - where YAML starts in doc
# ystr - YAML string extracted from doc
if not hasattr(targetclass, '_yaml') or targetclass._yaml is None:
doc = targetclass.__doc__
pos = doc.find('%YAML')
assert pos >= 0, "targetclass doc string is missing %YAML plan"
ystr = doc[pos:].replace('\n ','\n')
targetclass._yaml = yaml.load(ystr)
return targetclass._yaml
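# Illustrative sketch (not part of the original module): the kind of class doc
# string get_yaml() expects. Parsing starts at the '%YAML' marker; prose before it
# is ignored. The tool, scenario, task and error names below are hypothetical.
#
#     class MyTool(YamlMain):
#         """
#         Free-form description, skipped by get_yaml().
#
#         %YAML 1.1
#         ---
#         Description: Example tool driven by a YAML plan.
#         Usage: 'mytool [--scenario name]'
#         Options:
#             Groups: []
#         Default Scenario: run
#         Scenarios:
#             run:
#                 - 1 Main:
#                     - 1.1 Say Hello
#         Errors:
#             nothing_to_do: 'nothing to do (%(reason)s)'
#         """
#         def say_hello(self):
#             self.logger.info('hello')
#
#     # get_yaml(MyTool)['Default Scenario'] == 'run'; Plan maps the sub task
#     # '1.1 Say Hello' onto the method MyTool.say_hello.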
class YamlMain:
"YamlMain"
def __init__(self):
"Parse arguments based on yaml docstring"
self.current = None
self.plan = None
self.scenario_name = None
self.logger = None
self.logfilename = None
self.errmsg = None
self.parser = YamlOptions(self).parser
self.options, self.args = self.parser.parse_args()
self.options.quiet = self.options.q
self.options.verbose = self.options.v
#
# simple_main interface
#
def __call__(self, *args):
"Allows us to use self as the create_parser and create_program functions in call to simple_main"
return self
def parse_args(self):
"Called by simple_main to obtain results from parser returned by create_parser"
return self.options, self.args
def run(self):
"Called by simple_main to execute the program returned by create_program"
self.plan = Plan(self)
self.scenario_name = self.plan.name
self.logger = self.plan.logger
self.logfilename = self.plan.logfilename
self.errmsg = self.plan.errmsg
self.current = []
self.plan.run()
def cleanup(self):
"Called by simple_main to cleanup after program returned by create_program finishes"
pass
def simple(self):
"Delegates setup and control to mainUtils.simple_main"
simple_main(self, self)
#
# option parsing
#
class YamlOptions:
"YamlOptions"
def __init__(self, target):
"""
Scan the class doc string of the given object, looking for the %YAML
containing the option specification. Parse the YAML and setup the
corresponding OptionParser object.
"""
# target - options object (input)
# gname - option group name
self.y = get_yaml(target.__class__)
self.parser = OptionParser( description=self.y['Description'], version='%prog version $Revision$')
self.parser.remove_option('-h')
self.parser.set_usage(self.y['Usage'])
self.opty = self.y['Options']
for gname in self.opty.get('Groups', []):
self._register_group(gname)
def _register_group(self, gname):
"""
Register options for the specified option group name to the OptionParser
using an OptionGroup unless the group name starts with 'Help' in which
case we just register the options with the top level OptionParser object.
"""
# gname - option group name (input)
# gy - option group YAML object
# grp - option group object
# tgt - where to add options (parser or option group)
# optkey - comma separated list of option flags
# optval - help string or dict with detailed option settings
# listargs - list of option flags (e.g. ['-h', '--help'])
# dictargs - key/value arguments to add_option
gy = self.opty.get(gname, None)
if gname.startswith('Help'):
grp = None
tgt = self.parser
else:
grp = OptionGroup(self.parser, gname)
tgt = grp
for optkey, optval in gy.items():
listargs = optkey.split(',')
if type(optval) == type(''):
# short form: optval is just a help string
dictargs = {
'action': 'store_true',
'help': optval
}
else:
# optval is the complete option specification
dictargs = optval
# hide hidden options
if dictargs.get('help','').startswith('hidden'):
dictargs['help'] = SUPPRESS_HELP
#print 'adding', listargs, dictargs
tgt.add_option(*listargs, **dictargs)
if grp is not None:
self.parser.add_option_group(grp)
#
# plan execution
#
class Task:
"Task"
def __init__(self, key, name, subtasks=None):
self.Key = key # task key
self.Name = name # task name
self.SubTasks = subtasks # subtasks, if any
self.Func = None # task function, set by _task
def _print(self, main, prefix):
print '%s %s %s:' % (prefix, self.Key, self.Name)
def _debug(self, main, prefix):
main.logger.debug('Execution Plan:%s %s %s%s' % (prefix, self.Key, self.Name, ':' if self.SubTasks else ''))
def _run(self, main, prefix):
main.logger.debug(' Now Executing:%s %s %s' % (prefix, self.Key, self.Name))
if self.Func:
self.Func()
class Exit(Exception):
def __init__(self, rc, code=None, call_support=False):
Exception.__init__(self)
self.code = code
self.prm = sys._getframe(1).f_locals
self.rc = rc
self.call_support = call_support
class Plan:
"Plan"
def __init__(self, main):
"""
Create cached yaml from class doc string of the given object,
looking for the %YAML indicating the beginning of the object's YAML plan and parse it.
Build the plan stages and tasks for the specified scenario.
"""
# main - object with yaml scenarios (input)
# sy - Stage yaml
self.logger = gplog.get_default_logger()
self.logfilename = gplog.get_logfile()
self.main = main
self.y = get_yaml(main.__class__)
self.name = main.options.scenario
if not self.name:
self.name = self.y['Default Scenario']
self.scenario = self.y['Scenarios'][self.name]
self.errors = self.y['Errors']
self.Tasks = [ self._task(ty) for ty in self.scenario ]
def _task(self, ty):
"Invoked by __init__ to build a top-level task from the YAML"
# ty - Task yaml (input)
# tyk - Task yaml key
# tyv - Task yaml value
# sty - Sub Task yaml
# t - Task (returned)
for tyk, tyv in ty.items():
key, workers = tyk.split(None, 1)
subtasks = [ self._subtask(sty) for sty in tyv ]
t = Task(key, workers, subtasks)
return t
def _subtask(self, sty):
"Invoked by _stage to build a task from the YAML"
# sty - Sub Task yaml (input)
# st - Sub Task (returned)
key, rest = sty.split(None, 1)
st = Task(key, rest)
fn = st.Name.lower().replace(' ','_')
try:
st.Func = getattr(self.main, fn)
except AttributeError, e:
raise Exception("Failed to lookup '%s' for sub task '%s': %s" % (fn, st.Name, str(e)))
return st
def _dotasks(self, subtasks, prefix, action):
"Apply an action to each subtask recursively"
# st - Sub Task
for st in subtasks or []:
self.main.current.append(st)
action(st, self.main, prefix)
self._dotasks(st.SubTasks, ' '+prefix, action)
self.main.current.pop()
def _print(self):
"Print in YAML form."
print '%s:' % self.name
self._dotasks(self.Tasks, ' -', lambda t,m,p:t._print(m,p))
def run(self):
"Run the stages and tasks."
self.logger.debug('Execution Plan: %s' % self.name)
self._dotasks(self.Tasks, ' -', lambda t,m,p:t._debug(m,p))
self.logger.debug(' Now Executing: %s' % self.name)
try:
self._dotasks(self.Tasks, ' -', lambda t,m,p:t._run(m,p))
except Exit, e:
self.exit(e.code, e.prm, e.rc, e.call_support)
def errmsg(self, code, prm={}):
"Return a formatted error message"
return self.errors[code] % prm
def exit(self, code=None, prm={}, rc=1, call_support=False):
"Terminate the application"
if code:
msg = self.errmsg(code, prm)
self.logger.error(msg)
if call_support:
self.logger.error('Please send %s to Greenplum support.' % self.logfilename)
self.logger.debug('exiting with status %(rc)s' % locals())
sys.exit(rc)
| apache-2.0 |
endlessm/chromium-browser | third_party/catapult/third_party/cloudstorage/cloudstorage/api_utils.py | 11 | 12009 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Util functions and classes for cloudstorage_api."""
__all__ = ['set_default_retry_params',
'RetryParams',
]
import copy
import httplib
import logging
import math
import os
import threading
import time
import urllib
try:
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import ndb
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import utils
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
except ImportError:
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.datastore import datastore_rpc
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import utils
_RETRIABLE_EXCEPTIONS = (urlfetch.DownloadError,
urlfetch_errors.InternalTransientError,
apiproxy_errors.Error,
app_identity.InternalError,
app_identity.BackendDeadlineExceeded)
_thread_local_settings = threading.local()
_thread_local_settings.default_retry_params = None
def set_default_retry_params(retry_params):
"""Set a default RetryParams for current thread current request."""
_thread_local_settings.default_retry_params = copy.copy(retry_params)
def _get_default_retry_params():
"""Get default RetryParams for current request and current thread.
Returns:
A new instance of the default RetryParams.
"""
default = getattr(_thread_local_settings, 'default_retry_params', None)
if default is None or not default.belong_to_current_request():
return RetryParams()
else:
return copy.copy(default)
def _quote_filename(filename):
"""Quotes filename to use as a valid URI path.
Args:
filename: user provided filename. /bucket/filename.
Returns:
The filename properly quoted to use as URI's path component.
"""
return urllib.quote(filename)
def _unquote_filename(filename):
"""Unquotes a valid URI path back to its filename.
This is the opposite of _quote_filename.
Args:
filename: a quoted filename. /bucket/some%20filename.
Returns:
The filename unquoted.
"""
return urllib.unquote(filename)
def _should_retry(resp):
"""Given a urlfetch response, decide whether to retry that request."""
return (resp.status_code == httplib.REQUEST_TIMEOUT or
(resp.status_code >= 500 and
resp.status_code < 600))
class _RetryWrapper(object):
"""A wrapper that wraps retry logic around any tasklet."""
def __init__(self,
retry_params,
retriable_exceptions=_RETRIABLE_EXCEPTIONS,
should_retry=lambda r: False):
"""Init.
Args:
retry_params: an RetryParams instance.
retriable_exceptions: a list of exception classes that are retriable.
should_retry: a function that takes a result from the tasklet and returns
a boolean. True if the result should be retried.
"""
self.retry_params = retry_params
self.retriable_exceptions = retriable_exceptions
self.should_retry = should_retry
@ndb.tasklet
def run(self, tasklet, **kwds):
"""Run a tasklet with retry.
The retry should be transparent to the caller: if no results
are successful, the exception or result from the last retry is returned
to the caller.
Args:
tasklet: the tasklet to run.
**kwds: keywords arguments to run the tasklet.
Raises:
The exception from running the tasklet.
Returns:
The result from running the tasklet.
"""
start_time = time.time()
n = 1
while True:
e = None
result = None
got_result = False
try:
result = yield tasklet(**kwds)
got_result = True
if not self.should_retry(result):
raise ndb.Return(result)
except runtime.DeadlineExceededError:
logging.debug(
'Tasklet has exceeded request deadline after %s seconds total',
time.time() - start_time)
raise
except self.retriable_exceptions, e:
pass
if n == 1:
logging.debug('Tasklet is %r', tasklet)
delay = self.retry_params.delay(n, start_time)
if delay <= 0:
logging.debug(
'Tasklet failed after %s attempts and %s seconds in total',
n, time.time() - start_time)
if got_result:
raise ndb.Return(result)
elif e is not None:
raise e
else:
assert False, 'Should never reach here.'
if got_result:
logging.debug(
'Got result %r from tasklet.', result)
else:
logging.debug(
'Got exception "%r" from tasklet.', e)
logging.debug('Retry in %s seconds.', delay)
n += 1
yield tasklets.sleep(delay)
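# Illustrative usage sketch (not part of the original module): wrapping an ndb
# tasklet with retry. The tasklet, URL and retry settings are hypothetical.
#
#   @ndb.tasklet
#   def fetch(url):
#     ctx = ndb.get_context()
#     resp = yield ctx.urlfetch(url, deadline=10)
#     raise ndb.Return(resp)
#
#   wrapper = _RetryWrapper(RetryParams(max_retries=4, initial_delay=0.2),
#                           retriable_exceptions=_RETRIABLE_EXCEPTIONS,
#                           should_retry=_should_retry)
#   resp = wrapper.run(fetch, url='http://example.com').get_result()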
class RetryParams(object):
"""Retry configuration parameters."""
_DEFAULT_USER_AGENT = 'App Engine Python GCS Client'
@datastore_rpc._positional(1)
def __init__(self,
backoff_factor=2.0,
initial_delay=0.1,
max_delay=10.0,
min_retries=3,
max_retries=6,
max_retry_period=30.0,
urlfetch_timeout=None,
save_access_token=False,
_user_agent=None,
memcache_access_token=True):
"""Init.
This object is unique per request per thread.
Library will retry according to this setting when App Engine Server
can't call urlfetch, urlfetch timed out, or urlfetch got a 408 or
500-600 response.
Args:
backoff_factor: exponential backoff multiplier.
initial_delay: seconds to delay for the first retry.
max_delay: max seconds to delay for every retry.
min_retries: min number of times to retry. This value is automatically
capped by max_retries.
max_retries: max number of times to retry. Set this to 0 for no retry.
max_retry_period: max total seconds spent on retry. Retry stops when
this period passed AND min_retries has been attempted.
urlfetch_timeout: timeout for urlfetch in seconds. Could be None,
in which case the value will be chosen by urlfetch module.
save_access_token: persist access token to datastore to avoid
excessive usage of GetAccessToken API. In addition to this, the token
will be cached in process, and may also be cached in memcache (see
memcache_access_token param). However, storing in Datastore can still
be useful in the event that memcache is unavailable.
_user_agent: The user agent string that you want to use in your requests.
memcache_access_token: cache access token in memcache to avoid excessive
usage of GetAccessToken API.
"""
self.backoff_factor = self._check('backoff_factor', backoff_factor)
self.initial_delay = self._check('initial_delay', initial_delay)
self.max_delay = self._check('max_delay', max_delay)
self.max_retry_period = self._check('max_retry_period', max_retry_period)
self.max_retries = self._check('max_retries', max_retries, True, int)
self.min_retries = self._check('min_retries', min_retries, True, int)
if self.min_retries > self.max_retries:
self.min_retries = self.max_retries
self.urlfetch_timeout = None
if urlfetch_timeout is not None:
self.urlfetch_timeout = self._check('urlfetch_timeout', urlfetch_timeout)
self.save_access_token = self._check('save_access_token', save_access_token,
True, bool)
self.memcache_access_token = self._check('memcache_access_token',
memcache_access_token,
True,
bool)
self._user_agent = _user_agent or self._DEFAULT_USER_AGENT
self._request_id = os.getenv('REQUEST_LOG_ID')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _check(cls, name, val, can_be_zero=False, val_type=float):
"""Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in.
"""
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
'Value for parameter %s has to be greater than 0' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val
def belong_to_current_request(self):
return os.getenv('REQUEST_LOG_ID') == self._request_id
def delay(self, n, start_time):
"""Calculate delay before the next retry.
Args:
n: the number of current attempt. The first attempt should be 1.
start_time: the time when retry started in unix time.
Returns:
Number of seconds to wait before next retry. -1 if retry should give up.
"""
if (n > self.max_retries or
(n > self.min_retries and
time.time() - start_time > self.max_retry_period)):
return -1
return min(
math.pow(self.backoff_factor, n-1) * self.initial_delay,
self.max_delay)
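# Illustrative sketch (not part of the original module): the schedule delay()
# produces with the defaults above (backoff_factor=2.0, initial_delay=0.1,
# max_delay=10.0), while the elapsed time stays under max_retry_period:
#
#   params = RetryParams()
#   start = time.time()
#   [params.delay(n, start) for n in range(1, 8)]
#   # -> approximately [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, -1]
#   # i.e. min(initial_delay * backoff_factor**(n-1), max_delay), and -1 ("give
#   # up") once n exceeds max_retries.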
def _run_until_rpc():
"""Eagerly evaluate tasklets until it is blocking on some RPC.
Usually ndb eventloop el isn't run until some code calls future.get_result().
When an async tasklet is called, the tasklet wrapper evaluates the tasklet
code into a generator, enqueues a callback _help_tasklet_along onto
the el.current queue, and returns a future.
_help_tasklet_along, when called by the el, will
  get one yielded value from the generator. If the value is another future,
  set up a callback _on_future_complete to invoke _help_tasklet_along
  when the dependent future fulfills. If the value is an RPC, set up a
  callback _on_rpc_complete to invoke _help_tasklet_along when the RPC fulfills.
  Thus _help_tasklet_along drills down
  the chain of futures until some future is blocked by RPC. El runs
  all callbacks and constantly checks pending RPC status.
"""
el = eventloop.get_event_loop()
while el.current:
el.run0()
def _eager_tasklet(tasklet):
"""Decorator to turn tasklet to run eagerly."""
@utils.wrapping(tasklet)
def eager_wrapper(*args, **kwds):
fut = tasklet(*args, **kwds)
_run_until_rpc()
return fut
return eager_wrapper
| bsd-3-clause |
kdani3/searx | searx/engines/bing_images.py | 3 | 2634 | """
Bing (Images)
@website https://www.bing.com/images
@provide-api yes (http://datamarket.azure.com/dataset/bing/search),
max. 5000 query/month
@using-api no (because of query limit)
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, img_src
 @todo currently up to 35 images are received per page,
       because bing does not respect count=10;
       the response is limited to the first 10 images
"""
from urllib import urlencode
from lxml import html
from yaml import load
import re
# engine dependent config
categories = ['images']
paging = True
safesearch = True
# search-url
base_url = 'https://www.bing.com/'
search_string = 'images/search?{query}&count=10&first={offset}'
thumb_url = "https://www.bing.com/th?id={ihk}"
# safesearch definitions
safesearch_types = {2: 'STRICT',
1: 'DEMOTE',
0: 'OFF'}
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
# required for cookie
if params['language'] == 'all':
language = 'en-US'
else:
language = params['language'].replace('_', '-')
search_path = search_string.format(
query=urlencode({'q': query}),
offset=offset)
params['cookies']['SRCHHPGUSR'] = \
'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0] +\
'&ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
params['url'] = base_url + search_path
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.content)
# init regex for yaml-parsing
p = re.compile('({|,)([a-z]+):(")')
# parse results
for result in dom.xpath('//div[@class="dg_u"]'):
link = result.xpath('./a')[0]
# parse yaml-data (it is required to add a space, to make it parsable)
yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))
title = link.attrib.get('t1')
ihk = link.attrib.get('ihk')
# url = 'http://' + link.attrib.get('t3')
url = yaml_data.get('surl')
img_src = yaml_data.get('imgurl')
# append result
results.append({'template': 'images.html',
'url': url,
'title': title,
'content': '',
'thumbnail_src': thumb_url.format(ihk=ihk),
'img_src': img_src})
# TODO stop parsing if 10 images are found
if len(results) >= 10:
break
# return results
return results
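# Illustrative sketch (not part of the original engine): what the regex-based
# fix-up above does to the 'm' attribute before yaml.load(). The attribute value
# is a shortened, hypothetical example.
#
#   m = '{surl:"http://example.com/page",imgurl:"http://example.com/img.jpg"}'
#   p.sub(r'\1\2: \3', m)
#   # -> '{surl: "http://example.com/page",imgurl: "http://example.com/img.jpg"}'
#   # inserting the space after ':' turns the value into a YAML flow mapping that
#   # yaml.load() can parse.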
| agpl-3.0 |
cjhak/b2share | invenio/modules/communities/signals.py | 14 | 3231 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
User community signals - useful for hooking into the community
creation process.
"""
from blinker import Namespace
_signals = Namespace()
before_save_collection = _signals.signal('before-save-collection')
"""
This signal is sent right before a collection is saved.
Sender is the community. Extra data passed is:
* is_new
* provisional
"""
after_save_collection = _signals.signal('after-save-collection')
"""
This signal is sent right after a collection is saved.
Sender is the community. Extra data passed is:
* collection
* provisional
"""
before_save_collections = _signals.signal('before-save-collections')
"""
This signal is sent right before all collections are saved.
Sender is the community.
"""
after_save_collections = _signals.signal('after-save-collections')
"""
This signal is sent right after all collections are saved.
Sender is the community.
"""
before_delete_collection = _signals.signal('before-delete-collection')
"""
This signal is sent right before a collection is deleted.
Sender is the community. Extra data passed is:
* collection
* provisional
"""
after_delete_collection = _signals.signal('after-delete-collection')
"""
This signal is sent right after a collection is deleted.
Sender is the community. Extra data passed is:
* provisional
"""
before_delete_collections = _signals.signal('before-delete-collections')
"""
This signal is sent right before all collections are deleted.
Sender is the community.
"""
after_delete_collections = _signals.signal('after-delete-collections')
"""
This signal is sent right after all collections are deleted.
Sender is the community.
"""
pre_curation = _signals.signal('pre-curation')
"""
This signal is sent right before a record is accepted or rejected.
Sender is the user community. Extra data passed is:
 * action: accept or reject
 * recid: Record ID
 * pretend: True if record changes are not actually persisted
"""
post_curation = _signals.signal('post-curation')
"""
This signal is sent right after a record is accepted or rejected.
Sender is the user community.
* action: accept or reject
* recid: Record ID
* record: Record which was uploaded
 * pretend: True if record changes are not actually persisted
Note, the record which was accepted/rejected is most likely not updated
yet in the database, since bibupload has to run first.
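A hypothetical receiver sketch (the function and its body are examples, not part
of the API):

    from invenio.modules.communities.signals import post_curation

    @post_curation.connect
    def notify_curator(sender, action=None, recid=None, record=None,
                       pretend=False, **extra):
        if action == 'accept' and not pretend:
            pass  # e.g. notify the community owner that the record was accepted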
"""
curate_record = _signals.signal('curate-record')
"""
This signal is sent right before curation process removes a record.
"""
| gpl-2.0 |
getstackd/stackd | vendor/boost-context/tools/build/v2/test/static_and_shared_library.py | 44 | 1094 | #!/usr/bin/python
# Copyright 2002, 2003 Dave Abrahams
# Copyright 2002, 2003, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", "")
t.write("lib/c.cpp", "int bar() { return 0; }\n")
t.write("lib/jamfile.jam", """\
static-lib auxilliary1 : c.cpp ;
lib auxilliary2 : c.cpp ;
""")
def reset():
t.rm("lib/bin")
t.run_build_system(subdir='lib')
t.expect_addition("lib/bin/$toolset/debug/" * BoostBuild.List("c.obj "
"auxilliary1.lib auxilliary2.dll"))
t.expect_nothing_more()
reset()
t.run_build_system(["link=shared"], subdir="lib")
t.expect_addition("lib/bin/$toolset/debug/" * BoostBuild.List("c.obj "
"auxilliary1.lib auxilliary2.dll"))
t.expect_nothing_more()
reset()
t.run_build_system(["link=static"], subdir="lib")
t.expect_addition("lib/bin/$toolset/debug/link-static/" * BoostBuild.List(
"c.obj auxilliary1.lib auxilliary2.lib"))
t.expect_nothing_more()
t.cleanup()
| mit |
indhub/mxnet | tools/coreml/converter/_mxnet_converter.py | 41 | 8850 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import _layers
import coremltools as _coremltools
import coremltools.models.datatypes as _datatypes
from coremltools.models import neural_network as _neural_network
import json as _json
import mxnet as _mxnet
import numpy as _np
_MXNET_LAYER_REGISTRY = {
'FullyConnected' : _layers.convert_dense,
'Activation' : _layers.convert_activation,
'SoftmaxOutput' : _layers.convert_softmax,
'Convolution' : _layers.convert_convolution,
'Pooling' : _layers.convert_pooling,
'Flatten' : _layers.convert_flatten,
'transpose' : _layers.convert_transpose,
'Concat' : _layers.convert_concat,
'BatchNorm' : _layers.convert_batchnorm,
'elemwise_add' : _layers.convert_elementwise_add,
'Reshape' : _layers.convert_reshape,
'Deconvolution' : _layers.convert_deconvolution,
}
_MXNET_SKIP_LAYERS = [
'_MulScalar',
'Dropout',
]
def _mxnet_remove_batch(input_data):
for blob in input_data:
input_data[blob] = _np.reshape(input_data[blob], input_data[blob].shape[1:])
return input_data
def check_error(model, path, shapes, output = 'softmax_output', verbose = True):
"""
Check the difference between predictions from MXNet and CoreML.
"""
coreml_model = _coremltools.models.MLModel(path)
input_data = {}
input_data_copy = {}
for ip in shapes:
input_data[ip] = _np.random.rand(*shapes[ip]).astype('f')
input_data_copy[ip] = _np.copy(input_data[ip])
dataIter = _mxnet.io.NDArrayIter(input_data_copy)
mx_out = model.predict(dataIter).flatten()
e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
e_out = e_out_dict[output].flatten()
error = _np.linalg.norm(e_out - mx_out)
if verbose:
print "First few predictions from CoreML : %s" % e_out[0:10]
        print "First few predictions from MXNet : %s" % mx_out[0:10]
print "L2 Error on random data %s" % error
return error
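# Illustrative usage sketch (not part of the original converter): converting a
# trained MXNet model and sanity-checking the CoreML output with check_error().
# The shapes, file names and labels file are hypothetical.
#
#   shapes = {'data': (1, 3, 224, 224)}          # includes the batch dimension
#   coreml_model = convert(model, input_shape=shapes, mode='classifier',
#                          class_labels='synset.txt')
#   coreml_model.save('model.mlmodel')
#   check_error(model, 'model.mlmodel', shapes, output='softmax_output')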
def _set_input_output_layers(builder, input_names, output_names):
input_layers_indices = []
output_layers_indices = []
layers = builder.spec.neuralNetwork.layers
for idx, l in enumerate(layers):
if set(input_names).intersection(l.input):
input_layers_indices.append(idx)
if set(output_names).intersection(l.output):
output_layers_indices.append(idx)
builder.input_layers_indices = input_layers_indices
builder.output_layers_indices = output_layers_indices
builder.input_layers_is1d = [False for _ in input_names]
builder.output_layers_is1d = [False for _ in output_names]
def _get_layer_converter_fn(layer):
"""Get the right converter function for MXNet
"""
if layer in _MXNET_LAYER_REGISTRY:
return _MXNET_LAYER_REGISTRY[layer]
else:
raise TypeError("MXNet layer of type %s is not supported." % layer)
def convert(model, input_shape, order = None, class_labels = None, mode = None, preprocessor_args = None):
"""Convert an MXNet model to the protobuf spec.
Parameters
----------
model: MXNet model
A trained MXNet neural network model.
order: Order of inputs
class_labels: A string or list of strings.
As a string it represents the name of the file which contains the classification labels (one per line).
As a list of strings it represents a list of categories that map the index of the output of a neural network to labels in a classifier.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model.
When mode = 'classifier', a NeuralNetworkClassifier spec will be constructed.
When mode = 'regressor', a NeuralNetworkRegressor spec will be constructed.
    input_shape: dict
        A dictionary with input names as keys and input shapes (tuples) as values.
    preprocessor_args: dict
        Pre-processing arguments that tell the converted coreml model how to pre-process any input
        before an inference is run on it.
        For the list of pre-processing arguments see
        http://pythonhosted.org/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
Returns
-------
model: A coreml model.
"""
if not isinstance(input_shape, dict):
raise TypeError("Must provide a dictionary for input shape. e.g input_shape={'data':(3,224,224)}")
def remove_batch(dim):
return dim[1:]
if order is None:
input_names = input_shape.keys()
input_dims = map(remove_batch, input_shape.values())
else:
names = input_shape.keys()
shapes = map(remove_batch, input_shape.values())
input_names = [names[i] for i in order]
input_dims = [shapes[i] for i in order]
net = model.symbol
# Infer shapes and store in a dictionary
shapes = net.infer_shape(**input_shape)
arg_names = net.list_arguments()
output_names = net.list_outputs()
aux_names = net.list_auxiliary_states()
shape_dict = {}
for idx, op in enumerate(arg_names):
shape_dict[op] = shapes[0][idx]
for idx, op in enumerate(output_names):
shape_dict[op] = shapes[1][idx]
for idx, op in enumerate(aux_names):
shape_dict[op] = shapes[2][idx]
# Get the inputs and outputs
output_dims = shapes[1]
input_types = [_datatypes.Array(*dim) for dim in input_dims]
output_types = [_datatypes.Array(*dim) for dim in output_dims]
# Make the builder
input_features = zip(input_names, input_types)
output_features = zip(output_names, output_types)
builder = _neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
# Get out the layers
net = _json.loads(net.tojson())
nodes = net['nodes']
for i, node in enumerate(nodes):
node['id'] = i
if node['name'] in shape_dict:
node['shape'] = shape_dict[node['name']]
node['outputs'] = []
if 'inputs' in node:
for ip in node['inputs']:
nodes[ip[0]]['outputs'].append([i, 0])
else:
node['inputs'] = []
# Mark the head nodes
for head in net['heads']:
head_id = head[0]
head_node = nodes[head_id]
head_node['outputs'] = [head]
head_node['name'] += "_output"
head_node['shape'] = shape_dict[head_node['name']]
# For skipped layers, make sure nodes are modified
for node in nodes:
op = node['op']
inputs = node['inputs']
outputs = node['outputs']
if op in _MXNET_SKIP_LAYERS:
nodes[inputs[0][0]]['outputs'][0] = outputs[0]
nodes[outputs[0][0]]['inputs'][0] = inputs[0]
# Find the input and output names for this node
for idx, node in enumerate(nodes):
op = node['op']
if op == 'null' or op in _MXNET_SKIP_LAYERS:
continue
name = node['name']
print("%d : %s, %s" % (idx, name, op))
converter_func = _get_layer_converter_fn(op)
converter_func(net, node, model, builder)
# Set the right inputs and outputs
_set_input_output_layers(builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
if preprocessor_args is not None:
builder.set_pre_processing_parameters(**preprocessor_args)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError("synset variable of unknown type. Type found: %s. Expected either string or list of strings." % type(class_labels))
builder.set_class_labels(class_labels = labels)
# Return the model
return _coremltools.models.MLModel(builder.spec) | apache-2.0 |
apache/incubator-singa | tool/opencl/clsrc_to_str.py | 3 | 3166 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''Extract OpenCL source code into C++ strings, for runtime use.
This file is executed only if .cl files are updated.
It is executed in the ROOT folder of SINGA source repo.
'''
from future.utils import iteritems
distribution = "./src/core/tensor/distribution.cl"
tensormath = "./src/core/tensor/tensor_math_opencl.cl"
im2col = "./src/model/layer/im2col.cl"
pooling = "./src/model/layer/pooling.cl"
files = {"distribution_str": distribution, "tensormath_str": tensormath,
"im2col_str": im2col, "pooling_str": pooling}
if __name__ == "__main__":
fullpath = './src/core/device/opencl_func.h'
with open(fullpath, 'w') as fout:
fout.write("// This file is auto-generated by tool/opencl/clsrc_to_str."
" do not edit manually.\n")
license = """
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
fout.write(license)
fout.write("#ifdef USE_OPENCL\n\n")
fout.write("#include <string>\n\n")
fout.write("namespace singa {\n namespace opencl {\n")
for name, path in iteritems(files):
with open(path, 'r') as fin:
src = fin.read()
src = repr(src)
src = src[1:-1]
src = src.replace('\"', '\\"') # Escape double quotes
src = src.replace('\\t', '') # Strip out tabs
fout.write("const std::string " + name + " = \"")
fout.write(src)
fout.write("\";")
fout.write("\n } // namespace opencl \n} // namespace singa\n\n")
fout.write("#endif")
fout.close()
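# Illustrative sketch (not part of the original tool): the rough shape of the
# generated opencl_func.h, with each kernel file embedded as one escaped C++
# string constant (content shortened and hypothetical):
#
#   #ifdef USE_OPENCL
#   #include <string>
#   namespace singa {
#    namespace opencl {
#   const std::string im2col_str = "__kernel void im2col(...) { ... }\n";
#    } // namespace opencl
#   } // namespace singa
#   #endif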
| apache-2.0 |
AndKe/MAVProxy | MAVProxy/modules/mavproxy_mmap/mmap_server.py | 6 | 1971 | import BaseHTTPServer
import json
import os.path
import thread
import urlparse
DOC_DIR = os.path.join(os.path.dirname(__file__), 'mmap_app')
class Server(BaseHTTPServer.HTTPServer):
def __init__(self, handler, address='', port=9999, module_state=None):
BaseHTTPServer.HTTPServer.__init__(self, (address, port), handler)
self.allow_reuse_address = True
self.module_state = module_state
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
scheme, host, path, params, query, frag = urlparse.urlparse(self.path)
if path == '/data':
state = self.server.module_state
data = {'lat': state.lat,
'lon': state.lon,
'heading': state.heading,
'alt': state.alt,
'airspeed': state.airspeed,
'groundspeed': state.groundspeed}
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(data))
else:
# Remove leading '/'.
path = path[1:]
# Ignore all directories. E.g. for ../../bar/a.txt serve
# DOC_DIR/a.txt.
unused_head, path = os.path.split(path)
# for / serve index.html.
if path == '':
path = 'index.html'
content = None
error = None
try:
import pkg_resources
name = __name__
if name == "__main__":
name = "MAVProxy.modules.mavproxy_mmap.????"
        content = pkg_resources.resource_stream(name, "mmap_app/%s" % path).read()
except IOError as e:
error = str(e)
if content:
self.send_response(200)
self.end_headers()
self.wfile.write(content)
else:
self.send_response(404)
self.end_headers()
self.wfile.write('Error: %s' % (error,))
def start_server(address, port, module_state):
server = Server(
Handler, address=address, port=port, module_state=module_state)
thread.start_new_thread(server.serve_forever, ())
return server
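# Illustrative usage sketch (not part of the original module): starting the server
# with a stand-in state object and polling its /data endpoint. The values below
# are hypothetical.
#
#   class DummyState(object):
#       lat, lon, heading = 47.397742, 8.545594, 90
#       alt, airspeed, groundspeed = 120.0, 15.2, 14.8
#
#   server = start_server('127.0.0.1', 9999, module_state=DummyState())
#   # GET http://127.0.0.1:9999/data returns JSON like
#   # {"lat": 47.397742, "lon": 8.545594, "heading": 90, ...}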
| gpl-3.0 |
udacity/deep-learning | language-translation/problem_unittests.py | 1 | 20559 | import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import itertools
import collections
import helper
def _print_success_message():
print('Tests Passed')
def test_text_to_ids(text_to_ids):
test_source_text = 'new jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .'
test_target_text = 'new jersey est parfois calme pendant l\' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .'
test_source_text = test_source_text.lower()
test_target_text = test_target_text.lower()
source_vocab_to_int, source_int_to_vocab = helper.create_lookup_tables(test_source_text)
target_vocab_to_int, target_int_to_vocab = helper.create_lookup_tables(test_target_text)
test_source_id_seq, test_target_id_seq = text_to_ids(test_source_text, test_target_text, source_vocab_to_int, target_vocab_to_int)
assert len(test_source_id_seq) == len(test_source_text.split('\n')),\
'source_id_text has wrong length, it should be {}.'.format(len(test_source_text.split('\n')))
assert len(test_target_id_seq) == len(test_target_text.split('\n')), \
'target_id_text has wrong length, it should be {}.'.format(len(test_target_text.split('\n')))
target_not_iter = [type(x) for x in test_source_id_seq if not isinstance(x, collections.Iterable)]
assert not target_not_iter,\
        'Element in source_id_text is not iterable. Found type {}'.format(target_not_iter[0])
target_not_iter = [type(x) for x in test_target_id_seq if not isinstance(x, collections.Iterable)]
assert not target_not_iter, \
        'Element in target_id_text is not iterable. Found type {}'.format(target_not_iter[0])
source_changed_length = [(words, word_ids)
for words, word_ids in zip(test_source_text.split('\n'), test_source_id_seq)
if len(words.split()) != len(word_ids)]
assert not source_changed_length,\
'Source text changed in size from {} word(s) to {} id(s): {}'.format(
len(source_changed_length[0][0].split()), len(source_changed_length[0][1]), source_changed_length[0][1])
target_missing_end = [word_ids for word_ids in test_target_id_seq if word_ids[-1] != target_vocab_to_int['<EOS>']]
assert not target_missing_end,\
'Missing <EOS> id at the end of {}'.format(target_missing_end[0])
target_bad_size = [(words.split(), word_ids)
for words, word_ids in zip(test_target_text.split('\n'), test_target_id_seq)
if len(word_ids) != len(words.split()) + 1]
assert not target_bad_size,\
'Target text incorrect size. {} should be length {}'.format(
target_bad_size[0][1], len(target_bad_size[0][0]) + 1)
source_bad_id = [(word, word_id)
for word, word_id in zip(
[word for sentence in test_source_text.split('\n') for word in sentence.split()],
itertools.chain.from_iterable(test_source_id_seq))
if source_vocab_to_int[word] != word_id]
assert not source_bad_id,\
'Source word incorrectly converted from {} to id {}.'.format(source_bad_id[0][0], source_bad_id[0][1])
target_bad_id = [(word, word_id)
for word, word_id in zip(
[word for sentence in test_target_text.split('\n') for word in sentence.split()],
[word_id for word_ids in test_target_id_seq for word_id in word_ids[:-1]])
if target_vocab_to_int[word] != word_id]
assert not target_bad_id,\
'Target word incorrectly converted from {} to id {}.'.format(target_bad_id[0][0], target_bad_id[0][1])
_print_success_message()
def test_model_inputs(model_inputs):
with tf.Graph().as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input is not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets is not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate is not a Placeholder.'
assert keep_prob.op.type == 'Placeholder', \
'Keep Probability is not a Placeholder.'
assert target_sequence_length.op.type == 'Placeholder', \
'Target Sequence Length is not a Placeholder.'
assert max_target_sequence_length.op.type == 'Max', \
'Max Target Sequence Length is not a Max type.'
assert source_sequence_length.op.type == 'Placeholder', \
'Source Sequence Length is not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
assert target_sequence_length.name == 'target_sequence_length:0',\
'Target Sequence Length has bad name. Found name {}'.format(target_sequence_length.name)
assert source_sequence_length.name == 'source_sequence_length:0',\
'Source Sequence Length has bad name. Found name {}'.format(source_sequence_length.name)
assert keep_prob.name == 'keep_prob:0', \
'Keep Probability has bad name. Found name {}'.format(keep_prob.name)
assert tf.assert_rank(input_data, 2, message='Input data has wrong rank')
assert tf.assert_rank(targets, 2, message='Targets has wrong rank')
assert tf.assert_rank(lr, 0, message='Learning Rate has wrong rank')
assert tf.assert_rank(keep_prob, 0, message='Keep Probability has wrong rank')
assert tf.assert_rank(target_sequence_length, 1, message='Target Sequence Length has wrong rank')
assert tf.assert_rank(max_target_sequence_length, 0, message='Max Target Sequence Length has wrong rank')
        assert tf.assert_rank(source_sequence_length, 1, message='Source Sequence Length has wrong rank')
_print_success_message()
def test_encoding_layer(encoding_layer):
rnn_size = 512
batch_size = 64
num_layers = 3
source_sequence_len = 22
source_vocab_size = 20
encoding_embedding_size = 30
with tf.Graph().as_default():
rnn_inputs = tf.placeholder(tf.int32, [batch_size,
source_sequence_len])
source_sequence_length = tf.placeholder(tf.int32,
(None,),
name='source_sequence_length')
keep_prob = tf.placeholder(tf.float32)
enc_output, states = encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size)
assert len(states) == num_layers,\
'Found {} state(s). It should be {} states.'.format(len(states), num_layers)
bad_types = [type(state) for state in states if not isinstance(state, tf.contrib.rnn.LSTMStateTuple)]
assert not bad_types,\
'Found wrong type: {}'.format(bad_types[0])
bad_shapes = [state_tensor.get_shape()
for state in states
for state_tensor in state
if state_tensor.get_shape().as_list() not in [[None, rnn_size], [batch_size, rnn_size]]]
assert not bad_shapes,\
'Found wrong shape: {}'.format(bad_shapes[0])
_print_success_message()
def test_decoding_layer(decoding_layer):
batch_size = 64
vocab_size = 1000
embedding_size = 200
sequence_length = 22
rnn_size = 512
num_layers = 3
target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}
with tf.Graph().as_default():
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
dec_input = tf.placeholder(tf.int32, [batch_size, sequence_length])
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
keep_prob = tf.placeholder(tf.float32)
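        # Fake 3-layer encoder state below: the same LSTMStateTuple is reused once per layer.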
state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
encoder_state = (state, state, state)
train_decoder_output, infer_logits_output = decoding_layer( dec_input,
encoder_state,
target_sequence_length_p,
max_target_sequence_length,
rnn_size,
num_layers,
target_vocab_to_int,
vocab_size,
batch_size,
keep_prob,
embedding_size)
assert isinstance(train_decoder_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(train_decoder_output))
assert isinstance(infer_logits_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(infer_logits_output))
assert train_decoder_output.rnn_output.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_decoder_output.rnn_output.get_shape())
assert infer_logits_output.sample_id.get_shape().as_list() == [batch_size, None], \
'Wrong shape returned. Found {}'.format(infer_logits_output.sample_id.get_shape())
_print_success_message()
def test_seq2seq_model(seq2seq_model):
batch_size = 64
vocab_size = 300
embedding_size = 100
sequence_length = 22
rnn_size = 512
num_layers = 3
target_vocab_to_int = {'<EOS>': 1, '<GO>': 3}
with tf.Graph().as_default():
dec_input = tf.placeholder(tf.int32, [batch_size, sequence_length])
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
dec_embeddings = tf.placeholder(tf.float32, [vocab_size, embedding_size])
keep_prob = tf.placeholder(tf.float32)
enc_state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
target_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
keep_prob = tf.placeholder(tf.float32)
source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
train_decoder_output, infer_logits_output = seq2seq_model( input_data,
target_data,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length_p,
max_target_sequence_length,
vocab_size,
vocab_size,
embedding_size,
embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
# input_data, target_data, keep_prob, batch_size, sequence_length,
# 200, target_vocab_size, 64, 80, rnn_size, num_layers, target_vocab_to_int)
assert isinstance(train_decoder_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(train_decoder_output))
assert isinstance(infer_logits_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(infer_logits_output))
assert train_decoder_output.rnn_output.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_decoder_output.rnn_output.get_shape())
assert infer_logits_output.sample_id.get_shape().as_list() == [batch_size, None], \
'Wrong shape returned. Found {}'.format(infer_logits_output.sample_id.get_shape())
_print_success_message()
def test_sentence_to_seq(sentence_to_seq):
sentence = 'this is a test sentence'
vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, 'this': 3, 'is': 6, 'a': 5, 'sentence': 4}
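    # 'test' is intentionally missing from vocab_to_int, so it should be mapped to the <UNK> id (2).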
output = sentence_to_seq(sentence, vocab_to_int)
assert len(output) == 5,\
'Wrong length. Found a length of {}'.format(len(output))
assert output[3] == 2,\
'Missing <UNK> id.'
assert np.array_equal(output, [3, 6, 5, 2, 4]),\
        'Incorrect output. Found {}'.format(output)
_print_success_message()
def test_process_encoding_input(process_encoding_input):
batch_size = 2
seq_length = 3
target_vocab_to_int = {'<GO>': 3}
with tf.Graph().as_default():
target_data = tf.placeholder(tf.int32, [batch_size, seq_length])
dec_input = process_encoding_input(target_data, target_vocab_to_int, batch_size)
assert dec_input.get_shape() == (batch_size, seq_length),\
'Wrong shape returned. Found {}'.format(dec_input.get_shape())
test_target_data = [[10, 20, 30], [40, 18, 23]]
with tf.Session() as sess:
test_dec_input = sess.run(dec_input, {target_data: test_target_data})
assert test_dec_input[0][0] == target_vocab_to_int['<GO>'] and\
test_dec_input[1][0] == target_vocab_to_int['<GO>'],\
'Missing GO Id.'
_print_success_message()
def test_decoding_layer_train(decoding_layer_train):
batch_size = 64
vocab_size = 1000
embedding_size = 200
sequence_length = 22
rnn_size = 512
num_layers = 3
with tf.Graph().as_default():
with tf.variable_scope("decoding") as decoding_scope:
# dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
keep_prob = tf.placeholder(tf.float32)
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
for layer in range(num_layers):
with tf.variable_scope('decoder_{}'.format(layer)):
lstm = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
dec_cell = tf.contrib.rnn.DropoutWrapper(lstm,
input_keep_prob=keep_prob)
output_layer = Dense(vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
name='output_layer')
# output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
encoder_state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
train_decoder_output = decoding_layer_train(encoder_state, dec_cell,
dec_embed_input,
target_sequence_length_p,
max_target_sequence_length,
output_layer,
keep_prob)
# encoder_state, dec_cell, dec_embed_input, sequence_length,
# decoding_scope, output_fn, keep_prob)
assert isinstance(train_decoder_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(train_decoder_output))
assert train_decoder_output.rnn_output.get_shape().as_list() == [batch_size, None, vocab_size], \
'Wrong shape returned. Found {}'.format(train_decoder_output.rnn_output.get_shape())
_print_success_message()
def test_decoding_layer_infer(decoding_layer_infer):
batch_size = 64
vocab_size = 1000
sequence_length = 22
embedding_size = 200
rnn_size = 512
num_layers = 3
with tf.Graph().as_default():
with tf.variable_scope("decoding") as decoding_scope:
dec_embeddings = tf.Variable(tf.random_uniform([vocab_size, embedding_size]))
dec_embed_input = tf.placeholder(tf.float32, [batch_size, sequence_length, embedding_size])
keep_prob = tf.placeholder(tf.float32)
target_sequence_length_p = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length_p, name='max_target_len')
for layer in range(num_layers):
with tf.variable_scope('decoder_{}'.format(layer)):
lstm = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
dec_cell = tf.contrib.rnn.DropoutWrapper(lstm,
input_keep_prob=keep_prob)
output_layer = Dense(vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
name='output_layer')
# output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
encoder_state = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(tf.float32, [None, rnn_size]),
tf.placeholder(tf.float32, [None, rnn_size]))
infer_logits_output = decoding_layer_infer( encoder_state,
dec_cell,
dec_embeddings,
1,
2,
max_target_sequence_length,
vocab_size,
output_layer,
batch_size,
keep_prob)
# encoder_state, dec_cell, dec_embeddings, 10, 20,
# sequence_length, vocab_size, decoding_scope, output_fn, keep_prob)
assert isinstance(infer_logits_output, tf.contrib.seq2seq.BasicDecoderOutput),\
'Found wrong type: {}'.format(type(infer_logits_output))
assert infer_logits_output.sample_id.get_shape().as_list() == [batch_size, None], \
'Wrong shape returned. Found {}'.format(infer_logits_output.sample_id.get_shape())
_print_success_message()
| mit |
runtimejs/runtime | deps/v8/tools/release/merge_to_branch.py | 13 | 10549 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from collections import OrderedDict
import sys
from common_includes import *
def IsSvnNumber(rev):
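  # SVN revision numbers are short decimal strings (fewer than 8 digits), unlike 40-character git hashes.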
return rev.isdigit() and len(rev) < 8
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
if self._options.force:
os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
elif self._options.step == 0: # pragma: no cover
self.Die("A merge is already in progress")
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
if self._options.branch:
self["merge_to_branch"] = self._options.branch
else: # pragma: no cover
self.Die("Please specify a branch to merge to")
self.CommonPrepare()
self.PrepareBranch()
class CreateBranch(Step):
MESSAGE = "Create a fresh branch for the patch."
def RunStep(self):
self.GitCreateBranch(self.Config("BRANCHNAME"),
self.vc.RemoteBranch(self["merge_to_branch"]))
class SearchArchitecturePorts(Step):
MESSAGE = "Search for corresponding architecture ports."
def RunStep(self):
self["full_revision_list"] = list(OrderedDict.fromkeys(
self._options.revisions))
port_revision_list = []
for revision in self["full_revision_list"]:
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="Port %s" % revision,
branch=self.vc.RemoteMasterBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
# Is this revision included in the original revision list?
if git_hash in self["full_revision_list"]:
print("Found port of %s -> %s (already included): %s"
% (revision, git_hash, revision_title))
else:
print("Found port of %s -> %s: %s"
% (revision, git_hash, revision_title))
port_revision_list.append(git_hash)
# Do we find any port?
if len(port_revision_list) > 0:
if self.Confirm("Automatically add corresponding ports (%s)?"
% ", ".join(port_revision_list)):
#: 'y': Add ports to revision list.
self["full_revision_list"].extend(port_revision_list)
class CreateCommitMessage(Step):
MESSAGE = "Create commit message."
def RunStep(self):
# Stringify: ["abcde", "12345"] -> "abcde, 12345"
self["revision_list"] = ", ".join(self["full_revision_list"])
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
action_text = "Merged %s"
# The commit message title is added below after the version is specified.
msg_pieces = [
"\n".join(action_text % s for s in self["full_revision_list"]),
]
msg_pieces.append("\n\n")
for commit_hash in self["full_revision_list"]:
patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
msg_pieces.append("%s\n\n" % patch_merge_desc)
bugs = []
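    # Gather BUG= lines from each merged commit so they can be aggregated into a single BUG= entry.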
for commit_hash in self["full_revision_list"]:
msg = self.GitLog(n=1, git_hash=commit_hash)
for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
bugs.extend(s.strip() for s in bug.split(","))
bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs)))
if bug_aggregate:
msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate)
self["new_commit_msg"] = "".join(msg_pieces)
class ApplyPatches(Step):
MESSAGE = "Apply patches for selected revisions."
def RunStep(self):
for commit_hash in self["full_revision_list"]:
print("Applying patch for %s to %s..."
% (commit_hash, self["merge_to_branch"]))
patch = self.GitGetPatch(commit_hash)
TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
if self._options.patch:
self.ApplyPatch(self._options.patch)
class PrepareVersion(Step):
MESSAGE = "Prepare version file."
def RunStep(self):
# This is used to calculate the patch level increment.
self.ReadAndPersistVersion()
class IncrementVersion(Step):
MESSAGE = "Increment version number."
def RunStep(self):
new_patch = str(int(self["patch"]) + 1)
if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
"changes. When you're done, save the file and exit your "
"EDITOR.)" % VERSION_FILE):
text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
r"\g<space>%s" % new_patch,
text)
TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
else:
self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
self.ReadAndPersistVersion("new_")
self["version"] = "%s.%s.%s.%s" % (self["new_major"],
self["new_minor"],
self["new_build"],
self["new_patch"])
class CommitLocal(Step):
MESSAGE = "Commit to local branch."
def RunStep(self):
# Add a commit message title.
self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
self["new_commit_msg"])
TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
class CommitRepository(Step):
MESSAGE = "Commit to the repository."
def RunStep(self):
self.GitCheckout(self.Config("BRANCHNAME"))
self.WaitForLGTM()
self.GitPresubmit()
self.vc.CLLand()
class TagRevision(Step):
MESSAGE = "Create the tag."
def RunStep(self):
print "Creating tag %s" % self["version"]
self.vc.Tag(self["version"],
self.vc.RemoteBranch(self["merge_to_branch"]),
self["commit_title"])
class CleanUp(Step):
MESSAGE = "Cleanup."
def RunStep(self):
self.CommonCleanup()
print "*** SUMMARY ***"
print "version: %s" % self["version"]
print "branch: %s" % self["merge_to_branch"]
if self["revision_list"]:
print "patches: %s" % self["revision_list"]
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
"master to other branches, including candidates.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--branch", help="The branch to merge to.")
parser.add_argument("revisions", nargs="*",
help="The revisions to merge.")
parser.add_argument("-f", "--force",
help="Delete sentinel file.",
default=False, action="store_true")
parser.add_argument("-m", "--message",
help="A commit message for the patch.")
parser.add_argument("-p", "--patch",
help="A patch file to apply as part of the merge.")
def _ProcessOptions(self, options):
if len(options.revisions) < 1:
if not options.patch:
print "Either a patch file or revision numbers must be specified"
return False
if not options.message:
print "You must specify a merge comment if no patches are specified"
return False
options.bypass_upload_hooks = True
# CC ulan to make sure that fixes are merged to Google3.
options.cc = "[email protected]"
# Make sure to use git hashes in the new workflows.
for revision in options.revisions:
if (IsSvnNumber(revision) or
(revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
print "Please provide full git hashes of the patches to merge."
print "Got: %s" % revision
return False
return True
def _Config(self):
return {
"BRANCHNAME": "prepare-merge",
"PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
"ALREADY_MERGING_SENTINEL_FILE":
"/tmp/v8-merge-to-branch-tempfile-already-merging",
"TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
"COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
}
def _Steps(self):
return [
Preparation,
CreateBranch,
SearchArchitecturePorts,
CreateCommitMessage,
ApplyPatches,
PrepareVersion,
IncrementVersion,
CommitLocal,
UploadStep,
CommitRepository,
TagRevision,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(MergeToBranch().Run())
| apache-2.0 |
molobrakos/home-assistant | homeassistant/components/websocket_api/http.py | 5 | 6900 | """View to accept incoming websocket connection."""
import asyncio
from contextlib import suppress
from functools import partial
import json
import logging
from aiohttp import web, WSMsgType
import async_timeout
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.helpers.json import JSONEncoder
from .const import (
MAX_PENDING_MSG, CANCELLATION_ERRORS, URL, ERR_UNKNOWN_ERROR,
SIGNAL_WEBSOCKET_CONNECTED, SIGNAL_WEBSOCKET_DISCONNECTED,
DATA_CONNECTIONS)
from .auth import AuthPhase, auth_required_message
from .error import Disconnect
from .messages import error_message
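# Pre-bound JSON serializer; allow_nan=False makes NaN/Infinity raise ValueError instead of emitting invalid JSON.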
JSON_DUMP = partial(json.dumps, cls=JSONEncoder, allow_nan=False)
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name = "websocketapi"
url = URL
requires_auth = False
async def get(self, request):
"""Handle an incoming websocket connection."""
return await WebSocketHandler(
request.app['hass'], request).async_handle()
class WebSocketHandler:
"""Handle an active websocket client connection."""
def __init__(self, hass, request):
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock = None
self._to_write = asyncio.Queue(maxsize=MAX_PENDING_MSG, loop=hass.loop)
self._handle_task = None
self._writer_task = None
self._logger = logging.getLogger(
"{}.connection.{}".format(__name__, id(self)))
async def _writer(self):
"""Write outgoing messages."""
# Exceptions if Socket disconnected or cancelled by connection handler
with suppress(RuntimeError, ConnectionResetError,
*CANCELLATION_ERRORS):
while not self.wsock.closed:
message = await self._to_write.get()
if message is None:
break
self._logger.debug("Sending %s", message)
try:
await self.wsock.send_json(message, dumps=JSON_DUMP)
except (ValueError, TypeError) as err:
self._logger.error('Unable to serialize to JSON: %s\n%s',
err, message)
await self.wsock.send_json(error_message(
message['id'], ERR_UNKNOWN_ERROR,
'Invalid JSON in response'))
@callback
def _send_message(self, message):
"""Send a message to the client.
Closes connection if the client is not reading the messages.
Async friendly.
"""
try:
self._to_write.put_nowait(message)
except asyncio.QueueFull:
self._logger.error("Client exceeded max pending messages [2]: %s",
MAX_PENDING_MSG)
self._cancel()
@callback
def _cancel(self):
"""Cancel the connection."""
self._handle_task.cancel()
self._writer_task.cancel()
async def async_handle(self):
"""Handle a websocket response."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
await wsock.prepare(request)
self._logger.debug("Connected")
# Py3.7+
if hasattr(asyncio, 'current_task'):
# pylint: disable=no-member
self._handle_task = asyncio.current_task()
else:
self._handle_task = asyncio.Task.current_task(loop=self.hass.loop)
@callback
def handle_hass_stop(event):
"""Cancel this connection."""
self._cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop)
self._writer_task = self.hass.async_create_task(self._writer())
auth = AuthPhase(self._logger, self.hass, self._send_message, request)
connection = None
disconnect_warn = None
try:
self._send_message(auth_required_message())
# Auth Phase
try:
with async_timeout.timeout(10):
msg = await wsock.receive()
except asyncio.TimeoutError:
disconnect_warn = \
'Did not receive auth message within 10 seconds'
raise Disconnect
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
raise Disconnect
if msg.type != WSMsgType.TEXT:
disconnect_warn = 'Received non-Text message.'
raise Disconnect
try:
msg = msg.json()
except ValueError:
disconnect_warn = 'Received invalid JSON.'
raise Disconnect
self._logger.debug("Received %s", msg)
connection = await auth.async_handle(msg)
self.hass.data[DATA_CONNECTIONS] = \
self.hass.data.get(DATA_CONNECTIONS, 0) + 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_CONNECTED)
# Command phase
while not wsock.closed:
msg = await wsock.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
break
elif msg.type != WSMsgType.TEXT:
disconnect_warn = 'Received non-Text message.'
break
try:
msg = msg.json()
except ValueError:
disconnect_warn = 'Received invalid JSON.'
break
self._logger.debug("Received %s", msg)
connection.async_handle(msg)
except asyncio.CancelledError:
self._logger.info("Connection closed by client")
except Disconnect:
pass
except Exception: # pylint: disable=broad-except
self._logger.exception("Unexpected error inside websocket API")
finally:
unsub_stop()
if connection is not None:
connection.async_close()
try:
self._to_write.put_nowait(None)
# Make sure all error messages are written before closing
await self._writer_task
except asyncio.QueueFull:
self._writer_task.cancel()
await wsock.close()
if disconnect_warn is None:
self._logger.debug("Disconnected")
else:
self._logger.warning("Disconnected: %s", disconnect_warn)
self.hass.data[DATA_CONNECTIONS] -= 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_DISCONNECTED)
return wsock
| apache-2.0 |
Renzo-Olivares/android_kk_kernel_htc_monarudo | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explicit\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
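# A minimal sketch of how this generator is meant to be driven (the module name below
# is hypothetical); run it from inside the kernel tree so "../../drivers/target/"
# resolves correctly:
#   python <this_script>.py -m tcm_nab5000 -p iSCSI
# which creates drivers/target/tcm_nab5000/ with the fabric/configfs stubs plus its
# Makefile and Kconfig, and optionally wires them into drivers/target/.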
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
CopeX/odoo | addons/account/report/account_analytic_entries_report.py | 306 | 3879 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class analytic_entries_report(osv.osv):
_name = "analytic.entries.report"
_description = "Analytic Entries Statistics"
_auto = False
_columns = {
'date': fields.date('Date', readonly=True),
'user_id': fields.many2one('res.users', 'User',readonly=True),
'name': fields.char('Description', size=64, readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'currency_id': fields.many2one('res.currency', 'Currency', required=True),
'account_id': fields.many2one('account.analytic.account', 'Account', required=False),
'general_account_id': fields.many2one('account.account', 'General Account', required=True),
'journal_id': fields.many2one('account.analytic.journal', 'Journal', required=True),
'move_id': fields.many2one('account.move.line', 'Move', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'amount': fields.float('Amount', readonly=True),
'unit_amount': fields.integer('Unit Amount', readonly=True),
'nbr': fields.integer('# Entries', readonly=True), # TDE FIXME master: rename into nbr_entries
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'analytic_entries_report')
cr.execute("""
create or replace view analytic_entries_report as (
select
min(a.id) as id,
count(distinct a.id) as nbr,
a.date as date,
a.user_id as user_id,
a.name as name,
analytic.partner_id as partner_id,
a.company_id as company_id,
a.currency_id as currency_id,
a.account_id as account_id,
a.general_account_id as general_account_id,
a.journal_id as journal_id,
a.move_id as move_id,
a.product_id as product_id,
a.product_uom_id as product_uom_id,
sum(a.amount) as amount,
sum(a.unit_amount) as unit_amount
from
account_analytic_line a, account_analytic_account analytic
where analytic.id = a.account_id
group by
a.date, a.user_id,a.name,analytic.partner_id,a.company_id,a.currency_id,
a.account_id,a.general_account_id,a.journal_id,
a.move_id,a.product_id,a.product_uom_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
angad/libjingle-mac | scons-2.2.0/engine/SCons/Tool/ipkg.py | 14 | 2532 | """SCons.Tool.ipkg
Tool-specific initialization for ipkg.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The ipkg tool calls ipkg-build. Its only argument should be the
package's fake_root.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ipkg.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import SCons.Builder
def generate(env):
"""Add Builders and construction variables for ipkg to an Environment."""
try:
bld = env['BUILDERS']['Ipkg']
except KeyError:
bld = SCons.Builder.Builder( action = '$IPKGCOM',
suffix = '$IPKGSUFFIX',
source_scanner = None,
target_scanner = None)
env['BUILDERS']['Ipkg'] = bld
env['IPKG'] = 'ipkg-build'
env['IPKGCOM'] = '$IPKG $IPKGFLAGS ${SOURCE}'
env['IPKGUSER'] = os.popen('id -un').read().strip()
env['IPKGGROUP'] = os.popen('id -gn').read().strip()
env['IPKGFLAGS'] = SCons.Util.CLVar('-o $IPKGUSER -g $IPKGGROUP')
env['IPKGSUFFIX'] = '.ipk'
def exists(env):
return env.Detect('ipkg-build')
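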
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
SebDieBln/QGIS | python/ext-libs/pytz/exceptions.py | 657 | 1333 | '''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class UnknownTimeZoneError(KeyError):
'''Exception raised when pytz is passed an unknown timezone.
>>> isinstance(UnknownTimeZoneError(), LookupError)
True
This class is actually a subclass of KeyError to provide backwards
compatibility with code relying on the undocumented behavior of earlier
pytz releases.
>>> isinstance(UnknownTimeZoneError(), KeyError)
True
'''
pass
class InvalidTimeError(Exception):
'''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
'''Exception raised when attempting to create an ambiguous wallclock time.
At the end of a DST transition period, a particular wallclock time will
occur twice (once before the clocks are set back, once after). Both
possibilities may be correct, unless further information is supplied.
See DstTzInfo.normalize() for more info
'''
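# Illustrative consequence (names are placeholders): calling
# dst_tz.localize(naive_dt, is_dst=None) for a wallclock time that falls in the
# repeated hour raises AmbiguousTimeError rather than silently picking one of the
# two possible instants.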
class NonExistentTimeError(InvalidTimeError):
'''Exception raised when attempting to create a wallclock time that
cannot exist.
At the start of a DST transition period, the wallclock time jumps forward.
The instants jumped over never occur.
'''
| gpl-2.0 |
rajathkumarmp/BinPy | BinPy/algorithms/ExpressionConvert.py | 4 | 9668 | def makeCompatible(expr):
'''Used by convertExpression to convert logical operators to english words.'''
expr = expr.replace('~&', ' NAND ')
expr = expr.replace('~|', ' NOR ')
expr = expr.replace('~^', ' XNOR ')
expr = expr.replace('&', ' AND ')
expr = expr.replace('|', ' OR ')
expr = expr.replace('~', ' NOT ')
expr = expr.replace('^', ' XOR ')
return '((' + expr + '))'
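# Rough illustration (exact whitespace may differ): makeCompatible('a & ~b') yields
# a string equivalent to '((a AND NOT b))' -- symbolic operators are replaced by
# word operators and the whole expression gains two wrapping parentheses.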
def createList(expr):
'''Creates a list which can be used by convertExpression for conversion.'''
list1 = expr.split('(')
list2 = []
list3 = []
while ('' in list1):
list1.remove('')
for string in list1:
l = string.split()
list2.extend(l)
for string in list2:
sublist = []
if ')' in string:
while ')' in string:
index = string.find(')')
sublist.append(string[:index])
sublist.append(')')
string = string[index + 1:]
sublist.append(string)
list3.extend(sublist)
else:
list3.extend([string])
while ('' in list3):
list3.remove('')
return (list3)
def mergeNot(case, expr):
    '''Combines a NOT gate with other gates to minimize the number of gates used.'''
if expr[-1] == ')':
index = expr.find('(')
gate = expr[:index].upper()
if gate == 'OR' and case == 'general':
return 'NOR' + expr[index:]
elif gate == 'AND' and case == 'general':
return 'NAND' + expr[index:]
elif gate == 'NOT':
return expr[index + 1:-1]
elif gate == 'XOR'and case == 'general':
return 'XNOR' + expr[index:]
elif gate == 'XNOR'and case == 'general':
return 'XOR' + expr[index:]
elif gate == 'NAND'and case == 'general':
return 'AND' + expr[index:]
elif gate == 'NOR'and case == 'general':
return 'OR' + expr[index:]
return 'NOT(' + expr + ')'
def to_and_or_not(gate, op1, op2):
'''Converts a general two input gate and two of its operands to use only OR, NOT, or AND gates'''
if gate == 'AND' or gate == 'OR':
return gate + '(' + op1 + ', ' + op2 + ')'
elif gate == 'NAND':
        return 'NOT(AND(' + op1 + ', ' + op2 + '))'
elif gate == 'NOR':
        return 'NOT(OR(' + op1 + ', ' + op2 + '))'
elif gate == 'XOR':
return ('OR(AND(' + op1 + ', ' + mergeNot('general', op2)
+ '), AND(' + mergeNot('general', op1) + ', ' + op2 + '))')
elif gate == 'XNOR':
return (
'OR(AND(' +
mergeNot(
'general',
op1) +
', ' +
mergeNot(
'general',
op2) +
'), AND(' +
op1 +
', ' +
op2 +
'))')
def to_nand(gate, op1, op2):
'''Converts a general two input gate and two of its operands to use only NAND gates'''
if gate == 'AND':
return 'NOT(NAND(' + op1 + ', ' + op2 + '))'
elif gate == 'OR':
return ('NAND(' + mergeNot('special', op1) + ', '
+ mergeNot('special', op2) + ')')
elif gate == 'NAND':
return gate + '(' + op1 + ', ' + op2 + ')'
elif gate == 'NOR':
return 'NOT(' + to_nand('OR', op1, op2) + ')'
elif gate == 'XOR':
return (
'NAND(NAND(' +
op1 +
', NAND(' +
op1 +
', ' +
op2 +
')), NAND(' +
op2 +
', NAND(' +
op1 +
', ' +
op2 +
')))')
elif gate == 'XNOR':
return 'NOT(' + to_nand('XOR', op1, op2) + ')'
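# Illustrative: to_nand('OR', 'a', 'b') returns 'NAND(NOT(a), NOT(b))'; the NOT()
# wrappers introduced here are later rewritten into two-input NANDs by remove_not().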
def to_nor(gate, op1, op2):
'''Converts a general two input gate and two of its operands to use only NOR gates'''
if gate == 'OR':
return 'NOT(NOR(' + op1 + ', ' + op2 + '))'
elif gate == 'AND':
return ('NOR(' + mergeNot('special', op1) + ', '
+ mergeNot('special', op2) + ')')
elif gate == 'NOR':
return gate + '(' + op1 + ', ' + op2 + ')'
elif gate == 'NAND':
return 'NOT(' + to_nor('AND', op1, op2) + ')'
elif gate == 'XNOR':
return ('NOR(NOR(' + op1 + ', NOR(' + op1 + ', '
+ op2 + ')), NOR(' + op2 + ', NOR(' + op1 + ', ' + op2 + ')))')
elif gate == 'XOR':
return 'NOT(' + to_nor('XNOR', op1, op2) + ')'
def remove_not(gate, exp):
'''Converts a NOT gate and its operand to use the specified gate only.
The input gate must be NAND or NOR only.'''
while 'NOT' in exp:
index = exp.find('NOT(')
index2 = index
index3 = exp.find('(', index)
while True:
index2 = exp.find(')', index2 + 1)
index3 = exp.find('(', index3 + 1)
if index3 == -1 or index3 > index2:
break
exp = exp[:index] + gate + '(' + exp[index + 4:index2] + \
', ' + exp[index + 4:index2] + ')' + exp[index2 + 1:]
return exp
def convertExpression(expr, two_input=0, only_nand=0,
only_nor=0, only_and_or_not=0):
''' Converts logical expression to an implementable form.
Make two_input 1 if only two input gates must be used.
Make only_nand 1 if only 2 input nand gates must be used.
Make only_nor 1 if only 2 input nor gates must be used.
Make only_and_or_not 1 if only 2 input AND, OR and NOTs be used.
Error occurs if more than one variable is put to 1.
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)')
OR(AND(NOT(a), NOT(b)), AND(C, NOT(d), E, F))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', two_input=1)
OR(AND(NOT(a), NOT(b)), AND(C, AND(NOT(d), E)))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', only_nand=1)
NAND(NAND(NAND(a, a), NAND(b, b)), NAND(C, NAND(NAND(NAND(d, d), E), NAND(NAND(d, d), E))))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', only_nor=1)
NOR(NOR(NOR(a, b), NOR(NOR(C, C), NOR(NOR(d, NOR(E, E)),...
NOR(d, NOR(E, E))))), NOR(NOR(a, b), NOR(NOR(C, C), NOR(NOR(d, NOR(E, E)), NOR(d, NOR(E, E))))))
convertExpression('( NOT(a) and NOT(b)) or (C and Not(d) and E and F)', only_and_or_not=1)
OR(AND(NOT(a), NOT(b)), AND(C, AND(NOT(d), AND(E, F))))
'''
expr = makeCompatible(expr)
list1 = createList(expr)
while ')' in list1:
index = list1.index(')')
if index != len(list1) - 1 and list1[index + 1] == ')':
last = 0
else:
last = 1
if len(list1) > 1:
op2 = list1.pop(index - 1)
gate = list1.pop(index - 2)
gate = gate.upper()
if gate != 'NOT':
try:
op1 = list1.pop(index - 3)
except:
list1.insert(index - 1, gate)
list1.insert(index - 2, op2)
break
previous_gate = op1[:len(gate)]
previous_gate = previous_gate.upper()
next_gate = op2[:len(gate)]
next_gate = next_gate.upper()
if (two_input == 0 and gate != 'NAND'and gate != 'NOR')and (
only_nand == 0 and only_nor == 0 and only_and_or_not == 0):
if (gate == previous_gate) and (gate == next_gate.upper()):
new_element = gate + \
'(' + op1[len(gate) + 1:-1] + \
', ' + op2[len(gate) + 1:-1] + ')'
elif (gate == previous_gate) and (gate != next_gate.upper()):
new_element = gate + \
'(' + op1[len(gate) + 1:-1] + ', ' + op2 + ')'
elif (gate != previous_gate) and (gate == next_gate.upper()):
new_element = gate + \
'(' + op1 + ', ' + op2[len(gate) + 1:-1] + ')'
else:
new_element = gate + '(' + op1 + ', ' + op2 + ')'
else:
if only_nand == 0 and only_nor == 0 and only_and_or_not == 0:
new_element = gate + '(' + op1 + ', ' + op2 + ')'
elif only_nand == 1 and only_nor == 0 and only_and_or_not == 0:
new_element = to_nand(gate, op1, op2)
elif only_nand == 0 and only_nor == 1 and only_and_or_not == 0:
new_element = to_nor(gate, op1, op2)
elif only_nand == 0 and only_nor == 0 and only_and_or_not == 1:
new_element = to_and_or_not(gate, op1, op2)
else:
raise Exception("Invalid Input")
list1.insert(index - 3, new_element)
if (last != 1) or list1.index(')') == 1:
temp1 = list1.index(')')
temp2 = list1.pop(temp1)
else:
if only_nand == 0 and only_nor == 0 and only_and_or_not == 0:
new_element = mergeNot('general', op2)
else:
new_element = mergeNot('special', op2)
list1.insert(index - 2, new_element)
temp1 = list1.index(')')
temp2 = list1.pop(temp1)
if list1.count(')') == len(list1) - 1:
break
if only_nand == 1:
return (remove_not('NAND', list1[0]))
elif only_nor == 1:
return (remove_not('NOR', list1[0]))
else:
return (list1[0])
| bsd-3-clause |
AnishShah/tensorflow | tensorflow/python/data/util/nest.py | 17 | 17827 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It removes support for lists as a level of nesting in nested structures.
2. It adds support for `SparseTensorValue` as an atomic element.
The motivation for this change is twofold:
1. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
2. This is needed because `SparseTensorValue` is implemented as a `namedtuple`
that would normally be flattened and we want to be able to create sparse
tensor from `SparseTensorValue's similarly to creating tensors from numpy
arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(_six.iterkeys(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
return type(instance)((key, result[key]) for key in _six.iterkeys(instance))
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
if isinstance(iterable, dict):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield iterable[key]
elif isinstance(iterable, _sparse_tensor.SparseTensorValue):
yield iterable
else:
for value in iterable:
yield value
# See the swig file (../../util/util.i) for documentation.
is_sequence = _pywrap_tensorflow.IsSequenceForData
# See the swig file (../../util/util.i) for documentation.
flatten = _pywrap_tensorflow.FlattenForData
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences should be same as
well. For dictionary, "type" of dictionary is considered to include its
keys. In other words, two dictionaries with different keys are considered
to have a different "type". If set to `False`, two iterables are
considered same as long as they yield the elements that have same
structures.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
_pywrap_tensorflow.AssertSameStructureForData(nest1, nest2, check_types)
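# Illustrative cases: assert_same_structure((1, (2, 3)), ('a', ('b', 'c'))) passes,
# whereas comparing (1, 2) with (1, (2, 3)) raises ValueError; mismatched sequence
# types raise TypeError only when check_types=True.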
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
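# Minimal illustration: pack_sequence_as((1, (2, 3)), [4, 5, 6]) returns (4, (5, 6));
# the flat values are re-nested to mirror `structure`.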
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
func: A callable that accepts as many arguments are there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
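# Minimal illustration: map_structure(lambda x, y: x + y, (1, (2, 3)), (10, (20, 30)))
# returns (11, (22, 33)); every input structure must share the same nesting.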
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
if check_types and isinstance(shallow_tree, dict):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
"structure has keys %s, while shallow structure has keys %s." %
(list(_six.iterkeys(input_tree)),
list(_six.iterkeys(shallow_tree))))
input_tree = list(sorted(_six.iteritems(input_tree)))
shallow_tree = list(sorted(_six.iteritems(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flatten output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function, therefore, will return something with the same base structure
as `shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
| apache-2.0 |
zielmicha/pam_ssh | pam_ssh.py | 1 | 1482 | import os
import sys
import pwd
import socket
import json
auth_token = None
def rpc(name, args):
sock = socket.socket(socket.AF_UNIX)
sock.connect('/var/run/pam_ssh.sock')
f = sock.makefile('r+')
f.write(json.dumps([name, args]) + '\n')
f.flush()
resp = int(f.readline())
return resp
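# Illustrative call (user and token values are made up): rpc('auth', dict(user='alice',
# auth_token=token)) writes ["auth", {...}] as a single JSON line to
# /var/run/pam_ssh.sock and returns the integer status line the daemon sends back
# (0 is treated as success by the callers below).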
def pam_sm_authenticate(pamh, flags, argv):
global auth_token
username = pamh.get_user()
pw = pwd.getpwnam(username)
if pw.pw_uid < 1000:
return pamh.PAM_AUTH_ERR
auth_token = pamh.authtok
if len(auth_token) > 1024:
return pamh.PAM_AUTH_ERR
if not auth_token:
return pamh.PAM_AUTH_ERR
code = rpc('auth', dict(user=username, auth_token=auth_token))
if code == 0:
return pamh.PAM_SUCCESS
else:
return pamh.PAM_AUTH_ERR
def pam_sm_setcred(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_acct_mgmt(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_open_session(pamh, flags, argv):
user = pamh.get_user()
pw = pwd.getpwnam(user)
token = auth_token
if pw.pw_uid < 1000:
return pamh.PAM_SUCCESS
code = rpc('open_session', dict(user=user,
auth_token=auth_token))
if code == 0:
return pamh.PAM_SUCCESS
else:
return pamh.PAM_AUTH_ERR
def pam_sm_close_session(pamh, flags, argv):
return pamh.PAM_SUCCESS
def pam_sm_chauthtok(pamh, flags, argv):
return pamh.PAM_SUCCESS
| mit |
nkrishnaswami/census | uscensus/data/whooshindex.py | 1 | 3248 | from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from whoosh.analysis.filters import StopFilter
from whoosh.analysis import (KeywordAnalyzer, StandardAnalyzer)
from whoosh.filedb.filestore import FileStorage, RamStorage
from whoosh.fields import Schema, KEYWORD, ID, TEXT
from whoosh.qparser import QueryParser
from whoosh.writing import AsyncWriter
KWAnalyzer = KeywordAnalyzer(lowercase=True) | StopFilter()
Analyzer = StandardAnalyzer()
ApiSchemaFields = OrderedDict((
('api_id', ID(unique=True, stored=True)),
('title', KEYWORD(analyzer=KWAnalyzer)),
('description', TEXT(analyzer=Analyzer)),
('geographies', KEYWORD(analyzer=KWAnalyzer)),
('concepts', KEYWORD(analyzer=KWAnalyzer)),
('keywords', KEYWORD(analyzer=KWAnalyzer)),
('tags', KEYWORD(analyzer=KWAnalyzer)),
('variables', KEYWORD(analyzer=KWAnalyzer)),
('vintage', ID),
))
VariableSchemaFields = OrderedDict((
('api_id', ID(stored=True)),
('variable', ID(stored=True)),
('group', ID(stored=True)),
('label', TEXT(analyzer=Analyzer)),
('concept', KEYWORD(analyzer=Analyzer)),
))
class Index(object):
"""Census API metadata indexer."""
def __init__(self, name, schema_fields, dflt_query_field, path=None):
"""Initialize Whoosh index specified fields.
Arguments:
* schema_fields: an OrderedDict of column names to whoosh
field types.
* path: if specified, the path in which to create a
persistent index. If not specified, index to RAM.
"""
self.schema_fields = schema_fields
# Initialize index
fs = FileStorage(path).create() if path else RamStorage()
if fs.index_exists():
self.index = fs.open_index(name)
schema = self.index.schema()
else:
schema = Schema(**self.schema_fields)
self.index = fs.create_index(schema, name)
self.qparser = QueryParser(dflt_query_field,
schema=schema)
self.writer = None
def __enter__(self):
self.writer = AsyncWriter(
self.index, writerargs=dict(limitmb=1000))
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.writer.cancel()
else:
self.writer.commit()
def add(self, iterator, **kwargs):
"""Add entries to the index
Arguments:
* iterator: iterator over tuples of field metadata, viz.
api_id, title, description, variables, geographies, concepts,
keywords, tags, and vintage.
"""
for vals in iterator:
self.writer.add_document(
**dict(zip(self.schema_fields, vals)))
def query(self, querystring):
"""Find API IDs matching querystring"""
query = self.qparser.parse(querystring)
with self.index.searcher() as searcher:
results = searcher.search(query, limit=None)
ret = []
for hit in results:
val = dict(hit.items())
val['score'] = hit.score
ret.append(val)
return ret
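# Minimal usage sketch (index name, path and query string are hypothetical):
#   idx = Index('apis', ApiSchemaFields, 'description', path='/tmp/census-idx')
#   with idx:
#       idx.add(iter_of_api_metadata_tuples)   # tuples ordered like ApiSchemaFields
#   hits = idx.query('population estimates')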
| apache-2.0 |
arpitprogressive/arpittest | apps/admin/migrations/0012_auto__add_field_subsector_career_guide.py | 1 | 13328 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SubSector.career_guide'
db.add_column(u'admin_subsector', 'career_guide',
self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SubSector.career_guide'
db.delete_column(u'admin_subsector', 'career_guide')
models = {
'admin.company': {
'Meta': {'object_name': 'Company'},
'company_type': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'nasscom_membership_number': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '20'}),
'training_provider': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.institution': {
'Meta': {'object_name': 'Institution'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.job': {
'Meta': {'object_name': 'Job'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_internship': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'job_role': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.QualificationPack']"}),
'job_title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
u'admin.logentry': {
'Meta': {'ordering': "(u'-action_time',)", 'object_name': 'LogEntry', 'db_table': "u'django_admin_log'"},
'action_flag': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'action_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_repr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'admin.occupation': {
'Meta': {'object_name': 'Occupation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.Track']", 'null': 'True', 'blank': 'True'})
},
'admin.occupationalstandard': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'OccupationalStandard'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': 'None'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'knowledge': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'performace_criteria': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'scope': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'skills': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'db_index': 'True'})
},
'admin.qualificationpack': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'QualificationPack'},
'alias': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '9', 'blank': 'True'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'experience': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'job_role': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'max_educational_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'min_educational_qualification': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'next_jobs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.QualificationPack']", 'null': 'True', 'blank': 'True'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'nveqf_level': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'occupation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Occupation']"}),
'os_compulsory': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'os_compulsory'", 'blank': 'True', 'to': "orm['admin.OccupationalStandard']"}),
'os_optional': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'os_optional'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['admin.OccupationalStandard']"}),
'role_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['admin.Track']", 'null': 'True', 'blank': 'True'}),
'training': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'})
},
'admin.sector': {
'Meta': {'object_name': 'Sector', 'index_together': "[['name']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '9', 'db_index': 'True'})
},
'admin.subsector': {
'Meta': {'unique_together': "(('sector', 'name'),)", 'object_name': 'SubSector', 'index_together': "[['name', 'sector']]"},
'career_guide': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobility_map': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"})
},
'admin.track': {
'Meta': {'object_name': 'Track'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['admin'] | bsd-3-clause |
okolisny/integration_tests | cfme/tests/cloud_infra_common/test_snapshots_rest.py | 1 | 4663 | # -*- coding: utf-8 -*-
import pytest
import fauxfactory
from cfme import test_requirements
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.common.vm import VM
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils import error, testgen
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.rest import assert_response
from cfme.utils.version import current_version
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.uncollectif(
lambda: current_version() < '5.8'),
pytest.mark.long_running,
pytest.mark.tier(2),
test_requirements.snapshot
]
def pytest_generate_tests(metafunc):
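    """Parametrize collected tests over VMware and OpenStack providers, scoped per module."""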
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc,
[VMwareProvider, OpenStackProvider])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope='module')
@pytest.yield_fixture(scope='module')
def vm_obj(provider, setup_provider_modscope, small_template_modscope):
"""Creates new VM or instance"""
vm_name = random_vm_name('snpsht')
new_vm = VM.factory(vm_name, provider, template_name=small_template_modscope.name)
if not provider.mgmt.does_vm_exist(vm_name):
new_vm.create_on_provider(find_in_cfme=True, allow_skip='default')
yield new_vm
try:
provider.mgmt.delete_vm(new_vm.name)
except Exception:
logger.warning("Failed to delete vm `{}`.".format(new_vm.name))
@pytest.fixture(scope='module')
def collection(appliance, provider):
"""Returns "vms" or "instances" collection based on provider type"""
if provider.one_of(InfraProvider):
return appliance.rest_api.collections.vms
return appliance.rest_api.collections.instances
@pytest.yield_fixture(scope='function')
def vm_snapshot(appliance, collection, vm_obj):
"""Creates VM/instance snapshot using REST API
Returns:
Tuple with VM and snapshot resources in REST API
"""
uid = fauxfactory.gen_alphanumeric(8)
snap_name = 'snpshot_{}'.format(uid)
vm = collection.get(name=vm_obj.name)
vm.snapshots.action.create(
name=snap_name,
description='snapshot {}'.format(uid),
memory=False)
assert_response(appliance)
snap, __ = wait_for(
lambda: vm.snapshots.find_by(name=snap_name) or False,
num_sec=600, delay=5)
snap = snap[0]
yield vm, snap
collection.reload()
to_delete = vm.snapshots.find_by(name=snap_name)
if to_delete:
vm.snapshots.action.delete(to_delete[0])
class TestRESTSnapshots(object):
"""Tests actions with VM/instance snapshots using REST API"""
def test_create_snapshot(self, vm_snapshot):
"""Creates VM/instance snapshot using REST API
Metadata:
test_flag: rest
"""
vm, snapshot = vm_snapshot
vm.snapshots.get(name=snapshot.name)
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_snapshot_from_detail(self, appliance, vm_snapshot, method):
"""Deletes VM/instance snapshot from detail using REST API
Metadata:
test_flag: rest
"""
vm, snapshot = vm_snapshot
if method == 'post':
del_action = snapshot.action.delete.POST
else:
del_action = snapshot.action.delete.DELETE
del_action()
assert_response(appliance)
snapshot.wait_not_exists(num_sec=300, delay=5)
with error.expected('ActiveRecord::RecordNotFound'):
del_action()
assert_response(appliance, http_status=404)
def test_delete_snapshot_from_collection(self, appliance, vm_snapshot):
"""Deletes VM/instance snapshot from collection using REST API
Metadata:
test_flag: rest
"""
vm, snapshot = vm_snapshot
vm.snapshots.action.delete.POST(snapshot)
assert_response(appliance)
snapshot.wait_not_exists(num_sec=300, delay=5)
with error.expected('ActiveRecord::RecordNotFound'):
vm.snapshots.action.delete.POST(snapshot)
assert_response(appliance, http_status=404)
@pytest.mark.uncollectif(lambda provider:
not provider.one_of(InfraProvider) or current_version() < '5.8')
def test_revert_snapshot(self, appliance, vm_snapshot):
"""Reverts VM/instance snapshot using REST API
Metadata:
test_flag: rest
"""
__, snapshot = vm_snapshot
snapshot.action.revert()
assert_response(appliance)
| gpl-2.0 |
geerlingguy/ansible-modules-extras | monitoring/bigpanda.py | 20 | 5211 | #!/usr/bin/python
DOCUMENTATION = '''
---
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
version_added: "1.8"
description:
- Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
options:
component:
description:
- "The name of the component being deployed. Ex: billing"
required: true
alias: name
version:
description:
- The deployment version.
required: true
token:
description:
- API token.
required: true
state:
description:
- State of the deployment.
required: true
choices: ['started', 'finished', 'failed']
hosts:
description:
- Name of affected host name. Can be a list.
required: false
default: machine's hostname
alias: host
env:
description:
- The environment name, typically 'production', 'staging', etc.
required: false
owner:
description:
- The person responsible for the deployment.
required: false
description:
description:
- Free text description of the deployment.
required: false
url:
description:
- Base URL of the API server.
required: False
default: https://api.bigpanda.io
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
'''
EXAMPLES = '''
- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
...
- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished
or using a deployment object:
- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started
register: deployment
- bigpanda: state=finished
args: deployment
If outside servers aren't reachable from your machine, use local_action and pass the hostname:
- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started
register: deployment
...
- local_action: bigpanda state=finished
args: deployment
'''
# ===========================================
# Module execution.
#
import socket
def main():
module = AnsibleModule(
argument_spec=dict(
component=dict(required=True, aliases=['name']),
version=dict(required=True),
token=dict(required=True),
state=dict(required=True, choices=['started', 'finished', 'failed']),
hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']),
env=dict(required=False),
owner=dict(required=False),
description=dict(required=False),
message=dict(required=False),
source_system=dict(required=False, default='ansible'),
validate_certs=dict(default='yes', type='bool'),
url=dict(required=False, default='https://api.bigpanda.io'),
),
supports_check_mode=True,
check_invalid_arguments=False,
)
token = module.params['token']
state = module.params['state']
url = module.params['url']
# Build the common request body
body = dict()
for k in ('component', 'version', 'hosts'):
v = module.params[k]
if v is not None:
body[k] = v
if not isinstance(body['hosts'], list):
body['hosts'] = [body['hosts']]
# Insert state-specific attributes to body
if state == 'started':
for k in ('source_system', 'env', 'owner', 'description'):
v = module.params[k]
if v is not None:
body[k] = v
request_url = url + '/data/events/deployments/start'
else:
message = module.params['message']
if message is not None:
body['errorMessage'] = message
if state == 'finished':
body['status'] = 'success'
else:
body['status'] = 'failure'
request_url = url + '/data/events/deployments/end'
# Build the deployment object we return
deployment = dict(token=token, url=url)
deployment.update(body)
if 'errorMessage' in deployment:
message = deployment.pop('errorMessage')
deployment['message'] = message
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True, **deployment)
# Send the data to bigpanda
data = json.dumps(body)
headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'}
try:
response, info = fetch_url(module, request_url, data=data, headers=headers)
if info['status'] == 200:
module.exit_json(changed=True, **deployment)
else:
module.fail_json(msg=json.dumps(info))
except Exception, e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
xuvw/viewfinder | backend/prod/babysitter.py | 13 | 1670 | #!/usr/bin/env python
#
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Babysitter server starts instances of servers defined in a deployment
template.
Each server instance is started, monitored, and restarted as
necessary. Log files for each server are archived to S3 as
appropriate, custom cloud watch metrics are reported, and AWS SNS is
used to notify of any unrecoverable failures.
Start(): Launch the babysitter application (called from main)
"""
__author__ = '[email protected] (Spencer Kimball)'
import sys
from tornado import ioloop, options, template
from viewfinder.backend.base import admin_server, basic_auth, handler
options.define("babysitter_port", default=1025,
help="Port for babysitter status")
class _MainHandler(basic_auth.BasicAuthHandler):
"""Displays the servers being babysat, with status information."""
_TEMPLATE = template.Template("""
<html>
<title>Babysitter Status</title>
<body>Admin: {{ user }}</body>
</html>
""")
@handler.authenticated()
def get(self):
self.write(_MainHandler._TEMPLATE.generate(
user=self.get_current_user()))
def Start(servers=None):
"""Starts the babysitter tornado web server with SSL.
:arg servers: server deployment specification.
"""
print "in babysitter"
options.parse_command_line()
babysitter = admin_server.AdminServer(
handlers=[(r"/", _MainHandler), ],
port=options.options.babysitter_port)
print "connect to babysitter via https://{0}:{1}/".format(
'localhost', options.options.babysitter_port)
ioloop.IOLoop.instance().start()
def main():
Start()
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
akarki15/mozillians | vendor-local/lib/python/tablib/packages/openpyxl3/writer/workbook.py | 116 | 10769 | # file openpyxl/writer/workbook.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write the workbook global settings to the archive."""
# package imports
from ..shared.xmltools import Element, SubElement
from ..cell import absolute_coordinate
from ..shared.xmltools import get_document_content
from ..shared.ooxml import NAMESPACES, ARC_CORE, ARC_WORKBOOK, \
ARC_APP, ARC_THEME, ARC_STYLE, ARC_SHARED_STRINGS
from ..shared.date_time import datetime_to_W3CDTF
def write_properties_core(properties):
"""Write the core properties to xml."""
root = Element('cp:coreProperties', {'xmlns:cp': NAMESPACES['cp'],
'xmlns:xsi': NAMESPACES['xsi'], 'xmlns:dc': NAMESPACES['dc'],
'xmlns:dcterms': NAMESPACES['dcterms'],
'xmlns:dcmitype': NAMESPACES['dcmitype'], })
SubElement(root, 'dc:creator').text = properties.creator
SubElement(root, 'cp:lastModifiedBy').text = properties.last_modified_by
SubElement(root, 'dcterms:created', \
{'xsi:type': 'dcterms:W3CDTF'}).text = \
datetime_to_W3CDTF(properties.created)
SubElement(root, 'dcterms:modified',
{'xsi:type': 'dcterms:W3CDTF'}).text = \
datetime_to_W3CDTF(properties.modified)
return get_document_content(root)
def write_content_types(workbook):
"""Write the content-types xml."""
root = Element('Types', {'xmlns': 'http://schemas.openxmlformats.org/package/2006/content-types'})
SubElement(root, 'Override', {'PartName': '/' + ARC_THEME, 'ContentType': 'application/vnd.openxmlformats-officedocument.theme+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_STYLE, 'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml'})
SubElement(root, 'Default', {'Extension': 'rels', 'ContentType': 'application/vnd.openxmlformats-package.relationships+xml'})
SubElement(root, 'Default', {'Extension': 'xml', 'ContentType': 'application/xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_WORKBOOK, 'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_APP, 'ContentType': 'application/vnd.openxmlformats-officedocument.extended-properties+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_CORE, 'ContentType': 'application/vnd.openxmlformats-package.core-properties+xml'})
SubElement(root, 'Override', {'PartName': '/' + ARC_SHARED_STRINGS, 'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml'})
drawing_id = 1
chart_id = 1
for sheet_id, sheet in enumerate(workbook.worksheets):
SubElement(root, 'Override',
{'PartName': '/xl/worksheets/sheet%d.xml' % (sheet_id + 1),
'ContentType': 'application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml'})
if sheet._charts:
SubElement(root, 'Override',
{'PartName' : '/xl/drawings/drawing%d.xml' % (sheet_id + 1),
'ContentType' : 'application/vnd.openxmlformats-officedocument.drawing+xml'})
drawing_id += 1
for chart in sheet._charts:
SubElement(root, 'Override',
{'PartName' : '/xl/charts/chart%d.xml' % chart_id,
'ContentType' : 'application/vnd.openxmlformats-officedocument.drawingml.chart+xml'})
chart_id += 1
if chart._shapes:
SubElement(root, 'Override',
{'PartName' : '/xl/drawings/drawing%d.xml' % drawing_id,
'ContentType' : 'application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml'})
drawing_id += 1
return get_document_content(root)
def write_properties_app(workbook):
"""Write the properties xml."""
worksheets_count = len(workbook.worksheets)
root = Element('Properties', {'xmlns': 'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
'xmlns:vt': 'http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes'})
SubElement(root, 'Application').text = 'Microsoft Excel'
SubElement(root, 'DocSecurity').text = '0'
SubElement(root, 'ScaleCrop').text = 'false'
SubElement(root, 'Company')
SubElement(root, 'LinksUpToDate').text = 'false'
SubElement(root, 'SharedDoc').text = 'false'
SubElement(root, 'HyperlinksChanged').text = 'false'
SubElement(root, 'AppVersion').text = '12.0000'
# heading pairs part
heading_pairs = SubElement(root, 'HeadingPairs')
vector = SubElement(heading_pairs, 'vt:vector',
{'size': '2', 'baseType': 'variant'})
variant = SubElement(vector, 'vt:variant')
SubElement(variant, 'vt:lpstr').text = 'Worksheets'
variant = SubElement(vector, 'vt:variant')
SubElement(variant, 'vt:i4').text = '%d' % worksheets_count
# title of parts
title_of_parts = SubElement(root, 'TitlesOfParts')
vector = SubElement(title_of_parts, 'vt:vector',
{'size': '%d' % worksheets_count, 'baseType': 'lpstr'})
for ws in workbook.worksheets:
SubElement(vector, 'vt:lpstr').text = '%s' % ws.title
return get_document_content(root)
def write_root_rels(workbook):
"""Write the relationships xml."""
root = Element('Relationships', {'xmlns':
'http://schemas.openxmlformats.org/package/2006/relationships'})
SubElement(root, 'Relationship', {'Id': 'rId1', 'Target': ARC_WORKBOOK,
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument'})
SubElement(root, 'Relationship', {'Id': 'rId2', 'Target': ARC_CORE,
'Type': 'http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties'})
SubElement(root, 'Relationship', {'Id': 'rId3', 'Target': ARC_APP,
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/extended-properties'})
return get_document_content(root)
def write_workbook(workbook):
"""Write the core workbook xml."""
root = Element('workbook', {'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'xml:space': 'preserve', 'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
SubElement(root, 'fileVersion', {'appName': 'xl', 'lastEdited': '4',
'lowestEdited': '4', 'rupBuild': '4505'})
SubElement(root, 'workbookPr', {'defaultThemeVersion': '124226',
'codeName': 'ThisWorkbook'})
book_views = SubElement(root, 'bookViews')
SubElement(book_views, 'workbookView', {'activeTab': '%d' % workbook.get_index(workbook.get_active_sheet()),
'autoFilterDateGrouping': '1', 'firstSheet': '0', 'minimized': '0',
'showHorizontalScroll': '1', 'showSheetTabs': '1',
'showVerticalScroll': '1', 'tabRatio': '600',
'visibility': 'visible'})
# worksheets
sheets = SubElement(root, 'sheets')
for i, sheet in enumerate(workbook.worksheets):
sheet_node = SubElement(sheets, 'sheet', {'name': sheet.title,
'sheetId': '%d' % (i + 1), 'r:id': 'rId%d' % (i + 1)})
if not sheet.sheet_state == sheet.SHEETSTATE_VISIBLE:
sheet_node.set('state', sheet.sheet_state)
# named ranges
defined_names = SubElement(root, 'definedNames')
for named_range in workbook.get_named_ranges():
name = SubElement(defined_names, 'definedName',
{'name': named_range.name})
# as there can be many cells in one range, generate the list of ranges
dest_cells = []
cell_ids = []
for worksheet, range_name in named_range.destinations:
cell_ids.append(workbook.get_index(worksheet))
dest_cells.append("'%s'!%s" % (worksheet.title.replace("'", "''"),
absolute_coordinate(range_name)))
        # for local ranges, we must check that all the cells belong to the same sheet
base_id = cell_ids[0]
if named_range.local_only and all([x == base_id for x in cell_ids]):
name.set('localSheetId', '%s' % base_id)
# finally write the cells list
name.text = ','.join(dest_cells)
SubElement(root, 'calcPr', {'calcId': '124519', 'calcMode': 'auto',
'fullCalcOnLoad': '1'})
return get_document_content(root)
def write_workbook_rels(workbook):
"""Write the workbook relationships xml."""
root = Element('Relationships', {'xmlns':
'http://schemas.openxmlformats.org/package/2006/relationships'})
for i in range(len(workbook.worksheets)):
SubElement(root, 'Relationship', {'Id': 'rId%d' % (i + 1),
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet',
'Target': 'worksheets/sheet%s.xml' % (i + 1)})
rid = len(workbook.worksheets) + 1
SubElement(root, 'Relationship',
{'Id': 'rId%d' % rid, 'Target': 'sharedStrings.xml',
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings'})
SubElement(root, 'Relationship',
{'Id': 'rId%d' % (rid + 1), 'Target': 'styles.xml',
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles'})
SubElement(root, 'Relationship',
{'Id': 'rId%d' % (rid + 2), 'Target': 'theme/theme1.xml',
'Type': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme'})
return get_document_content(root)
| bsd-3-clause |
chrismeyersfsu/ansible-modules-core | network/nxos/nxos_vxlan_vtep.py | 27 | 17268 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep
version_added: "2.2"
short_description: Manages VXLAN Network Virtualization Endpoint (NVE).
description:
- Manages VXLAN Network Virtualization Endpoint (NVE) overlay interface
that terminates VXLAN tunnels.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- The module is used to manage NVE properties, not to create NVE
interfaces. Use M(nxos_interface) if you wish to do so.
- C(state=absent) removes the interface.
- Default, where supported, restores params default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
description:
description:
- Description of the NVE interface.
required: false
default: null
host_reachability:
description:
- Specify mechanism for host reachability advertisement.
required: false
choices: ['true', 'false']
default: null
shutdown:
description:
- Administratively shutdown the NVE interface.
required: false
choices: ['true','false']
default: false
source_interface:
description:
- Specify the loopback interface whose IP address should be
used for the NVE interface.
required: false
default: null
source_interface_hold_down_time:
description:
- Suppresses advertisement of the NVE loopback address until
the overlay has converged.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vxlan_vtep:
interface: nve1
description: default
host_reachability: default
source_interface: Loopback0
source_interface_hold_down_time: 30
shutdown: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"description": "simple description", "host_reachability": true,
"interface": "nve1", "shutdown": true, "source_interface": "loopback0",
"source_interface_hold_down_time": "30"}
existing:
description: k/v pairs of existing VXLAN VTEP configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of VXLAN VTEP configuration after module execution
returned: verbose mode
type: dict
sample: {"description": "simple description", "host_reachability": true,
"interface": "nve1", "shutdown": true, "source_interface": "loopback0",
"source_interface_hold_down_time": "30"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "source-interface loopback0",
"source-interface hold-down-time 30", "description simple description",
"shutdown", "host-reachability protocol bgp"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = [
'shutdown',
'host_reachability'
]
PARAM_TO_COMMAND_KEYMAP = {
'description': 'description',
'host_reachability': 'host-reachability protocol bgp',
'interface': 'interface',
'shutdown': 'shutdown',
'source_interface': 'source-interface',
'source_interface_hold_down_time': 'source-interface hold-down-time'
}
PARAM_TO_DEFAULT_KEYMAP = {
'description': False,
'shutdown': True,
}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
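    """Parse the current value of arg from the NVE interface config block."""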
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
NO_SHUT_REGEX = re.compile(r'\s+no shutdown\s*$', re.M)
value = False
if arg == 'shutdown':
try:
if NO_SHUT_REGEX.search(config):
value = False
elif REGEX.search(config):
value = True
except TypeError:
value = False
else:
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
NO_DESC_REGEX = re.compile(r'\s+{0}\s*$'.format('no description'), re.M)
SOURCE_INTF_REGEX = re.compile(r'(?:{0}\s)(?P<value>\S+)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if arg == 'description':
if NO_DESC_REGEX.search(config):
value = ''
elif PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value').strip()
elif arg == 'source_interface':
for line in config.splitlines():
try:
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = SOURCE_INTF_REGEX.search(config).group('value').strip()
break
except AttributeError:
value = ''
else:
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value').strip()
return value
def get_existing(module, args):
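    """Return a dict of the NVE interface settings currently configured on the device."""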
existing = {}
netcfg = get_config(module)
interface_string = 'interface {0}'.format(module.params['interface'].lower())
parents = [interface_string]
config = netcfg.get_section(parents)
if config:
for arg in args:
existing[arg] = get_value(arg, config, module)
existing['interface'] = module.params['interface'].lower()
else:
if interface_string in str(netcfg):
existing['interface'] = module.params['interface'].lower()
for arg in args:
existing[arg] = ''
return existing
def apply_key_map(key_map, table):
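    """Return a copy of table with keys renamed to their CLI keywords; keys missing from key_map are dropped."""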
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def fix_commands(commands, module):
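    """Reorder commands so 'source-interface' is applied first and 'no source-interface' last."""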
source_interface_command = ''
no_source_interface_command = ''
for command in commands:
if 'no source-interface hold-down-time' in command:
pass
elif 'source-interface hold-down-time' in command:
pass
elif 'no source-interface' in command:
no_source_interface_command = command
elif 'source-interface' in command:
source_interface_command = command
if source_interface_command:
commands.pop(commands.index(source_interface_command))
commands.insert(0, source_interface_command)
if no_source_interface_command:
commands.pop(commands.index(no_source_interface_command))
commands.append(no_source_interface_command)
return commands
def state_present(module, existing, proposed, candidate):
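    """Translate the proposed NVE settings into CLI commands and add them to the candidate config."""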
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
module.exit_json(commands=commands)
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
commands = fix_commands(commands, module)
parents = ['interface {0}'.format(module.params['interface'].lower())]
candidate.add(commands, parents=parents)
else:
if not existing and module.params['interface']:
commands = ['interface {0}'.format(module.params['interface'].lower())]
candidate.add(commands, parents=[])
def state_absent(module, existing, proposed, candidate):
commands = ['no interface {0}'.format(module.params['interface'].lower())]
candidate.add(commands, parents=[])
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
description=dict(required=False, type='str'),
host_reachability=dict(required=False, type='bool'),
shutdown=dict(required=False, type='bool'),
source_interface=dict(required=False, type='str'),
source_interface_hold_down_time=dict(required=False, type='str'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
state = module.params['state']
interface = module.params['interface'].lower()
args = [
'interface',
'description',
'host_reachability',
'shutdown',
'source_interface',
'source_interface_hold_down_time'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'interface':
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
elif str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
if key in BOOL_PARAMS:
value = False
else:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
if not existing:
WARNINGS.append("The proposed NVE interface did not exist. "
"It's recommended to use nxos_interface to create "
"all logical interfaces.")
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
robotlinker/robotlinker_core | src/rosbridge_suite/rosbridge_server/src/tornado/curl_httpclient.py | 20 | 19862 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Non-blocking HTTP client implementation using pycurl."""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import logging
import pycurl
import threading
import time
from tornado import httputil
from tornado import ioloop
from tornado.log import gen_log
from tornado import stack_context
from tornado.escape import utf8, native_str
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
from tornado.util import bytes_type
try:
from io import BytesIO # py3
except ImportError:
from cStringIO import StringIO as BytesIO # py2
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [_curl_create() for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = collections.deque()
self._fds = {}
self._timeout = None
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl
# multi object are initialized lazily, and its destructor will
# segfault if it is destroyed without having been used. Add
# and remove a dummy handle to make sure everything is
# initialized.
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._force_timeout_callback.stop()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
for curl in self._curls:
curl.close()
self._multi.close()
super(CurlAsyncHTTPClient, self).close()
def fetch_impl(self, request, callback):
self._requests.append((request, callback))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event, fd, multi, data):
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
}
if event == pycurl.POLL_REMOVE:
if fd in self._fds:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
# libcurl sometimes closes a socket and then opens a new
# one using the same FD without giving us a POLL_NONE in
# between. This is a problem with the epoll IOLoop,
# because the kernel can tell when a socket is closed and
# removes it from the epoll automatically, causing future
# update_handler calls to fail. Since we can't tell when
# this has happened, always use remove and re-add
# instead of update.
if fd in self._fds:
self.io_loop.remove_handler(fd)
self.io_loop.add_handler(fd, self._handle_events,
ioloop_event)
self._fds[fd] = ioloop_event
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ:
action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE:
action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._multi.socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self):
"""Called by IOLoop when the requested timeout has passed."""
with stack_context.NullContext():
self._timeout = None
while True:
try:
ret, num_handles = self._multi.socket_action(
pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout >= 0:
self._set_timeout(new_timeout)
def _handle_force_timeout(self):
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
with stack_context.NullContext():
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
_curl_setup_request(curl, request, curl.info["buffer"],
curl.info["headers"])
self._multi.add_handle(curl)
if not started:
break
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
reason=info['headers'].get("X-Http-Reason", None),
request_time=time.time() - info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback):
self.io_loop.handle_callback_exception(callback)
class CurlError(HTTPError):
def __init__(self, errno, message):
HTTPError.__init__(self, 599, message)
self.errno = errno
def _curl_create():
curl = pycurl.Curl()
if gen_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
return curl
def _curl_setup_request(curl, request, buffer, headers):
curl.setopt(pycurl.URL, native_str(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
# Request headers may be either a regular dict or HTTPHeaders object
if isinstance(request.headers, httputil.HTTPHeaders):
curl.setopt(pycurl.HTTPHEADER,
[native_str("%s: %s" % i) for i in request.headers.get_all()])
else:
curl.setopt(pycurl.HTTPHEADER,
[native_str("%s: %s" % i) for i in request.headers.items()])
if request.header_callback:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: request.header_callback(native_str(line)))
else:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: _curl_header_callback(headers,
native_str(line)))
if request.streaming_callback:
write_function = request.streaming_callback
else:
write_function = buffer.write
if bytes_type is str: # py2
curl.setopt(pycurl.WRITEFUNCTION, write_function)
else: # py3
# Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
# a fork/port. That version has a bug in which it passes unicode
# strings instead of bytes to the WRITEFUNCTION. This means that
# if you use a WRITEFUNCTION (which tornado always does), you cannot
# download arbitrary binary data. This needs to be fixed in the
# ported pycurl package, but in the meantime this lambda will
# make it work for downloading (utf8) text.
curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.decompress_response:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, "none")
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
credentials = '%s:%s' % (request.proxy_username,
request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
else:
curl.setopt(pycurl.PROXY, '')
curl.unsetopt(pycurl.PROXYUSERPWD)
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
else:
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
for o in curl_options.values():
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError('unknown method ' + request.method)
# Handle curl's cryptic options for every individual HTTP method
if request.method in ("POST", "PUT"):
if request.body is None:
raise AssertionError(
'Body must not be empty for "%s" request'
% request.method)
request_buffer = BytesIO(utf8(request.body))
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
if request.method == "POST":
def ioctl(cmd):
if cmd == curl.IOCMD_RESTARTREAD:
request_buffer.seek(0)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
else:
curl.setopt(pycurl.INFILESIZE, len(request.body))
elif request.method == "GET":
if request.body is not None:
raise AssertionError('Body must be empty for GET request')
if request.auth_username is not None:
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
curl.setopt(pycurl.USERPWD, native_str(userpwd))
gen_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
gen_log.debug("%s %s", request.method, request.url)
if request.client_cert is not None:
curl.setopt(pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
curl.setopt(pycurl.SSLKEY, request.client_key)
if threading.activeCount() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(headers, header_line):
# header_line as returned by curl includes the end-of-line characters.
header_line = header_line.strip()
if header_line.startswith("HTTP/"):
headers.clear()
try:
(__, __, reason) = httputil.parse_response_start_line(header_line)
header_line = "X-Http-Reason: %s" % reason
except httputil.HTTPInputError:
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(debug_type, debug_msg):
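    # Forward libcurl's debug callback output (informational text, headers, data) to gen_log.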
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
gen_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
gen_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
gen_log.debug('%s %r', debug_types[debug_type], debug_msg)
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
main()
| apache-2.0 |
jankeromnes/depot_tools | third_party/gsutil/oauth2_plugin/__init__.py | 51 | 1121 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Package marker file."""
| bsd-3-clause |
mcalmer/spacewalk | backend/common/rhn_pkg.py | 10 | 4128 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
from rhn.i18n import bstr
from spacewalk.common import checksum
def get_package_header(filename=None, file_obj=None, fd=None):
# pylint: disable=E1103
if filename is not None:
stream = open(filename, mode='rb')
need_close = True
elif file_obj is not None:
stream = file_obj
else:
stream = os.fdopen(os.dup(fd), "r")
need_close = True
if stream.name.endswith('.deb'):
packaging = 'deb'
elif stream.name.endswith('.rpm'):
packaging = 'rpm'
else:
packaging = 'mpm'
a_pkg = package_from_stream(stream, packaging)
a_pkg.read_header()
if need_close:
stream.close()
return a_pkg.header
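# Usage sketch (illustrative only; the path below is hypothetical):
#   header = get_package_header(filename='/tmp/example.rpm')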
def package_from_stream(stream, packaging):
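    """Return a DEB/RPM/MPM package object wrapping stream, or None for an unknown packaging type."""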
if packaging == 'deb':
from spacewalk.common import rhn_deb
a_pkg = rhn_deb.DEB_Package(stream)
elif packaging == 'rpm':
from spacewalk.common import rhn_rpm
a_pkg = rhn_rpm.RPM_Package(stream)
elif packaging == 'mpm':
from spacewalk.common import rhn_mpm
a_pkg = rhn_mpm.MPM_Package(stream)
else:
a_pkg = None
return a_pkg
def package_from_filename(filename):
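    """Open filename and return a package object selected by its file extension."""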
if filename.endswith('.deb'):
packaging = 'deb'
elif filename.endswith('.rpm') or filename.endswith('.hdr'):
packaging = 'rpm'
else:
packaging = 'mpm'
stream = open(filename, mode='rb')
return package_from_stream(stream, packaging)
BUFFER_SIZE = 16384
DEFAULT_CHECKSUM_TYPE = 'md5'
class A_Package:
"""virtual class that implements shared methods for RPM/MPM/DEB package object"""
# pylint: disable=R0902
def __init__(self, input_stream=None):
self.header = None
self.header_start = 0
self.header_end = 0
self.input_stream = input_stream
self.checksum_type = DEFAULT_CHECKSUM_TYPE
self.checksum = None
self.payload_stream = None
self.payload_size = None
def read_header(self):
"""reads header from self.input_file"""
pass
def save_payload(self, output_stream):
"""saves payload to output_stream"""
c_hash = checksum.getHashlibInstance(self.checksum_type, False)
if output_stream:
output_start = output_stream.tell()
self._stream_copy(self.input_stream, output_stream, c_hash)
self.checksum = c_hash.hexdigest()
if output_stream:
self.payload_stream = output_stream
self.payload_size = output_stream.tell() - output_start
def payload_checksum(self):
# just read and compute checksum
start = self.input_stream.tell()
self.save_payload(None)
self.payload_size = self.input_stream.tell() - start + self.header_end
self.payload_stream = self.input_stream
@staticmethod
def _stream_copy(source, dest, c_hash=None):
"""copies data from the source stream to the destination stream"""
while True:
buf = source.read(BUFFER_SIZE)
if not buf:
break
if dest:
dest.write(buf)
if c_hash:
c_hash.update(buf)
@staticmethod
def _read_bytes(stream, amt):
ret = bstr('')
while amt:
buf = stream.read(min(amt, BUFFER_SIZE))
if not buf:
return ret
ret = ret + buf
amt = amt - len(buf)
return ret
class InvalidPackageError(Exception):
pass
| gpl-2.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Lib/distutils/tests/test_bdist_wininst.py | 47 | 1101 | """Tests for distutils.command.bdist_wininst."""
import unittest
import os
from test import test_support
from distutils.dist import Distribution
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
dist = Distribution()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assert_(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
test_support.run_unittest(test_suite())
| apache-2.0 |
ah744/ScaffCC_RKQC | clang/tools/scan-view/Reporter.py | 65 | 8135 | """Methods for reporting bugs."""
import subprocess, sys, os
__all__ = ['ReportFailure', 'BugReport', 'getReporters']
#
class ReportFailure(Exception):
"""Generic exception for failures in bug reporting."""
def __init__(self, value):
self.value = value
# Collect information about a bug.
class BugReport:
def __init__(self, title, description, files):
self.title = title
self.description = description
self.files = files
# Reporter interfaces.
import os
import email, mimetypes, smtplib
from email import encoders
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#===------------------------------------------------------------------------===#
# ReporterParameter
#===------------------------------------------------------------------------===#
class ReporterParameter:
def __init__(self, n):
self.name = n
def getName(self):
return self.name
def getValue(self,r,bugtype,getConfigOption):
return getConfigOption(r.getName(),self.getName())
def saveConfigValue(self):
return True
class TextParameter (ReporterParameter):
def getHTML(self,r,bugtype,getConfigOption):
return """\
<tr>
<td class="form_clabel">%s:</td>
<td class="form_value"><input type="text" name="%s_%s" value="%s"></td>
</tr>"""%(self.getName(),r.getName(),self.getName(),self.getValue(r,bugtype,getConfigOption))
class SelectionParameter (ReporterParameter):
def __init__(self, n, values):
ReporterParameter.__init__(self,n)
self.values = values
def getHTML(self,r,bugtype,getConfigOption):
default = self.getValue(r,bugtype,getConfigOption)
return """\
<tr>
<td class="form_clabel">%s:</td><td class="form_value"><select name="%s_%s">
%s
</select></td>"""%(self.getName(),r.getName(),self.getName(),'\n'.join(["""\
<option value="%s"%s>%s</option>"""%(o[0],
o[0] == default and ' selected="selected"' or '',
o[1]) for o in self.values]))
#===------------------------------------------------------------------------===#
# Reporters
#===------------------------------------------------------------------------===#
class EmailReporter:
def getName(self):
return 'Email'
def getParameters(self):
return map(lambda x:TextParameter(x),['To', 'From', 'SMTP Server', 'SMTP Port'])
# Lifted from python email module examples.
def attachFile(self, outer, path):
# Guess the content type based on the file's extension. Encoding
# will be ignored, although we should check for simple things like
# gzip'd or compressed files.
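        # e.g. 'report.html' -> ('text/html', None); 'log.txt.gz' -> ('text/plain', 'gzip')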
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(path)
# Note: we should handle calculating the charset
msg = MIMEText(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(path, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(msg)
# Set the filename parameter
msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path))
outer.attach(msg)
def fileReport(self, report, parameters):
mainMsg = """\
BUG REPORT
---
Title: %s
Description: %s
"""%(report.title, report.description)
if not parameters.get('To'):
raise ReportFailure('No "To" address specified.')
if not parameters.get('From'):
raise ReportFailure('No "From" address specified.')
msg = MIMEMultipart()
msg['Subject'] = 'BUG REPORT: %s'%(report.title)
# FIXME: Get config parameters
msg['To'] = parameters.get('To')
msg['From'] = parameters.get('From')
msg.preamble = mainMsg
        msg.attach(MIMEText(mainMsg, _subtype='plain'))
for file in report.files:
self.attachFile(msg, file)
try:
s = smtplib.SMTP(host=parameters.get('SMTP Server'),
port=parameters.get('SMTP Port'))
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.close()
except:
raise ReportFailure('Unable to send message via SMTP.')
return "Message sent!"
class BugzillaReporter:
def getName(self):
return 'Bugzilla'
def getParameters(self):
return map(lambda x:TextParameter(x),['URL','Product'])
def fileReport(self, report, parameters):
raise NotImplementedError
class RadarClassificationParameter(SelectionParameter):
def __init__(self):
SelectionParameter.__init__(self,"Classification",
[['1', 'Security'], ['2', 'Crash/Hang/Data Loss'],
['3', 'Performance'], ['4', 'UI/Usability'],
['6', 'Serious Bug'], ['7', 'Other']])
def saveConfigValue(self):
return False
def getValue(self,r,bugtype,getConfigOption):
if bugtype.find("leak") != -1:
return '3'
elif bugtype.find("dereference") != -1:
return '2'
elif bugtype.find("missing ivar release") != -1:
return '3'
else:
return '7'
class RadarReporter:
@staticmethod
def isAvailable():
# FIXME: Find this .scpt better
path = os.path.join(os.path.dirname(__file__),'Resources/GetRadarVersion.scpt')
try:
p = subprocess.Popen(['osascript',path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
return False
data,err = p.communicate()
res = p.wait()
# FIXME: Check version? Check for no errors?
return res == 0
def getName(self):
return 'Radar'
def getParameters(self):
return [ TextParameter('Component'), TextParameter('Component Version'),
RadarClassificationParameter() ]
def fileReport(self, report, parameters):
component = parameters.get('Component', '')
componentVersion = parameters.get('Component Version', '')
classification = parameters.get('Classification', '')
personID = ""
diagnosis = ""
config = ""
if not component.strip():
component = 'Bugs found by clang Analyzer'
if not componentVersion.strip():
componentVersion = 'X'
script = os.path.join(os.path.dirname(__file__),'Resources/FileRadar.scpt')
args = ['osascript', script, component, componentVersion, classification, personID, report.title,
report.description, diagnosis, config] + map(os.path.abspath, report.files)
# print >>sys.stderr, args
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
raise ReportFailure("Unable to file radar (AppleScript failure).")
data, err = p.communicate()
res = p.wait()
if res:
raise ReportFailure("Unable to file radar (AppleScript failure).")
try:
values = eval(data)
except:
raise ReportFailure("Unable to process radar results.")
# We expect (int: bugID, str: message)
if len(values) != 2 or not isinstance(values[0], int):
raise ReportFailure("Unable to process radar results.")
bugID,message = values
bugID = int(bugID)
if not bugID:
raise ReportFailure(message)
return "Filed: <a href=\"rdar://%d/\">%d</a>"%(bugID,bugID)
###
def getReporters():
reporters = []
if RadarReporter.isAvailable():
reporters.append(RadarReporter())
reporters.append(EmailReporter())
return reporters
| bsd-2-clause |
varlog00/Sigil | src/Resource_Files/plugin_launchers/python/sigil_gumboc_tags.py | 6 | 4774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals
TagNames = [
"A",
"ABBR",
"ACRONYM",
"ADDRESS",
"ALTGLYPH",
"ALTGLYPHDEF",
"ALTGLYPHITEM",
"ANIMATE",
"ANIMATECOLOR",
"ANIMATEMOTION",
"ANIMATETRANSFORM",
"ANNOTATION_XML",
"APPLET",
"AREA",
"ARTICLE",
"ASIDE",
"AUDIO",
"B",
"BASE",
"BASEFONT",
"BDI",
"BDO",
"BGSOUND",
"BIG",
"BLINK",
"BLOCKQUOTE",
"BODY",
"BR",
"BUTTON",
"CANVAS",
"CAPTION",
"CENTER",
"CIRCLE",
"CITE",
"CLIPPATH",
"CODE",
"COL",
"COLGROUP",
"COLOR_PROFILE",
"CURSOR",
"DATA",
"DATALIST",
"DD",
"DEFS",
"DEL",
"DESC",
"DETAILS",
"DFN",
"DIR",
"DIV",
"DL",
"DT",
"ELLIPSE",
"EM",
"EMBED",
"FEBLEND",
"FECOLORMATRIX",
"FECOMPONENTTRANSFER",
"FECOMPOSITE",
"FECONVOLVEMATRIX",
"FEDIFFUSELIGHTING",
"FEDISPLACEMENTMAP",
"FEDISTANTLIGHT",
"FEFLOOD",
"FEFUNCA",
"FEFUNCB",
"FEFUNCG",
"FEFUNCR",
"FEGAUSSIANBLUR",
"FEIMAGE",
"FEMERGE",
"FEMERGENODE",
"FEMORPHOLOGY",
"FEOFFSET",
"FEPOINTLIGHT",
"FESPECTACTUALRLIGHTING",
"FESPOTLIGHT",
"FETILE",
"FETURBULENCE",
"FIELDSET",
"FIGCAPTION",
"FIGURE",
"FILTER",
"FONT",
"FONT_FACE",
"FONT_FACE_FORMAT",
"FONT_FACE_NAME",
"FONT_FACE_SRC",
"FONT_FACE_URI",
"FOOTER",
"FOREIGNOBJECT",
"FORM",
"FRAME",
"FRAMESET",
"G",
"GLYPH",
"GLYPHREF",
"H1",
"H2",
"H3",
"H4",
"H5",
"H6",
"HEAD",
"HEADER",
"HGROUP",
"HKERN",
"HR",
"HTML",
"I",
"IFRAME",
"IMAGE",
"IMG",
"INPUT",
"INS",
"ISINDEX",
"KBD",
"KEYGEN",
"LABEL",
"LEGEND",
"LI",
"LINE",
"LINEARGRADIENT",
"LINK",
"LISTING",
"MACTION",
"MAIN",
"MALIGNGROUP",
"MALIGNMARK",
"MAP",
"MARK",
"MARKER",
"MARQUEE",
"MASK",
"MATH",
"MENCLOSE",
"MENU",
"MENUITEM",
"MERROR",
"META",
"METADATA",
"METER",
"MFENCED",
"MFRAC",
"MGLYPH",
"MI",
"MISSING_GLYPH",
"MLABELEDTR",
"MLONGDIV",
"MMULTISCRIPTS",
"MN",
"MO",
"MOVER",
"MPADDED",
"MPATH",
"MPHANTOM",
"MPRESCRIPTS",
"MROOT",
"MROW",
"MS",
"MSCARRIES",
"MSCARRY",
"MSGROUP",
"MSLINE",
"MSPACE",
"MSQRT",
"MSROW",
"MSTACK",
"MSTYLE",
"MSUB",
"MSUBSUP",
"MSUP",
"MTABLE",
"MTD",
"MTEXT",
"MTR",
"MULTICOL",
"MUNDER",
"MUNDEROVER",
"NAV",
"NEXTID",
"NOBR",
"NOEMBED",
"NOFRAMES",
"NONE",
"NOSCRIPT",
"OBJECT",
"OL",
"OPTGROUP",
"OPTION",
"OUTPUT",
"P",
"PARAM",
"PATH",
"PATTERN",
"PLAINTEXT",
"POLYGON",
"POLYLINE",
"PRE",
"PROGRESS",
"Q",
"RADIALGRADIENT",
"RB",
"RECT",
"RP",
"RT",
"RTC",
"RUBY",
"S",
"SAMP",
"SCRIPT",
"SECTION",
"SELECT",
"SEMANTICS",
"SET",
"SMALL",
"SOURCE",
"SPACER",
"SPAN",
"STOP",
"STRIKE",
"STRONG",
"STYLE",
"SUB",
"SUMMARY",
"SUP",
"SVG",
"SWITCH",
"SYMBOL",
"TABLE",
"TBODY",
"TD",
"TEMPLATE",
"TEXT",
"TEXTPATH",
"TEXTAREA",
"TFOOT",
"TH",
"THEAD",
"TIME",
"TITLE",
"TR",
"TRACK",
"TREF",
"TSPAN",
"TT",
"U",
"UL",
"USE",
"VAR",
"VIDEO",
"VIEW",
"VKERN",
"WBR",
"XMP",
]
| gpl-3.0 |
chamikaramj/beam | sdks/python/apache_beam/transforms/write_ptransform_test.py | 9 | 4178 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the write transform."""
import logging
import unittest
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.util import assert_that, is_empty
class _TestSink(iobase.Sink):
TEST_INIT_RESULT = 'test_init_result'
def __init__(self, return_init_result=True, return_write_results=True):
self.return_init_result = return_init_result
self.return_write_results = return_write_results
def initialize_write(self):
if self.return_init_result:
return _TestSink.TEST_INIT_RESULT
def finalize_write(self, init_result, writer_results):
self.init_result_at_finalize = init_result
self.write_results_at_finalize = writer_results
def open_writer(self, init_result, uid):
writer = _TestWriter(init_result, uid, self.return_write_results)
return writer
class _TestWriter(iobase.Writer):
STATE_UNSTARTED, STATE_WRITTEN, STATE_CLOSED = 0, 1, 2
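  # A writer moves UNSTARTED -> WRITTEN on the first write() and -> CLOSED on close().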
TEST_WRITE_RESULT = 'test_write_result'
def __init__(self, init_result, uid, return_write_results=True):
self.state = _TestWriter.STATE_UNSTARTED
self.init_result = init_result
self.uid = uid
self.write_output = []
self.return_write_results = return_write_results
def close(self):
assert self.state in (
_TestWriter.STATE_WRITTEN, _TestWriter.STATE_UNSTARTED)
self.state = _TestWriter.STATE_CLOSED
if self.return_write_results:
return _TestWriter.TEST_WRITE_RESULT
def write(self, value):
if self.write_output:
assert self.state == _TestWriter.STATE_WRITTEN
else:
assert self.state == _TestWriter.STATE_UNSTARTED
self.state = _TestWriter.STATE_WRITTEN
self.write_output.append(value)
class WriteToTestSink(PTransform):
def __init__(self, return_init_result=True, return_write_results=True):
self.return_init_result = return_init_result
self.return_write_results = return_write_results
self.last_sink = None
self.label = 'write_to_test_sink'
def expand(self, pcoll):
self.last_sink = _TestSink(return_init_result=self.return_init_result,
return_write_results=self.return_write_results)
return pcoll | beam.io.Write(self.last_sink)
class WriteTest(unittest.TestCase):
DATA = ['some data', 'more data', 'another data', 'yet another data']
def _run_write_test(self,
data,
return_init_result=True,
return_write_results=True):
write_to_test_sink = WriteToTestSink(return_init_result,
return_write_results)
p = TestPipeline()
result = p | beam.Create(data) | write_to_test_sink | beam.Map(list)
assert_that(result, is_empty())
p.run()
sink = write_to_test_sink.last_sink
self.assertIsNotNone(sink)
def test_write(self):
self._run_write_test(WriteTest.DATA)
def test_write_with_empty_pcollection(self):
data = []
self._run_write_test(data)
def test_write_no_init_result(self):
self._run_write_test(WriteTest.DATA, return_init_result=False)
def test_write_no_write_results(self):
self._run_write_test(WriteTest.DATA, return_write_results=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |
rjsproxy/wagtail | wagtail/wagtailadmin/tests/test_widgets.py | 10 | 2561 | from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from wagtail.wagtailadmin import widgets
from wagtail.wagtailcore.models import Page
from wagtail.tests.testapp.models import SimplePage, EventPage
class TestAdminPageChooserWidget(TestCase):
def setUp(self):
self.root_page = Page.objects.get(id=2)
# Add child page
self.child_page = SimplePage(
title="foobarbaz",
slug="foobarbaz",
)
self.root_page.add_child(instance=self.child_page)
def test_render_html(self):
widget = widgets.AdminPageChooser()
html = widget.render_html('test', None, {})
self.assertIn("<input name=\"test\" type=\"hidden\" />", html)
def test_render_js_init(self):
widget = widgets.AdminPageChooser()
js_init = widget.render_js_init('test-id', 'test', None)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"wagtailcore.page\"], null);")
def test_render_html_with_value(self):
widget = widgets.AdminPageChooser()
html = widget.render_html('test', self.child_page, {})
self.assertIn("<input name=\"test\" type=\"hidden\" value=\"%d\" />" % self.child_page.id, html)
def test_render_js_init_with_value(self):
widget = widgets.AdminPageChooser()
js_init = widget.render_js_init('test-id', 'test', self.child_page)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"wagtailcore.page\"], %d);" % self.root_page.id)
# def test_render_html_init_with_content_type omitted as HTML does not
# change when selecting a content type
def test_render_js_init_with_content_type(self):
content_type = ContentType.objects.get_for_model(SimplePage)
widget = widgets.AdminPageChooser(content_type=content_type)
js_init = widget.render_js_init('test-id', 'test', None)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"tests.simplepage\"], null);")
def test_render_js_init_with_multiple_content_types(self):
content_types = [
# Not using get_for_models as we need deterministic ordering
ContentType.objects.get_for_model(SimplePage),
ContentType.objects.get_for_model(EventPage),
]
widget = widgets.AdminPageChooser(content_type=content_types)
js_init = widget.render_js_init('test-id', 'test', None)
self.assertEqual(js_init, "createPageChooser(\"test-id\", [\"tests.simplepage\", \"tests.eventpage\"], null);")
| bsd-3-clause |
imsparsh/python-for-android | python3-alpha/python3-src/Lib/importlib/test/extension/test_case_sensitivity.py | 50 | 1152 | import sys
from test import support
import unittest
from importlib import _bootstrap
from .. import util
from . import util as ext_util
@util.case_insensitive_tests
class ExtensionModuleCaseSensitivityTest(unittest.TestCase):
def find_module(self):
good_name = ext_util.NAME
bad_name = good_name.upper()
assert good_name != bad_name
finder = _bootstrap._FileFinder(ext_util.PATH,
_bootstrap._ExtensionFinderDetails())
return finder.find_module(bad_name)
def test_case_sensitive(self):
with support.EnvironmentVarGuard() as env:
env.unset('PYTHONCASEOK')
loader = self.find_module()
self.assertIsNone(loader)
def test_case_insensitivity(self):
with support.EnvironmentVarGuard() as env:
env.set('PYTHONCASEOK', '1')
loader = self.find_module()
self.assertTrue(hasattr(loader, 'load_module'))
def test_main():
if ext_util.FILENAME is None:
return
support.run_unittest(ExtensionModuleCaseSensitivityTest)
if __name__ == '__main__':
test_main()
| apache-2.0 |
nathanielvarona/airflow | airflow/migrations/versions/4446e08588_dagrun_start_end.py | 8 | 1400 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""dagrun start end
Revision ID: 4446e08588
Revises: 561833c1c74b
Create Date: 2015-12-10 11:26:18.439223
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4446e08588'
down_revision = '561833c1c74b'
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
op.add_column('dag_run', sa.Column('end_date', sa.DateTime(), nullable=True))
op.add_column('dag_run', sa.Column('start_date', sa.DateTime(), nullable=True))
def downgrade(): # noqa: D103
op.drop_column('dag_run', 'start_date')
op.drop_column('dag_run', 'end_date')
| apache-2.0 |
Dhivyap/ansible | test/units/modules/storage/netapp/test_na_ontap_vscan_scanner_pool.py | 38 | 6534 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for Ansible module: na_ontap_vscan_scanner_pool '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vscan_scanner_pool \
import NetAppOntapVscanScannerPool as scanner_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'scanner':
xml = self.build_scanner_pool_info(self.params)
self.xml_out = xml
return xml
@staticmethod
    def build_scanner_pool_info(scanner_details):
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'vscan-scanner-pool-info': {
                    'scanner-pool': scanner_details['scanner_pool'],
                    'scanner-policy': scanner_details['scanner_policy']
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' Unit tests for na_ontap_job_schedule '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_scanner = {
'state': 'present',
'scanner_pool': 'test_pool',
'vserver': 'test_vserver',
'hostnames': ['host1', 'host2'],
'privileged_users': ['domain\\admin', 'domain\\carchi8py'],
'scanner_policy': 'primary'
}
def mock_args(self):
return {
'state': self.mock_scanner['state'],
'scanner_pool': self.mock_scanner['scanner_pool'],
'vserver': self.mock_scanner['vserver'],
'hostnames': self.mock_scanner['hostnames'],
'privileged_users': self.mock_scanner['privileged_users'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'scanner_policy': self.mock_scanner['scanner_policy']
}
def get_scanner_mock_object(self, kind=None):
scanner_obj = scanner_module()
scanner_obj.asup_log_for_cserver = Mock(return_value=None)
if kind is None:
scanner_obj.server = MockONTAPConnection()
else:
scanner_obj.server = MockONTAPConnection(kind='scanner', data=self.mock_scanner)
return scanner_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
scanner_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_scanner(self):
''' Test if get_scanner_pool returns None for non-existent job '''
set_module_args(self.mock_args())
result = self.get_scanner_mock_object().get_scanner_pool()
assert not result
def test_get_existing_scanner(self):
''' Test if get_scanner_pool returns None for non-existent job '''
set_module_args(self.mock_args())
result = self.get_scanner_mock_object('scanner').get_scanner_pool()
assert result
def test_successfully_create(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object().apply()
assert exc.value.args[0]['changed']
def test_create_idempotency(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object('scanner').apply()
assert not exc.value.args[0]['changed']
def test_apply_policy(self):
data = self.mock_args()
data['scanner_policy'] = 'secondary'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object('scanner').apply()
assert exc.value.args[0]['changed']
def test_successfully_delete(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object('scanner').apply()
assert exc.value.args[0]['changed']
def test_delete_idempotency(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_scanner_mock_object().apply()
assert not exc.value.args[0]['changed']
| gpl-3.0 |
ReachingOut/unisubs | bin/update-integration.py | 4 | 2255 | #!/usr/bin/env python
import optparse
import os
import sys
import subprocess
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def optional_dir():
return os.path.join(root_dir, 'optional')
def repo_dir(repo_name):
return os.path.join(root_dir, repo_name)
def get_repo_names():
return [f for f in os.listdir(optional_dir())
if not f.startswith(".")]
def get_repo_commit(repo_name):
path = os.path.join(optional_dir(), repo_name)
return open(path).read().strip()
def run_command(*args):
subprocess.check_call(args, stdout=open(os.devnull, 'w'))
def run_git_clone(repo_name):
os.chdir(root_dir)
url = "[email protected]:pculture/{0}.git".format(repo_name)
print "{0}: cloning".format(repo_name)
run_command("git", "clone", url)
commit_id = get_repo_commit(repo_name)
os.chdir(repo_dir(repo_name))
print "{0}: reset to {1}".format(repo_name, commit_id)
run_command("git", "reset", "--hard", commit_id)
def run_git_reset(repo_name, skip_fetch):
os.chdir(repo_dir(repo_name))
if not skip_fetch:
print "{0}: fetching".format(repo_name)
run_command("git", "fetch", "origin")
else:
print "{0}: skipping fetch".format(repo_name)
commit_id = get_repo_commit(repo_name)
print "{0} reset to {1}".format(repo_name, commit_id)
run_command("git", "reset", "--hard", commit_id)
def make_option_parser():
parser = optparse.OptionParser()
parser.add_option("--skip-fetch'", dest="skip_fetch",
action='store_true', help="don't run git fetch")
parser.add_option("--clone-missing", dest="clone_missing",
action='store_true', help="clone missing repositories")
return parser
def main(argv):
parser = make_option_parser()
(options, args) = parser.parse_args(argv)
for repo_name in get_repo_names():
if os.path.exists(repo_dir(repo_name)):
run_git_reset(repo_name, options.skip_fetch)
elif options.clone_missing:
run_git_clone(repo_name)
else:
print ("{0}: directory doesn't exist use --clone-missing "
"to create it".format(repo_name))
if __name__ == '__main__':
main(sys.argv)
| agpl-3.0 |
benschulz/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/wrong_handshake_sig_wsh.py | 499 | 1859 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_do_extra_handshake signature.
"""
def no_web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write(
'sub/wrong_handshake_sig_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
tensorflow/agents | tf_agents/policies/random_tf_policy.py | 1 | 7986 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy implementation that generates random actions."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import cast
import tensorflow as tf
from tf_agents.distributions import masked
from tf_agents.policies import tf_policy
from tf_agents.policies import utils as policy_utilities
from tf_agents.specs import bandit_spec_utils
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import nest_utils
def _calculate_log_probability(outer_dims, action_spec):
"""Helper function for calculating log prob of a uniform distribution.
Each item in the returned tensor will be equal to:
|action_spec.shape| * log_prob_of_each_component_of_action_spec.
Note that this method expects the same value for all outer_dims because
we're sampling uniformly from the same distribution for each batch row.
Args:
outer_dims: TensorShape.
action_spec: BoundedTensorSpec.
Returns:
A tensor of type float32 with shape outer_dims.
"""
# Equivalent of what a tfp.distribution.Categorical would return.
if action_spec.dtype.is_integer:
log_prob = -tf.math.log(action_spec.maximum - action_spec.minimum + 1.0)
# Equivalent of what a tfp.distribution.Uniform would return.
else:
log_prob = -tf.math.log(action_spec.maximum - action_spec.minimum)
# Note that log_prob may be a vector. We first reduce it to a scalar, and then
# adjust by the number of times that vector is repeated in action_spec.
log_prob = tf.reduce_sum(log_prob) * (
action_spec.shape.num_elements() / log_prob.shape.num_elements())
# Regardless of the type of the action, the log_prob should be float32.
return tf.cast(tf.fill(outer_dims, log_prob), tf.float32)
# TODO(b/161005095): Refactor into RandomTFPolicy and RandomBanditTFPolicy.
class RandomTFPolicy(tf_policy.TFPolicy):
"""Returns random samples of the given action_spec.
Note: the values in the info_spec (except for the log_probability) are random
values that have nothing to do with the emitted actions.
  Note: The returned info.log_probability will be an object matching the
structure of action_spec, where each value is a tensor of size [batch_size].
"""
def __init__(self, time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec, *args, **kwargs):
observation_and_action_constraint_splitter = (
kwargs.get('observation_and_action_constraint_splitter', None))
self._accepts_per_arm_features = (
kwargs.pop('accepts_per_arm_features', False))
if observation_and_action_constraint_splitter is not None:
if not isinstance(action_spec, tensor_spec.BoundedTensorSpec):
raise NotImplementedError(
'RandomTFPolicy only supports action constraints for '
'BoundedTensorSpec action specs.')
action_spec = tensor_spec.from_spec(action_spec)
action_spec = cast(tensor_spec.BoundedTensorSpec, action_spec)
scalar_shape = action_spec.shape.rank == 0
single_dim_shape = (
action_spec.shape.rank == 1 and action_spec.shape.dims == [1])
if not scalar_shape and not single_dim_shape:
raise NotImplementedError(
'RandomTFPolicy only supports action constraints for action specs '
'shaped as () or (1,) or their equivalent list forms.')
super(RandomTFPolicy, self).__init__(time_step_spec, action_spec, *args,
**kwargs)
def _variables(self):
return []
def _action(self, time_step, policy_state, seed):
observation_and_action_constraint_splitter = (
self.observation_and_action_constraint_splitter)
outer_dims = nest_utils.get_outer_shape(time_step, self._time_step_spec)
if observation_and_action_constraint_splitter is not None:
observation, mask = observation_and_action_constraint_splitter(
time_step.observation)
action_spec = tensor_spec.from_spec(self.action_spec)
action_spec = cast(tensor_spec.BoundedTensorSpec, action_spec)
zero_logits = tf.cast(tf.zeros_like(mask), tf.float32)
masked_categorical = masked.MaskedCategorical(zero_logits, mask)
action_ = tf.cast(masked_categorical.sample() + action_spec.minimum,
action_spec.dtype)
# If the action spec says each action should be shaped (1,), add another
# dimension so the final shape is (B, 1) rather than (B,).
if action_spec.shape.rank == 1:
action_ = tf.expand_dims(action_, axis=-1)
policy_info = tensor_spec.sample_spec_nest(
self._info_spec, outer_dims=outer_dims)
else:
observation = time_step.observation
action_spec = cast(tensor_spec.BoundedTensorSpec, self.action_spec)
if self._accepts_per_arm_features:
max_num_arms = action_spec.maximum - action_spec.minimum + 1
batch_size = tf.shape(time_step.step_type)[0]
num_actions = observation.get(
bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY,
tf.ones(shape=(batch_size,), dtype=tf.int32) * max_num_arms)
mask = tf.sequence_mask(num_actions, max_num_arms)
zero_logits = tf.cast(tf.zeros_like(mask), tf.float32)
masked_categorical = masked.MaskedCategorical(zero_logits, mask)
action_ = tf.nest.map_structure(
lambda t: tf.cast(masked_categorical.sample() + t.minimum, t.dtype),
action_spec)
else:
action_ = tensor_spec.sample_spec_nest(
self._action_spec, seed=seed, outer_dims=outer_dims)
policy_info = tensor_spec.sample_spec_nest(
self._info_spec, outer_dims=outer_dims)
# Update policy info with chosen arm features.
if self._accepts_per_arm_features:
def _gather_fn(t):
return tf.gather(params=t, indices=action_, batch_dims=1)
chosen_arm_features = tf.nest.map_structure(
_gather_fn, observation[bandit_spec_utils.PER_ARM_FEATURE_KEY])
if policy_utilities.has_chosen_arm_features(self._info_spec):
policy_info = policy_info._replace(
chosen_arm_features=chosen_arm_features)
# TODO(b/78181147): Investigate why this control dependency is required.
if time_step is not None:
with tf.control_dependencies(tf.nest.flatten(time_step)):
action_ = tf.nest.map_structure(tf.identity, action_)
if self.emit_log_probability:
if (self._accepts_per_arm_features
or observation_and_action_constraint_splitter is not None):
action_spec = cast(tensor_spec.BoundedTensorSpec, self.action_spec)
log_probability = masked_categorical.log_prob(
action_ - action_spec.minimum)
else:
log_probability = tf.nest.map_structure(
lambda s: _calculate_log_probability(outer_dims, s),
self._action_spec)
policy_info = policy_step.set_log_probability(policy_info,
log_probability)
step = policy_step.PolicyStep(action_, policy_state, policy_info)
return step
def _distribution(self, time_step, policy_state):
raise NotImplementedError(
'RandomTFPolicy does not support distributions yet.')
| apache-2.0 |
jordiclariana/ansible | lib/ansible/module_utils/asa.py | 14 | 4321 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Peter Sprygada, <[email protected]>
# Copyright (c) 2016 Patrick Ogenstad, <@ogenstad>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils.network import NetworkError, NetworkModule
from ansible.module_utils.network import add_argument, register_transport
from ansible.module_utils.network import to_list
from ansible.module_utils.shell import CliBase
from ansible.module_utils.netcli import Command
add_argument('context', dict(required=False))
class Cli(CliBase):
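    # prompt and error patterns used by the underlying shell to detect when ASA
    # command output is complete or has failed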
CLI_PROMPTS_RE = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
CLI_ERRORS_RE = [
re.compile(r"error:", re.I),
]
NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
def __init__(self, *args, **kwargs):
super(Cli, self).__init__(*args, **kwargs)
def connect(self, params, **kwargs):
super(Cli, self).connect(params, kickstart=False, **kwargs)
if params['context']:
self.change_context(params, **kwargs)
def authorize(self, params, **kwargs):
passwd = params['auth_pass']
errors = self.shell.errors
# Disable errors (if already in enable mode)
self.shell.errors = []
cmd = Command('enable', prompt=self.NET_PASSWD_RE, response=passwd)
self.execute([cmd, 'no terminal pager'])
# Reapply error handling
self.shell.errors = errors
def change_context(self, params):
context = params['context']
if context == 'system':
command = 'changeto system'
else:
command = 'changeto context %s' % context
self.execute(command)
### Config methods ###
def configure(self, commands):
cmds = ['configure terminal']
cmds.extend(to_list(commands))
if cmds[-1] == 'exit':
cmds[-1] = 'end'
elif cmds[-1] != 'end':
cmds.append('end')
responses = self.execute(cmds)
return responses[1:]
def get_config(self, include=None):
if include not in [None, 'defaults', 'passwords']:
raise ValueError('include must be one of None, defaults, passwords')
cmd = 'show running-config'
if include == 'passwords':
cmd = 'more system:running-config'
elif include == 'defaults':
cmd = 'show running-config all'
else:
cmd = 'show running-config'
return self.run_commands(cmd)[0]
def load_config(self, commands):
return self.configure(commands)
def save_config(self):
self.execute(['write memory'])
Cli = register_transport('cli', default=True)(Cli)
| gpl-3.0 |
SciTools/mo_pack | lib/mo_pack/tests/test_wgdos.py | 3 | 3859 | # (C) British Crown Copyright 2015, Met Office
#
# This file is part of mo_pack.
#
# mo_pack is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mo_pack is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with mo_pack. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for the `mo_pack.compress_wgdos` and `mo_pack.decompress_wgdos`
functions.
"""
from __future__ import absolute_import, division, print_function
import os
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import mo_pack
class TestPackWGDOS(unittest.TestCase):
def assert_equal_when_decompressed(self, compressed_data, expected_array,
mdi=0):
x, y = expected_array.shape
decompressed_data = mo_pack.decompress_wgdos(
compressed_data, x, y, mdi)
np.testing.assert_array_equal(decompressed_data, expected_array)
def test_pack_wgdos(self):
data = np.arange(42, dtype=np.float32).reshape(7, 6)
compressed_data = mo_pack.compress_wgdos(data)
self.assert_equal_when_decompressed(compressed_data, data)
def test_mdi(self):
data = np.arange(12, dtype=np.float32).reshape(3, 4)
compressed_data = mo_pack.compress_wgdos(data,
missing_data_indicator=4.0)
expected_data = data
data[1, 0] = 4.0
self.assert_equal_when_decompressed(compressed_data, data, mdi=4.0)
def test_accuracy(self):
data = np.array([[0.1234, 0.2345, 0.3456], [0.4567, 0.5678, 0.6789]],
dtype=np.float32)
compressed = mo_pack.compress_wgdos(data, accuracy=-4)
decompressed_data = mo_pack.decompress_wgdos(compressed, 2, 3)
expected = np.array([[0.12340003, 0.18590003, 0.34560001],
[0.40810001, 0.56779999, 0.63029999]],
dtype=np.float32)
np.testing.assert_array_equal(decompressed_data, expected)
class TestdecompressWGDOS(unittest.TestCase):
def test_incorrect_size(self):
data = np.arange(77, dtype=np.float32).reshape(7, 11)
compressed_data = mo_pack.compress_wgdos(data)
with self.assertRaises(ValueError):
decompressed_data = mo_pack.decompress_wgdos(compressed_data, 5, 6)
def test_different_shape(self):
data = np.arange(24, dtype=np.float32).reshape(8, 3)
compressed_data = mo_pack.compress_wgdos(data)
decompressed_data = mo_pack.decompress_wgdos(compressed_data, 4, 6)
np.testing.assert_array_equal(decompressed_data, data.reshape(4, 6))
def test_real_data(self):
test_dir = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(test_dir, 'test_data',
'nae.20100104-06_0001_0001.pp')
with open(fname, 'rb') as fh:
fh.seek(268)
data = mo_pack.decompress_wgdos(fh.read(339464), 360, 600)
assert_almost_equal(data.mean(), 130.84694, decimal=1)
expected = [[388.78125, 389.46875, 384.0625, 388.46875],
[388.09375, 381.375, 374.28125, 374.875],
[382.34375, 373.671875, 371.171875, 368.25],
[385.265625, 373.921875, 368.5, 365.3125]]
assert_array_equal(data[:4, :4], expected)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
40223119/2015w13 | static/Brython3.1.3-20150514-095342/Lib/unittest/main.py | 739 | 10385 | """Unittest main program"""
import sys
import optparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
# Windows it is. Simpler to do a case insensitive match
# a better check would be to check that the name is a
# valid Python module name.
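    # e.g. 'pkg/tests/test_foo.py' -> 'pkg.tests.test_foo'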
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = warnings = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
            # even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print(self.USAGE % usage)
sys.exit(2)
def parseArgs(self, argv):
if ((len(argv) > 1 and argv[1].lower() == 'discover') or
(len(argv) == 1 and self.module is None)):
self._do_discovery(argv[2:])
return
parser = self._getOptParser()
options, args = parser.parse_args(argv[1:])
self._setAttributesFromOptions(options)
if len(args) == 0 and self.module is None:
# this allows "python -m unittest -v" to still work for
# test discovery. This means -c / -b / -v / -f options will
# be handled twice, which is harmless but not ideal.
self._do_discovery(argv[1:])
return
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = _convert_names(args)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _getOptParser(self):
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
parser.add_option('-q', '--quiet', dest='quiet', default=False,
help='Quiet output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
return parser
def _setAttributesFromOptions(self, options):
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
elif options.quiet:
self.verbosity = 0
def _addDiscoveryOptions(self, parser):
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
def _do_discovery(self, argv, Loader=None):
if Loader is None:
Loader = lambda: self.testLoader
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
parser = self._getOptParser()
self._addDiscoveryOptions(parser)
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
self._setAttributesFromOptions(options)
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
| gpl-3.0 |
NewCell/Call-Text-v1 | jni/pjsip/sources/tests/cdash/cfg_site_sample.py | 107 | 1530 | #
# cfg_site_sample.py - Sample site configuration
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import builder
# Your site name
SITE_NAME="Newham3"
# The URL where tests will be submitted to
URL = "http://192.168.0.2/dash/submit.php?project=PJSIP"
# Test group
GROUP = "Experimental"
# PJSIP base directory
BASE_DIR = "/root/project/pjproject"
# List of additional ccdash options
#OPTIONS = ["-o", "out.xml", "-y"]
OPTIONS = []
# What's the content of config_site.h
CONFIG_SITE = ""
# What's the content of user.mak
USER_MAK = ""
# List of regular expression of test patterns to be excluded
EXCLUDE = []
# List of regular expression of test patterns to be included (even
# if they match EXCLUDE patterns)
NOT_EXCLUDE = []
#"configure", "update", "build.*make", "build", "run.py mod_run.*100_simple"]
| gpl-3.0 |
Yannig/ansible-modules-core | cloud/amazon/rds.py | 7 | 40737 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take.
required: true
default: null
aliases: []
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
aliases: []
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
aliases: []
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
aliases: []
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
aliases: []
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
aliases: []
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
aliases: []
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
aliases: []
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
aliases: []
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
aliases: []
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
aliases: []
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
aliases: []
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
aliases: []
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
aliases: []
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
aliases: []
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
aliases: []
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
aliases: []
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
aliases: []
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
aliases: []
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
aliases: []
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
aliases: []
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
      - How long before wait gives up, in seconds.
default: 300
aliases: []
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
aliases: []
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
aliases: []
version_added: 1.5
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
aliases: []
version_added: 1.9
publicly_accessible:
description:
      - Explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
aliases: []
version_added: 1.9
tags:
description:
      - Dict of tags to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
aliases: []
version_added: 1.9
requirements:
- "python >= 2.6"
- "boto"
author: Bruce Pennypacker, Will Thames
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
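# The next two plays are illustrative sketches only: the instance, snapshot and
# replica names are placeholders, and the parameters used are the module
# options documented above.
# Restore a new instance from a previously taken snapshot
- rds:
    command: restore
    instance_name: restored-database
    instance_type: db.m1.small
    snapshot: new_database_snapshot
    wait: yes
# Promote a read replica to a standalone instance
- rds:
    command: promote
    instance_name: new-database-replica
    wait: yes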
'''
import sys
import time
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
class RDSException(Exception):
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError, e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound, e:
return None
except Exception, e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class,
db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.instance.create_time,
'status' : self.status,
'availability_zone' : self.instance.availability_zone,
'backup_retention' : self.instance.backup_retention_period,
'backup_window' : self.instance.preferred_backup_window,
'maintenance_window' : self.instance.preferred_maintenance_window,
'multi_zone' : self.instance.multi_az,
'instance_type' : self.instance.instance_class,
'username' : self.instance.master_username,
'iops' : self.instance.iops
}
# Endpoint exists only if the instance is available
if self.status == 'available':
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception, e:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if self.status == 'available':
d['endpoint'] = self.instance["Endpoint"]["Address"]
d['port'] = self.instance["Endpoint"]["Port"]
else:
d['endpoint'] = None
d['port'] = None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot.snapshot_create_time,
'status' : self.status,
'availability_zone' : self.snapshot.availability_zone,
'instance_id' : self.snapshot.instance_id,
'instance_created' : self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot['SnapshotCreateTime'],
'status' : self.status,
'availability_zone' : self.snapshot['AvailabilityZone'],
'instance_id' : self.snapshot['DBInstanceIdentifier'],
'instance_created' : self.snapshot['InstanceCreateTime'],
'snapshot_type' : self.snapshot['SnapshotType'],
'iops' : self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
wait_timeout = module.params.get('wait_timeout') + time.time()
while wait_timeout > time.time() and resource.status != status:
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for resource %s" % resource.id)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="Problem with snapshot %s" % resource.snapshot)
resource = conn.get_db_snapshot(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="Problem with instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group','port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException, e:
module.fail_json(msg="failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException, e:
module.fail_json(msg="failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException, e:
module.fail_json(msg="failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException, e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception, e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException, e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result.get_data().get('replication_source'):
changed = False
else:
        try:
            result = conn.promote_read_replica(instance_name, **params)
            changed = True
        except RDSException, e:
            module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True),
instance_name = dict(required=False),
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),
vpc_security_groups = dict(type='list', required=False),
port = dict(required=False),
upgrade = dict(type='bool', default=False),
option_group = dict(required=False),
maint_window = dict(required=False),
backup_window = dict(required=False),
backup_retention = dict(required=False),
zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet = dict(required=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(type='int', default=300),
snapshot = dict(required=False),
apply_immediately = dict(type='bool', default=False),
new_instance_name = dict(required=False),
tags = dict(type='dict', required=False),
publicly_accessible = dict(required=False),
character_set_name = dict(required=False),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.")
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
mick-d/nipype_source | nipype/interfaces/slicer/surface.py | 13 | 16200 | # -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class MergeModelsInputSpec(CommandLineInputSpec):
Model1 = File(position=-3, desc="Model", exists=True, argstr="%s")
Model2 = File(position=-2, desc="Model", exists=True, argstr="%s")
ModelOutput = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Model", argstr="%s")
class MergeModelsOutputSpec(TraitedSpec):
ModelOutput = File(position=-1, desc="Model", exists=True)
class MergeModels(SEMLikeCommandLine):
"""title: Merge Models
category: Surface Models
description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files.
version: $Revision$
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = MergeModelsInputSpec
output_spec = MergeModelsOutputSpec
_cmd = "MergeModels "
_outputs_filenames = {'ModelOutput':'ModelOutput.vtk'}
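# Illustrative usage sketch only (the .vtk file names are hypothetical, and the
# Slicer CLI executables must be installed and on the PATH for run() to work):
#
#   merger = MergeModels()
#   merger.inputs.Model1 = 'lh_pial.vtk'
#   merger.inputs.Model2 = 'rh_pial.vtk'
#   merger.inputs.ModelOutput = 'merged_pial.vtk'
#   merger.run()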
class ModelToLabelMapInputSpec(CommandLineInputSpec):
distance = traits.Float(desc="Sample distance", argstr="--distance %f")
InputVolume = File(position=-3, desc="Input volume", exists=True, argstr="%s")
surface = File(position=-2, desc="Model", exists=True, argstr="%s")
OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="The label volume", argstr="%s")
class ModelToLabelMapOutputSpec(TraitedSpec):
OutputVolume = File(position=-1, desc="The label volume", exists=True)
class ModelToLabelMap(SEMLikeCommandLine):
"""title: Model To Label Map
category: Surface Models
description: Intersects an input model with a reference volume and produces an output label map.
version: 0.1.0.$Revision: 8643 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap
contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = ModelToLabelMapInputSpec
output_spec = ModelToLabelMapOutputSpec
_cmd = "ModelToLabelMap "
_outputs_filenames = {'OutputVolume':'OutputVolume.nii'}
class GrayscaleModelMakerInputSpec(CommandLineInputSpec):
InputVolume = File(position=-2, desc="Volume containing the input grayscale data.", exists=True, argstr="%s")
OutputGeometry = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output that contains geometry model.", argstr="%s")
threshold = traits.Float(desc="Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.", argstr="--threshold %f")
name = traits.Str(desc="Name to use for this model.", argstr="--name %s")
smooth = traits.Int(desc="Number of smoothing iterations. If 0, no smoothing will be done.", argstr="--smooth %d")
decimate = traits.Float(desc="Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.", argstr="--decimate %f")
splitnormals = traits.Bool(desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements", argstr="--splitnormals ")
pointnormals = traits.Bool(desc="Calculate the point normals? Calculated point normals make the surface appear smooth. Without point normals, the surface will appear faceted.", argstr="--pointnormals ")
class GrayscaleModelMakerOutputSpec(TraitedSpec):
OutputGeometry = File(position=-1, desc="Output that contains geometry model.", exists=True)
class GrayscaleModelMaker(SEMLikeCommandLine):
"""title: Grayscale Model Maker
category: Surface Models
description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. This model works on continuous data while the module Model Maker works on labeled (or discrete) data.
version: 3.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker
license: slicer3
contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = GrayscaleModelMakerInputSpec
output_spec = GrayscaleModelMakerOutputSpec
_cmd = "GrayscaleModelMaker "
_outputs_filenames = {'OutputGeometry':'OutputGeometry.vtk'}
class ProbeVolumeWithModelInputSpec(CommandLineInputSpec):
InputVolume = File(position=-3, desc="Volume to use to 'paint' the model", exists=True, argstr="%s")
InputModel = File(position=-2, desc="Input model", exists=True, argstr="%s")
OutputModel = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output 'painted' model", argstr="%s")
class ProbeVolumeWithModelOutputSpec(TraitedSpec):
OutputModel = File(position=-1, desc="Output 'painted' model", exists=True)
class ProbeVolumeWithModel(SEMLikeCommandLine):
"""title: Probe Volume With Model
category: Surface Models
description: Paint a model by a volume (using vtkProbeFilter).
version: 0.1.0.$Revision: 1892 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel
contributor: Lauren O'Donnell (SPL, BWH)
acknowledgements: BWH, NCIGT/LMI
"""
input_spec = ProbeVolumeWithModelInputSpec
output_spec = ProbeVolumeWithModelOutputSpec
_cmd = "ProbeVolumeWithModel "
_outputs_filenames = {'OutputModel':'OutputModel.vtk'}
class LabelMapSmoothingInputSpec(CommandLineInputSpec):
labelToSmooth = traits.Int(desc="The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.", argstr="--labelToSmooth %d")
numberOfIterations = traits.Int(desc="The number of iterations of the level set AntiAliasing algorithm", argstr="--numberOfIterations %d")
maxRMSError = traits.Float(desc="The maximum RMS error.", argstr="--maxRMSError %f")
gaussianSigma = traits.Float(desc="The standard deviation of the Gaussian kernel", argstr="--gaussianSigma %f")
inputVolume = File(position=-2, desc="Input label map to smooth", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Smoothed label map", argstr="%s")
class LabelMapSmoothingOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Smoothed label map", exists=True)
class LabelMapSmoothing(SEMLikeCommandLine):
"""title: Label Map Smoothing
category: Surface Models
description: This filter smoothes a binary label map. With a label map as input, this filter runs an anti-alising algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map.
version: 1.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing
contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research.
"""
input_spec = LabelMapSmoothingInputSpec
output_spec = LabelMapSmoothingOutputSpec
_cmd = "LabelMapSmoothing "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
class ModelMakerInputSpec(CommandLineInputSpec):
InputVolume = File(position=-1, desc="Input label map. The Input Volume drop down menu is populated with the label map volumes that are present in the scene, select one from which to generate models.", exists=True, argstr="%s")
color = File(desc="Color table to make labels to colors and objects", exists=True, argstr="--color %s")
modelSceneFile = traits.Either(traits.Bool, InputMultiPath(File(), ), hash_files=False, desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", argstr="--modelSceneFile %s...")
name = traits.Str(desc="Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.", argstr="--name %s")
generateAll = traits.Bool(desc="Generate models for all labels in the input volume. select this option if you want to create all models that correspond to all values in a labelmap volume (using the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.", argstr="--generateAll ")
    labels = InputMultiPath(traits.Int, desc="A comma separated list of label values from which to make models. If you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.", sep=",", argstr="--labels %s")
start = traits.Int(desc="If you want to specify a continuous range of labels from which to generate models, enter the lower label here. Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will over ride this).", argstr="--start %d")
end = traits.Int(desc="If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.", argstr="--end %d")
skipUnNamed = traits.Bool(desc="Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.", argstr="--skipUnNamed ")
jointsmooth = traits.Bool(desc="This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.", argstr="--jointsmooth ")
smooth = traits.Int(desc="Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. ", argstr="--smooth %d")
filtertype = traits.Enum("Sinc", "Laplacian", desc="You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.", argstr="--filtertype %s")
decimate = traits.Float(desc="Chose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. For example, 0.1 means 10% reduction and 0.9 means 90% reduction.", argstr="--decimate %f")
splitnormals = traits.Bool(desc="Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affects measurements.", argstr="--splitnormals ")
pointnormals = traits.Bool(desc="Turn this flag on if you wish to calculate the normal vectors for the points.", argstr="--pointnormals ")
pad = traits.Bool(desc="Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.", argstr="--pad ")
    saveIntermediateModels = traits.Bool(desc="You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file, you have to load them manually after turning off deleting temporary files in the Python console (View -> Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().", argstr="--saveIntermediateModels ")
debug = traits.Bool(desc="turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)", argstr="--debug ")
class ModelMakerOutputSpec(TraitedSpec):
modelSceneFile = OutputMultiPath(File(exists=True), desc="Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.", exists=True)
class ModelMaker(SEMLikeCommandLine):
"""title: Model Maker
category: Surface Models
description: Create 3D surface models from segmented data.<p>Models are imported into Slicer under a model hierarchy node in an MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).</p><p><b>Create Multiple:</b></p><p>If you specify a list of Labels, it will override any start/end label settings.</p><p>If you click <i>Generate All</i> it will override the list of labels and any start/end label settings.</p><p><b>Model Maker Settings:</b></p><p>You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.<br>You can set the flags to split normals or generate point normals in this pane as well.<br>You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:<br><i>slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()</i></p>
version: 4.1
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker
license: slicer4
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = ModelMakerInputSpec
output_spec = ModelMakerOutputSpec
_cmd = "ModelMaker "
_outputs_filenames = {'modelSceneFile':'modelSceneFile.mrml'}
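# Illustrative usage sketch only (the label-map file name and label values are
# hypothetical); ModelMaker wraps the Slicer "ModelMaker" CLI named in _cmd:
#
#   maker = ModelMaker()
#   maker.inputs.InputVolume = 'atlas_labels.nrrd'
#   maker.inputs.labels = [1, 2, 3]
#   maker.inputs.modelSceneFile = 'models.mrml'
#   maker.run()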
| bsd-3-clause |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/toolz/curried/__init__.py | 7 | 2615 | """
Alternate namespace for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)]
>>> list(map(lambda seq: get(0, seq), data))
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> list(map(get(0), data))
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from . import operator
from toolz import (
comp,
complement,
compose,
concat,
concatv,
count,
curry,
diff,
dissoc,
first,
flip,
frequencies,
identity,
interleave,
isdistinct,
isiterable,
juxt,
last,
memoize,
merge_sorted,
peek,
pipe,
second,
thread_first,
thread_last,
)
from .exceptions import merge, merge_with
accumulate = toolz.curry(toolz.accumulate)
assoc = toolz.curry(toolz.assoc)
assoc_in = toolz.curry(toolz.assoc_in)
cons = toolz.curry(toolz.cons)
countby = toolz.curry(toolz.countby)
do = toolz.curry(toolz.do)
drop = toolz.curry(toolz.drop)
excepts = toolz.curry(toolz.excepts)
filter = toolz.curry(toolz.filter)
get = toolz.curry(toolz.get)
get_in = toolz.curry(toolz.get_in)
groupby = toolz.curry(toolz.groupby)
interpose = toolz.curry(toolz.interpose)
itemfilter = toolz.curry(toolz.itemfilter)
itemmap = toolz.curry(toolz.itemmap)
iterate = toolz.curry(toolz.iterate)
join = toolz.curry(toolz.join)
keyfilter = toolz.curry(toolz.keyfilter)
keymap = toolz.curry(toolz.keymap)
map = toolz.curry(toolz.map)
mapcat = toolz.curry(toolz.mapcat)
nth = toolz.curry(toolz.nth)
partial = toolz.curry(toolz.partial)
partition = toolz.curry(toolz.partition)
partition_all = toolz.curry(toolz.partition_all)
partitionby = toolz.curry(toolz.partitionby)
pluck = toolz.curry(toolz.pluck)
random_sample = toolz.curry(toolz.random_sample)
reduce = toolz.curry(toolz.reduce)
reduceby = toolz.curry(toolz.reduceby)
remove = toolz.curry(toolz.remove)
sliding_window = toolz.curry(toolz.sliding_window)
sorted = toolz.curry(toolz.sorted)
tail = toolz.curry(toolz.tail)
take = toolz.curry(toolz.take)
take_nth = toolz.curry(toolz.take_nth)
topk = toolz.curry(toolz.topk)
unique = toolz.curry(toolz.unique)
update_in = toolz.curry(toolz.update_in)
valfilter = toolz.curry(toolz.valfilter)
valmap = toolz.curry(toolz.valmap)
del exceptions
del toolz
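# Illustrative sketch of composing the curried names exported above; the
# account data is made up, but pipe, filter, map and get are the real
# functions from this namespace:
#
#   accounts = [(1, 'Alice', 100), (2, 'Bob', 200), (3, 'Charlie', 150)]
#   pipe(accounts,
#        filter(lambda acct: acct[2] > 120),  # keep balances above 120
#        map(get(1)),                         # project the name column
#        list)                                # -> ['Bob', 'Charlie']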
| gpl-3.0 |
TeachAtTUM/edx-platform | cms/djangoapps/contentstore/management/commands/reindex_library.py | 18 | 2454 | """ Management command to update libraries' search index """
from __future__ import print_function
from textwrap import dedent
from django.core.management import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from contentstore.courseware_index import LibrarySearchIndexer
from xmodule.modulestore.django import modulestore
from .prompt import query_yes_no
class Command(BaseCommand):
"""
Command to reindex content libraries (single, multiple or all available)
Examples:
./manage.py reindex_library lib1 lib2 - reindexes libraries with keys lib1 and lib2
./manage.py reindex_library --all - reindexes all available libraries
"""
help = dedent(__doc__)
CONFIRMATION_PROMPT = u"Reindexing all libraries might be a time consuming operation. Do you want to continue?"
def add_arguments(self, parser):
parser.add_argument('library_ids', nargs='*')
parser.add_argument(
'--all',
action='store_true',
dest='all',
help='Reindex all libraries'
)
def _parse_library_key(self, raw_value):
""" Parses library key from string """
result = CourseKey.from_string(raw_value)
if not isinstance(result, LibraryLocator):
raise CommandError(u"Argument {0} is not a library key".format(raw_value))
return result
def handle(self, *args, **options):
"""
By convention set by django developers, this method actually executes command's actions.
So, there could be no better docstring than emphasize this once again.
"""
if (not options['library_ids'] and not options['all']) or (options['library_ids'] and options['all']):
raise CommandError(u"reindex_library requires one or more <library_id>s or the --all flag.")
store = modulestore()
if options['all']:
if query_yes_no(self.CONFIRMATION_PROMPT, default="no"):
library_keys = [library.location.library_key.replace(branch=None) for library in store.get_libraries()]
else:
return
else:
library_keys = map(self._parse_library_key, options['library_ids'])
for library_key in library_keys:
print("Indexing library {}".format(library_key))
LibrarySearchIndexer.do_library_reindex(store, library_key)
| agpl-3.0 |
CVML/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
appcelerator-developer-relations/appc-sample-ti500 | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
danuzclaudes/robottelo | tests/foreman/ui/test_template.py | 3 | 10224 | # -*- encoding: utf-8 -*-
"""Test class for Template UI"""
from fauxfactory import gen_string
from nailgun import entities
from robottelo.constants import OS_TEMPLATE_DATA_FILE, SNIPPET_DATA_FILE
from robottelo.datafactory import generate_strings_list, invalid_values_list
from robottelo.decorators import run_only_on, tier1, tier2
from robottelo.helpers import get_data_file
from robottelo.test import UITestCase
from robottelo.ui.base import UIError
from robottelo.ui.factory import make_templates
from robottelo.ui.locators import common_locators
from robottelo.ui.session import Session
OS_TEMPLATE_DATA_FILE = get_data_file(OS_TEMPLATE_DATA_FILE)
SNIPPET_DATA_FILE = get_data_file(SNIPPET_DATA_FILE)
class TemplateTestCase(UITestCase):
"""Implements Provisioning Template tests from UI"""
@classmethod
def setUpClass(cls):
super(TemplateTestCase, cls).setUpClass()
cls.organization = entities.Organization().create()
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create new template using different valid names
@Feature: Template - Positive Create
@Assert: New provisioning template of type 'provision' should be
created successfully
"""
with Session(self.browser) as session:
for name in generate_strings_list(length=8):
with self.subTest(name):
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_name(self):
"""Create a new template with invalid names
@Feature: Template - Negative Create
@Assert: Template is not created
"""
with Session(self.browser) as session:
for name in invalid_values_list(interface='ui'):
with self.subTest(name):
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.wait_until_element(
common_locators['name_haserror']))
@run_only_on('sat')
@tier1
def test_negative_create_with_same_name(self):
"""Template - Create a new template with same name
@Feature: Template - Negative Create
@Assert: Template is not created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.wait_until_element(
common_locators['name_haserror']))
@run_only_on('sat')
@tier1
def test_negative_create_without_type(self):
"""Template - Create a new template without selecting its type
@Feature: Template - Negative Create
@Assert: Template is not created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
with self.assertRaises(UIError) as context:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='',
)
self.assertEqual(
context.exception.message,
'Could not create template "{0}" without type'.format(name)
)
@run_only_on('sat')
@tier1
def test_negative_create_without_upload(self):
"""Template - Create a new template without uploading a template
@Feature: Template - Negative Create
@Assert: Template is not created
"""
name = gen_string('alpha')
with Session(self.browser) as session:
with self.assertRaises(UIError) as context:
make_templates(
session,
name=name,
template_path='',
custom_really=True,
template_type='PXELinux',
)
self.assertEqual(
context.exception.message,
'Could not create blank template "{0}"'.format(name)
)
@run_only_on('sat')
@tier1
def test_negative_create_with_too_long_audit(self):
"""Create a new template with 256 characters in audit comments
@Feature: Template - Negative Create
@Assert: Template is not created
"""
with Session(self.browser) as session:
make_templates(
session,
name=gen_string('alpha', 16),
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
audit_comment=gen_string('alpha', 256),
template_type='PXELinux',
)
self.assertIsNotNone(self.template.wait_until_element(
common_locators['haserror']))
@run_only_on('sat')
@tier1
def test_positive_create_with_snippet_type(self):
"""Create new template of type snippet
@Feature: Template - Positive Create
@Assert: New provisioning template of type 'snippet' should be created
successfully
"""
with Session(self.browser) as session:
for name in generate_strings_list(length=8):
with self.subTest(name):
make_templates(
session,
name=name,
template_path=SNIPPET_DATA_FILE,
custom_really=True,
snippet=True,
)
self.assertIsNotNone(self.template.search(name))
@run_only_on('sat')
@tier1
def test_positive_delete(self):
"""Delete an existing template
@Feature: Template - Positive Delete
@Assert: Template is deleted successfully
"""
with Session(self.browser) as session:
for template_name in generate_strings_list(length=8):
with self.subTest(template_name):
entities.ConfigTemplate(
name=template_name,
organization=[self.organization],
).create()
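                    # The template is created through the API; the UI is then
                    # used only to select the organization and delete it.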
session.nav.go_to_select_org(self.organization.name)
self.template.delete(template_name)
@run_only_on('sat')
@tier1
def test_positive_update_name_and_type(self):
"""Update template name and template type
@Feature: Template - Positive Update
@Assert: The template name and type should be updated successfully
"""
name = gen_string('alpha')
new_name = gen_string('alpha')
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
self.template.update(name, False, new_name, None, 'PXELinux')
self.assertIsNotNone(self.template.search(new_name))
@run_only_on('sat')
@tier1
def test_positive_update_os(self):
"""Creates new template, along with two OS's and associate list
of OS's with created template
@Feature: Template - Positive Update
@Assert: The template should be updated with newly created OS's
successfully
"""
name = gen_string('alpha')
new_name = gen_string('alpha')
os_list = [
entities.OperatingSystem().create().name for _ in range(2)
]
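        # Two operating systems are created up front via the API so they can
        # be attached to the template during the UI update.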
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
self.template.update(name, False, new_name, new_os_list=os_list)
self.assertIsNotNone(self.template.search(new_name))
@run_only_on('sat')
@tier2
def test_positive_clone(self):
"""Assure ability to clone a provisioning template
@Feature: Template - Clone
@Steps:
1. Go to Provisioning template UI
2. Choose a template and attempt to clone it
@Assert: The template is cloned
"""
name = gen_string('alpha')
clone_name = gen_string('alpha')
os_list = [
entities.OperatingSystem().create().name for _ in range(2)
]
with Session(self.browser) as session:
make_templates(
session,
name=name,
template_path=OS_TEMPLATE_DATA_FILE,
custom_really=True,
template_type='provision',
)
self.assertIsNotNone(self.template.search(name))
self.template.clone(
name,
custom_really=False,
clone_name=clone_name,
os_list=os_list,
)
self.assertIsNotNone(self.template.search(clone_name))
| gpl-3.0 |