repo_name (string, 5-100) | ref (string, 12-67) | path (string, 4-244) | copies (string, 1-8) | content (string, 0-1.05M, nullable)
---|---|---|---|---|
kiddhustle/wiardfmblog | refs/heads/master | django/contrib/gis/management/commands/ogrinspect.py | 126 | import os
from optparse import make_option
from django.contrib.gis import gdal
from django.core.management.base import LabelCommand, CommandError
def layer_option(option, opt, value, parser):
"""
Callback for `make_option` for the `ogrinspect` `layer_key`
keyword option which may be an integer or a string.
"""
try:
dest = int(value)
except ValueError:
dest = value
setattr(parser.values, option.dest, dest)
def list_option(option, opt, value, parser):
"""
Callback for `make_option` for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
if value.lower() == 'true':
dest = True
else:
dest = [s for s in value.split(',')]
setattr(parser.values, option.dest, dest)
class Command(LabelCommand):
help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode')
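# Illustrative invocation (a hedged sketch; the shapefile name is assumed, the flags map
# to the options defined below):
#   ./manage.py ogrinspect zipcode.shp Zipcode --srid=4326 --mapping --multi-geom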
args = '[data_source] [model_name]'
option_list = LabelCommand.option_list + (
make_option('--blank', dest='blank', type='string', action='callback',
callback=list_option, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to '
'`true` to apply to all applicable fields.'),
make_option('--decimal', dest='decimal', type='string', action='callback',
callback=list_option, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.'),
make_option('--geom-name', dest='geom_name', type='string', default='geom',
help='Specifies the model name for the Geometry Field '
'(defaults to `geom`)'),
make_option('--layer', dest='layer_key', type='string', action='callback',
callback=layer_option, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.'),
make_option('--multi-geom', action='store_true', dest='multi_geom', default=False,
help='Treat the geometry in the data source as a geometry collection.'),
make_option('--name-field', dest='name_field',
help='Specifies a field name to return for the `__unicode__` function.'),
make_option('--no-imports', action='store_false', dest='imports', default=True,
help='Do not include `from django.contrib.gis.db import models` '
'statement.'),
make_option('--null', dest='null', type='string', action='callback',
callback=list_option, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to '
'`true` to apply to all applicable fields.'),
make_option('--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.'),
make_option('--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.')
)
requires_model_validation = False
def handle(self, *args, **options):
try:
data_source, model_name = args
except ValueError:
raise CommandError('Invalid arguments, must provide: %s' % self.args)
if not gdal.HAS_GDAL:
raise CommandError('GDAL is required to inspect geospatial data sources.')
# Removing options with `None` values.
options = dict([(k, v) for k, v in options.items() if v is not None])
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.OGRException as msg:
raise CommandError(msg)
# Whether the user wants to generate the LayerMapping dictionary as well.
show_mapping = options.pop('mapping', False)
# Getting rid of settings that `_ogrinspect` doesn't like.
verbosity = options.pop('verbosity', False)
settings = options.pop('settings', False)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
output = [s for s in _ogrinspect(ds, model_name, **options)]
if show_mapping:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {'geom_name' : options['geom_name'],
'layer_key' : options['layer_key'],
'multi_geom' : options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = dict([(v, k) for k, v in mapping_dict.items()])
output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend([" '%s' : '%s'," % (rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields])
output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
|
pshen/ansible | refs/heads/devel | lib/ansible/modules/windows/win_msg.py | 24 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jon Hawkesworth (@jhawkesworth) <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a Windows documentation stub. The actual code lives in the .ps1
# file of the same name.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_msg
version_added: "2.3"
short_description: Sends a message to logged in users on Windows hosts.
description:
- Wraps the msg.exe command in order to send messages to Windows hosts.
options:
to:
description:
- Who to send the message to. Can be a username, sessionname or sessionid.
default: '*'
display_seconds:
description:
- How long to wait for receiver to acknowledge message, in seconds.
default: 10
wait:
description:
- Whether to wait for users to respond. Module will only wait for the number of seconds specified in display_seconds or 10 seconds if not specified.
However, if I(wait) is true, the message is sent to each logged on user in turn, waiting for the user to either press 'ok' or for
the timeout to elapse before moving on to the next user.
required: false
default: false
msg:
description:
- The text of the message to be displayed.
default: Hello world!
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- This module must run on a Windows host, so ensure your play targets Windows
hosts, or delegates to a Windows host.
- Messages are only sent to the local host where the module is run.
- The module does not support sending to users listed in a file.
- Setting wait to true can result in long run times on systems with many logged in users.
'''
EXAMPLES = r'''
# Warn logged in users of impending upgrade
win_msg:
display_seconds: 60
msg: "Automated upgrade about to start. Please save your work and log off before {{ deployment_start_time }}"
'''
RETURN = r'''
msg:
description: Text of the message that was sent.
returned: changed
type: string
sample: "Automated upgrade about to start. Please save your work and log off before 22 July 2016 18:00:00"
display_seconds:
description: Value of display_seconds module parameter.
returned: success
type: string
sample: 10
runtime_seconds:
description: How long the module took to run on the remote Windows host.
returned: success
type: string
sample: 22 July 2016 17:45:51
sent_localtime:
description: Local time from the Windows host when the message was sent.
returned: success
type: string
sample: 22 July 2016 17:45:51
wait:
description: Value of wait module parameter.
returned: success
type: boolean
sample: false
'''
|
wdzhou/mantid | refs/heads/master | buildconfig/cmakelists_utils.py | 4 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import datetime
import os
import re
import sys
#======================================================================================
def find_basedir(project, subproject):
""" Returns the base directory. If the subproject is known to be in MantidQt or Vates, it uses that.
The default is current dir + Framework
Parameters
---------
project : the project, Framework, MantidQt, etc.
subproject : the subproject, Kernel, API, etc.
Returns
-------
basedir = base directory
header_folder = the folder name under the inc/ subfolder.
"""
header_folder = "Mantid" + subproject
if project == "MantidQt": header_folder = "MantidQt" + subproject
scriptdir = os.path.split(__file__)[0] #Folder of Code/Build
codedir = os.path.split(scriptdir)[0] #Folder of Code/
basedir = os.path.join(codedir, project, subproject)
return (basedir, header_folder)
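# Illustrative example (assumed layout, not from this file): with the sources checked out
# under <codedir>, find_basedir("Framework", "Kernel") returns
# ("<codedir>/Framework/Kernel", "MantidKernel").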
#======================================================================================
def redo_cmake_section(lines, cmake_tag, add_this_line, remove_this_line=""):
""" Read the LINES of a file. Find first "set ( cmake_tag",
read all the lines to get all the files,
add your new line,
sort them,
rewrite. Only touches first section found to avoid messing up any other set
sections in the rest of the file
"""
search_for1 = "set ( " + cmake_tag
search_for2 = "set (" + cmake_tag
# List of files found in the section
files = []
lines_before = []
lines_after = []
section_num = 0
section_processed = False
for line in lines:
if line.strip().startswith(search_for1): section_num = 1
if line.strip().startswith(search_for2): section_num = 1
if section_num == 0:
# These are the lines before
lines_before.append(line)
elif not section_processed and section_num == 1:
#this is a line with the name of a file
line = line.strip()
# Take off the tag
if line.startswith(search_for1): line = line[len(search_for1):].strip()
if line.startswith(search_for2): line = line[len(search_for2):].strip()
# Did we reach the last one?
if line.endswith(")"):
section_num = 2
section_processed = True
line = line[0:len(line) - 1].strip()
if len(line) > 0:
files.append(line)
else:
# These are lines after
lines_after.append(line)
# Add the new file to the list of files
if len(add_this_line) > 0:
files.append(add_this_line)
# Use a set to keep only unique lines
files = set(files)
# Remove an entry from the cmake list
try:
if len(remove_this_line) > 0:
files.remove(remove_this_line)
except KeyError:
# Ignore missing entry.
pass
files = list(files)
# Sort-em alphabetically
files.sort()
lines = lines_before
lines.append("set ( " + cmake_tag)
for file in files:
lines.append("\t" + file)
lines.append(")") # close the parentheses
lines += lines_after
return lines
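# Illustrative sketch (assumed input, not from this file): given lines containing
#   set ( SRC_FILES
#       src/Bar.cpp
#   )
# calling redo_cmake_section(lines, "SRC_FILES", "src/Foo.cpp") rewrites that section as
#   set ( SRC_FILES
#       src/Bar.cpp
#       src/Foo.cpp
#   )
# with duplicate entries removed and the file names sorted alphabetically.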
#======================================================================
def fix_cmake_format(subproject):
""" Just fix the CMAKE format"""
cmake_path = os.path.join(os.path.curdir, "Framework/" + subproject + "/CMakeLists.txt")
source = open(cmake_path).read()
lines = source.split("\n")
lines = redo_cmake_section(lines, "SRC_FILES", "")
lines = redo_cmake_section(lines, "INC_FILES", "")
lines = redo_cmake_section(lines, "TEST_FILES", "")
f = open(cmake_path, 'w')
text = "\n".join(lines)
f.write(text)
f.close()
#======================================================================
def fix_all_cmakes():
""" Fix all cmake files """
projects = ["Algorithms", "DataObjects", "MDAlgorithms", "API",
"Geometry", "CurveFitting", "ICat", "MDEvents",
"DataHandling", "Kernel", "Nexus", "Crystal"]
for proj in projects:
fix_cmake_format(proj)
#======================================================================
def add_to_cmake(subproject, classname, args, subfolder):
""" Add the class to the cmake list of the given class
Parameters:
subproject : API, Kernel
classname : name of the class
args : argparse args
subfolder : subfolder under inc and src
"""
basedir, header_folder = find_basedir(args.project, subproject)
cmake_path = os.path.join(basedir, "CMakeLists.txt")
source = open(cmake_path).read()
lines = source.split("\n")
if args.header:
lines = redo_cmake_section(lines, "INC_FILES", "inc/" + header_folder + "/" + subfolder + classname + ".h")
if args.cpp:
lines = redo_cmake_section(lines, "SRC_FILES", "src/" + subfolder + classname + ".cpp")
if args.test:
lines = redo_cmake_section(lines, "TEST_FILES", classname + "Test.h")
f = open(cmake_path, 'w')
text = "\n".join(lines)
f.write(text)
f.close()
#======================================================================
def remove_from_cmake(subproject, classname, args, subfolder):
""" Removes the class from the cmake list of the given project """
basedir, header_folder = find_basedir(args.project, subproject)
cmake_path = os.path.join(basedir, "CMakeLists.txt")
source = open(cmake_path).read()
lines = source.split("\n")
if args.header:
lines = redo_cmake_section(lines, "INC_FILES", "", "inc/" + header_folder + "/"+ subfolder + classname + ".h")
if args.cpp:
lines = redo_cmake_section(lines, "SRC_FILES", "", "src/" + subfolder + classname + ".cpp")
if args.test:
lines = redo_cmake_section(lines, "TEST_FILES", "", classname + "Test.h")
f = open(cmake_path, 'w')
text = "\n".join(lines)
f.write(text)
f.close()
|
ralphbean/monroe | refs/heads/master | wsgi/tg2app/tg2app/model/foreclosure.py | 1 | # -*- coding: utf-8 -*-
"""Sample model module."""
import sqlalchemy as sa
from sqlalchemy import *
from sqlalchemy.orm import mapper, relation
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.types import Integer, Unicode, Float, Boolean, DateTime
from tg2app.model import DeclarativeBase, metadata, DBSession
from datetime import datetime
import pprint
class Foreclosure(DeclarativeBase):
__tablename__ = 'foreclosure_table'
def fancy_format(self):
d = dict()
for prop in sa.orm.class_mapper(Foreclosure).iterate_properties:
d[prop.key] = str(getattr(self, prop.key))
return pprint.pformat(d)
control_no = Column(Unicode(255), nullable=False, primary_key=True)
book = Column(Unicode(255), nullable=False)
book_page = Column(Unicode(255), nullable=False)
filing_date = Column(DateTime, nullable=False)
formatted_address = Column(Unicode(255), nullable=False)
grantee = Column(Unicode(255), nullable=False)
grantor = Column(Unicode(255), nullable=False)
index_detail = Column(Unicode(255), nullable=False)
instrument_type = Column(Unicode(255), nullable=False)
land_description = Column(Unicode(255), nullable=False)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
map_ready = Column(Boolean, nullable=False)
property_address = Column(Unicode(255), nullable=False)
reference_1 = Column(Unicode(255), nullable=False)
reference_2 = Column(Unicode(255), nullable=False)
view_image = Column(Unicode(255), nullable=False)
# Pulled from geo.cityofrochester.gov
xreffed_owner = Column(Unicode(255), nullable=False)
xref_updated = Column(DateTime, nullable=False, default=datetime.now)
# ridiculous details
acreage = Column(Unicode(255), nullable=False)
assessed_value = Column(Unicode(255), nullable=False)
baths = Column(Unicode(255), nullable=False)
bedrooms = Column(Unicode(255), nullable=False)
depth = Column(Unicode(255), nullable=False)
frontage = Column(Unicode(255), nullable=False)
housing_units = Column(Unicode(255), nullable=False)
improvements = Column(Unicode(255), nullable=False)
land_value = Column(Unicode(255), nullable=False)
landuse = Column(Unicode(255), nullable=False)
lot_number = Column(Unicode(255), nullable=False)
rooms = Column(Unicode(255), nullable=False)
square_footage = Column(Unicode(255), nullable=False)
stories = Column(Unicode(255), nullable=False)
#subdivision = Column(Unicode(255), nullable=False)
year_built = Column(Unicode(255), nullable=False)
zoning = Column(Unicode(255), nullable=False)
def csv_headers(self):
return [
key for key, value in Foreclosure.__dict__.iteritems()
if type(getattr(Foreclosure, key)) == InstrumentedAttribute
]
def to_csv(self):
return "|".join([
str(getattr(self, attr)) for attr in self.csv_headers()
])
def to_dict(self):
return dict([(attr, getattr(self, attr))
for attr in self.csv_headers()])
def to_geojson(self):
d = self.to_dict()
d['filing_date'] = d['filing_date'].strftime('%m/%d/%Y')
d['xref_updated'] = d['xref_updated'].strftime('%m/%d/%Y')
return d
|
synconics/odoo | refs/heads/8.0 | addons/website/models/website.py | 25 | # -*- coding: utf-8 -*-
import cStringIO
import contextlib
import datetime
import hashlib
import inspect
import logging
import math
import mimetypes
import unicodedata
import os
import re
import time
import urlparse
from PIL import Image
from sys import maxint
import werkzeug
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import slugify as slugify_lib
except ImportError:
slugify_lib = None
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools import html_escape as escape, ustr, image_resize_and_sharpen, image_save_for_web
from openerp.tools.safe_eval import safe_eval
from openerp.addons.web.http import request
logger = logging.getLogger(__name__)
def url_for(path_or_uri, lang=None):
if isinstance(path_or_uri, unicode):
path_or_uri = path_or_uri.encode('utf-8')
current_path = request.httprequest.path
if isinstance(current_path, unicode):
current_path = current_path.encode('utf-8')
location = path_or_uri.strip()
force_lang = lang is not None
url = urlparse.urlparse(location)
if request and not url.netloc and not url.scheme and (url.path or force_lang):
location = urlparse.urljoin(current_path, location)
lang = lang or request.context.get('lang')
langs = [lg[0] for lg in request.website.get_languages()]
if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
ps = location.split('/')
if ps[1] in langs:
# Replace the language only if we explicitly provide a language to url_for
if force_lang:
ps[1] = lang
# Remove the default language unless it's explicitly provided
elif ps[1] == request.website.default_lang_code:
ps.pop(1)
# Insert the context language or the provided language
elif lang != request.website.default_lang_code or force_lang:
ps.insert(1, lang)
location = '/'.join(ps)
return location.decode('utf-8')
def is_multilang_url(local_url, langs=None):
if not langs:
langs = [lg[0] for lg in request.website.get_languages()]
spath = local_url.split('/')
# if a language is already in the path, remove it
if spath[1] in langs:
spath.pop(1)
local_url = '/'.join(spath)
try:
# Try to match an endpoint in werkzeug's routing table
url = local_url.split('?')
path = url[0]
query_string = url[1] if len(url) > 1 else None
router = request.httprequest.app.get_db_router(request.db).bind('')
# Match using method POST, since Odoo routes declare methods ['POST'] or ['GET', 'POST']
func = router.match(path, method='POST', query_args=query_string)[0]
return (func.routing.get('website', False) and
func.routing.get('multilang', func.routing['type'] == 'http'))
except Exception:
return False
def slugify(s, max_length=None):
""" Transform a string to a slug that can be used in a url path.
This method will first try to do the job with python-slugify if present.
Otherwise it will process the string by stripping leading and trailing spaces,
converting unicode chars to ascii, lowercasing all chars and replacing spaces
and underscores with the hyphen "-".
:param s: str
:param max_length: int
:rtype: str
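Illustrative example (assumed input, fallback path without python-slugify installed):
slugify(u'Hello World_42!') -> u'hello-world-42'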
"""
s = ustr(s)
if slugify_lib:
# There are 2 different libraries with this import name; only python-slugify is supported
try:
return slugify_lib.slugify(s, max_length=max_length)
except TypeError:
pass
uni = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
slug = re.sub('[\W_]', ' ', uni).strip().lower()
slug = re.sub('[-\s]+', '-', slug)
return slug[:max_length]
def slug(value):
if isinstance(value, orm.browse_record):
# [(id, name)] = value.name_get()
id, name = value.id, value.display_name
else:
# assume name_search result tuple
id, name = value
slugname = slugify(name or '').strip().strip('-')
if not slugname:
return str(id)
return "%s-%d" % (slugname, id)
# NOTE: as the pattern is used as-is by the ModelConverter (ir_http.py), do not use any flags
_UNSLUG_RE = re.compile(r'(?:(\w{1,2}|\w[A-Za-z0-9-_]+?\w)-)?(-?\d+)(?=$|/)')
def unslug(s):
"""Extract slug and id from a string.
Always returns a 2-tuple (str|None, int|None).
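For example (illustrative values): unslug('agrolait-3') returns ('agrolait', 3),
while unslug('not-a-slug') returns (None, None).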
"""
m = _UNSLUG_RE.match(s)
if not m:
return None, None
return m.group(1), int(m.group(2))
def urlplus(url, params):
return werkzeug.Href(url)(params or None)
class website(osv.osv):
def _get_menu_website(self, cr, uid, ids, context=None):
# IF a menu is changed, update all websites
return self.search(cr, uid, [], context=context)
def _get_menu(self, cr, uid, ids, name, arg, context=None):
root_domain = [('parent_id', '=', False)]
menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
menu = menus and menus[0] or False
return dict( map(lambda x: (x, menu), ids) )
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'name': fields.char('Domain'),
'company_id': fields.many2one('res.company', string="Company"),
'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
'default_lang_id': fields.many2one('res.lang', string="Default language", required=True),
'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
'social_twitter': fields.char('Twitter Account'),
'social_facebook': fields.char('Facebook Account'),
'social_github': fields.char('GitHub Account'),
'social_linkedin': fields.char('LinkedIn Account'),
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
'user_id': fields.many2one('res.users', string='Public User'),
'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.main_company'),
}
# cf. Wizard hack in website_views.xml
def noop(self, *args, **kwargs):
pass
def write(self, cr, uid, ids, vals, context=None):
self._get_languages.clear_cache(self)
return super(website, self).write(cr, uid, ids, vals, context)
def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
context = context or {}
imd = self.pool.get('ir.model.data')
view = self.pool.get('ir.ui.view')
template_module, template_name = template.split('.')
# completely arbitrary max_length
page_name = slugify(name, max_length=50)
page_xmlid = "%s.%s" % (template_module, page_name)
try:
# existing page
imd.get_object_reference(cr, uid, template_module, page_name)
except ValueError:
# new page
_, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
page_id = view.copy(cr, uid, template_id, context=context)
page = view.browse(cr, uid, page_id, context=context)
page.write({
'arch': page.arch.replace(template, page_xmlid),
'name': page_name,
'page': ispage,
})
imd.create(cr, uid, {
'name': page_name,
'module': template_module,
'model': 'ir.ui.view',
'res_id': page_id,
'noupdate': True
}, context=context)
return page_xmlid
def page_for_name(self, cr, uid, ids, name, module='website', context=None):
# whatever
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
try:
name = (name or "").replace("/page/website.", "").replace("/page/", "")
if not name:
return False
return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
except:
return False
@openerp.tools.ormcache(skiparg=3)
def _get_languages(self, cr, uid, id):
website = self.browse(cr, uid, id)
return [(lg.code, lg.name) for lg in website.language_ids]
def get_languages(self, cr, uid, ids, context=None):
return self._get_languages(cr, uid, ids[0])
def get_alternate_languages(self, cr, uid, ids, req=None, context=None):
langs = []
if req is None:
req = request.httprequest
default = self.get_current_website(cr, uid, context=context).default_lang_code
shorts = []
def get_url_localized(router, lang):
arguments = dict(request.endpoint_arguments)
for k, v in arguments.items():
if isinstance(v, orm.browse_record):
arguments[k] = v.with_context(lang=lang)
return router.build(request.endpoint, arguments)
router = request.httprequest.app.get_db_router(request.db).bind('')
for code, name in self.get_languages(cr, uid, ids, context=context):
lg_path = ('/' + code) if code != default else ''
lg = code.split('_')
shorts.append(lg[0])
uri = request.endpoint and get_url_localized(router, code) or request.httprequest.path
if req.query_string:
uri += '?' + req.query_string
lang = {
'hreflang': ('-'.join(lg)).lower(),
'short': lg[0],
'href': req.url_root[0:-1] + lg_path + uri,
}
langs.append(lang)
for lang in langs:
if shorts.count(lang['short']) == 1:
lang['hreflang'] = lang['short']
return langs
def get_current_website(self, cr, uid, context=None):
# TODO: Select website, currently hard coded
return self.pool['website'].browse(cr, uid, 1, context=context)
def is_publisher(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context=context)
return is_website_publisher
def is_user(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
return Access.check(cr, uid, 'ir.ui.menu', 'read', False, context=context)
def get_template(self, cr, uid, ids, template, context=None):
if isinstance(template, (int, long)):
view_id = template
else:
if '.' not in template:
template = 'website.%s' % template
module, xmlid = template.split('.', 1)
model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
def _render(self, cr, uid, ids, template, values=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)
def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return request.render(template, values, uid=uid)
def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
# Compute Pager
page_count = int(math.ceil(float(total) / step))
page = max(1, min(int(page if str(page).isdigit() else 1), page_count))
scope -= 1
pmin = max(page - int(math.floor(scope/2)), 1)
pmax = min(pmin + scope, page_count)
if pmax - pmin < scope:
pmin = pmax - scope if pmax - scope > 0 else 1
def get_url(page):
_url = "%s/page/%s" % (url, page) if page > 1 else url
if url_args:
_url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
return _url
return {
"page_count": page_count,
"offset": (page - 1) * step,
"page": {
'url': get_url(page),
'num': page
},
"page_start": {
'url': get_url(pmin),
'num': pmin
},
"page_previous": {
'url': get_url(max(pmin, page - 1)),
'num': max(pmin, page - 1)
},
"page_next": {
'url': get_url(min(pmax, page + 1)),
'num': min(pmax, page + 1)
},
"page_end": {
'url': get_url(pmax),
'num': pmax
},
"pages": [
{'url': get_url(page), 'num': page}
for page in xrange(pmin, pmax+1)
]
}
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = endpoint.routing.get('methods') or ['GET']
converters = rule._converters.values()
if not ('GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
and all(hasattr(converter, 'generate') for converter in converters)
and endpoint.routing.get('website')):
return False
# don't list routes whose arguments have no default value or converter
spec = inspect.getargspec(endpoint.method.original_func)
# remove self and arguments having a default value
defaults_count = len(spec.defaults or [])
args = spec.args[1:(-defaults_count or None)]
# check that all args have a converter
return all( (arg in rule._converters) for arg in args)
def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
""" Available pages in the website/CMS. This is mostly used for links
generation and can be overridden by modules setting up new HTML
controllers for dynamic pages (e.g. blog).
By default, returns template views marked as pages.
:param str query_string: a (user-provided) string, fetches pages
matching the string
:returns: a list of mappings with two keys: ``name`` is the displayable
name of the resource (page), ``url`` is the absolute URL
of the same.
:rtype: list({name: str, url: str})
"""
router = request.httprequest.app.get_db_router(request.db)
# Force enumeration to be performed as public user
url_set = set()
for rule in router.iter_rules():
if not self.rule_is_enumerable(rule):
continue
converters = rule._converters or {}
if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]):
continue
values = [{}]
convitems = converters.items()
# converters with a domain are processed after the other ones
gd = lambda x: hasattr(x[1], 'domain') and (x[1].domain != '[]')
convitems.sort(lambda x, y: cmp(gd(x), gd(y)))
for (i,(name, converter)) in enumerate(convitems):
newval = []
for val in values:
query = i==(len(convitems)-1) and query_string
for v in converter.generate(request.cr, uid, query=query, args=val, context=context):
newval.append( val.copy() )
v[name] = v['loc']
del v['loc']
newval[-1].update(v)
values = newval
for value in values:
domain_part, url = rule.build(value, append_unknown=False)
page = {'loc': url}
for key,val in value.items():
if key.startswith('__'):
page[key[2:]] = val
if url in ('/sitemap.xml',):
continue
if url in url_set:
continue
url_set.add(url)
yield page
def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
name = (needle or "").replace("/page/website.", "").replace("/page/", "")
name = slugify(name, max_length=50)
res = []
for page in self.enumerate_pages(cr, uid, ids, query_string=name, context=context):
res.append(page)
if len(res) == limit:
break
return res
def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
step = step and int(step) or 10
scope = scope and int(scope) or 5
orderby = orderby or "name"
get_args = dict(request.httprequest.args or {})
model_obj = self.pool[model]
relation = model_obj._columns.get(column)._obj
relation_obj = self.pool[relation]
get_args.setdefault('kanban', "")
kanban = get_args.pop('kanban')
kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)
pages = {}
for col in kanban.split(","):
if col:
col = col.split("-")
pages[int(col[0])] = int(col[1])
objects = []
for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
obj = {}
# browse column
relation_id = group[column][0]
obj['column_id'] = relation_obj.browse(cr, uid, relation_id)
obj['kanban_url'] = kanban_url
for k, v in pages.items():
if k != relation_id:
obj['kanban_url'] += "%s-%s" % (k, v)
# pager
number = model_obj.search(cr, uid, group['__domain'], count=True)
obj['page_count'] = int(math.ceil(float(number) / step))
obj['page'] = pages.get(relation_id) or 1
if obj['page'] > obj['page_count']:
obj['page'] = obj['page_count']
offset = (obj['page']-1) * step
obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])
# view data
obj['domain'] = group['__domain']
obj['model'] = model
obj['step'] = step
obj['orderby'] = orderby
# browse objects
object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
obj['object_ids'] = model_obj.browse(cr, uid, object_ids)
objects.append(obj)
values = {
'objects': objects,
'range': range,
'template': template,
}
return request.website._render("website.kanban_contain", values)
def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
html = ""
model_obj = self.pool[model]
domain = safe_eval(domain)
step = int(step)
offset = (int(page)-1) * step
object_ids = model_obj.search(cr, uid, domain, limit=step, offset=offset, order=orderby)
object_ids = model_obj.browse(cr, uid, object_ids)
for object_id in object_ids:
html += request.website._render(template, {'object_id': object_id})
return html
def _image_placeholder(self, response):
# file_open may return a StringIO. A StringIO can be closed but is not a
# context manager in Python 2, though that is fixed in Python 3
with contextlib.closing(openerp.tools.misc.file_open(
os.path.join('web', 'static', 'src', 'img', 'placeholder.png'),
mode='rb')) as f:
response.data = f.read()
return response.make_conditional(request.httprequest)
def _image(self, cr, uid, model, id, field, response, max_width=maxint, max_height=maxint, cache=None, context=None):
""" Fetches the requested field and ensures it does not go above
(max_width, max_height), resizing it if necessary.
Resizing is bypassed if the object provides a $field_big, which will
be interpreted as a pre-resized version of the base field.
If the record is not found or does not have the requested field,
returns a placeholder image via :meth:`~._image_placeholder`.
Sets and checks conditional response parameters:
* :mailheader:`ETag` is always set (and checked)
* :mailheader:`Last-Modified` is set iff the record has a concurrency
field (``__last_update``)
The requested field is assumed to be base64-encoded image data in
all cases.
"""
Model = self.pool[model]
id = int(id)
ids = None
if Model.check_access_rights(cr, uid, 'read', raise_exception=False):
ids = Model.search(cr, uid,
[('id', '=', id)], context=context)
if not ids and 'website_published' in Model._fields:
ids = Model.search(cr, openerp.SUPERUSER_ID,
[('id', '=', id), ('website_published', '=', True)], context=context)
if not ids:
return self._image_placeholder(response)
concurrency = '__last_update'
[record] = Model.read(cr, openerp.SUPERUSER_ID, [id],
[concurrency, field],
context=context)
if concurrency in record:
server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
try:
response.last_modified = datetime.datetime.strptime(
record[concurrency], server_format + '.%f')
except ValueError:
# just in case we have a timestamp without microseconds
response.last_modified = datetime.datetime.strptime(
record[concurrency], server_format)
# Field does not exist on model or field set to False
if not record.get(field):
# FIXME: maybe a field which does not exist should be a 404?
return self._image_placeholder(response)
response.set_etag(hashlib.sha1(record[field]).hexdigest())
response.make_conditional(request.httprequest)
if cache:
response.cache_control.max_age = cache
response.expires = int(time.time() + cache)
# conditional request match
if response.status_code == 304:
return response
data = record[field].decode('base64')
image = Image.open(cStringIO.StringIO(data))
response.mimetype = Image.MIME[image.format]
filename = '%s_%s.%s' % (model.replace('.', '_'), id, str(image.format).lower())
response.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
if (not max_width) and (not max_height):
response.data = data
return response
w, h = image.size
max_w = int(max_width) if max_width else maxint
max_h = int(max_height) if max_height else maxint
if w < max_w and h < max_h:
response.data = data
else:
size = (max_w, max_h)
img = image_resize_and_sharpen(image, size, preserve_aspect_ratio=True)
image_save_for_web(img, response.stream, format=image.format)
# invalidate content-length computed by make_conditional as
# writing to response.stream does not do it (as of werkzeug 0.9.3)
del response.headers['Content-Length']
return response
def image_url(self, cr, uid, record, field, size=None, context=None):
"""Returns a local url that points to the image field of a given browse record."""
model = record._name
sudo_record = record.sudo()
id = '%s_%s' % (record.id, hashlib.sha1(sudo_record.write_date or sudo_record.create_date or '').hexdigest()[0:7])
size = '' if size is None else '/%s' % size
return '/website/image/%s/%s/%s%s' % (model, id, field, size)
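# Illustrative result (assumed record and field, not from this file): for a res.partner
# record with id 3 and no size argument, image_url returns something like
# '/website/image/res.partner/3_0a1b2c3/image', where '0a1b2c3' is the first 7 hex chars
# of the sha1 of the record's write_date.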
class website_menu(osv.osv):
_name = "website.menu"
_description = "Website Menu"
_columns = {
'name': fields.char('Menu', required=True, translate=True),
'url': fields.char('Url'),
'new_window': fields.boolean('New Window'),
'sequence': fields.integer('Sequence'),
# TODO: support multiwebsite once done for ir.ui.views
'website_id': fields.many2one('website', 'Website'),
'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
}
def __defaults_sequence(self, cr, uid, context):
menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
return menu and menu[0]["sequence"] or 0
_defaults = {
'url': '',
'sequence': __defaults_sequence,
'new_window': False,
}
_parent_store = True
_parent_order = 'sequence'
_order = "sequence"
# would be better to take a menu_id as argument
def get_tree(self, cr, uid, website_id, context=None):
def make_tree(node):
menu_node = dict(
id=node.id,
name=node.name,
url=node.url,
new_window=node.new_window,
sequence=node.sequence,
parent_id=node.parent_id.id,
children=[],
)
for child in node.child_id:
menu_node['children'].append(make_tree(child))
return menu_node
menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
return make_tree(menu)
def save(self, cr, uid, website_id, data, context=None):
def replace_id(old_id, new_id):
for menu in data['data']:
if menu['id'] == old_id:
menu['id'] = new_id
if menu['parent_id'] == old_id:
menu['parent_id'] = new_id
to_delete = data['to_delete']
if to_delete:
self.unlink(cr, uid, to_delete, context=context)
for menu in data['data']:
mid = menu['id']
if isinstance(mid, str):
new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
replace_id(mid, new_id)
for menu in data['data']:
self.write(cr, uid, [menu['id']], menu, context=context)
return True
class ir_attachment(osv.osv):
_inherit = "ir.attachment"
def _website_url_get(self, cr, uid, ids, name, arg, context=None):
result = {}
for attach in self.browse(cr, uid, ids, context=context):
if attach.url:
result[attach.id] = attach.url
else:
result[attach.id] = self.pool['website'].image_url(cr, uid, attach, 'datas')
return result
def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, False)
attachments = self.read(cr, uid, ids, ['res_model'], context=context)
view_attachment_ids = [attachment['id'] for attachment in attachments if attachment['res_model'] == 'ir.ui.view']
for attach in self.read(cr, uid, view_attachment_ids, ['res_model', 'res_id', 'type', 'datas'], context=context):
result[attach['id']] = self._compute_checksum(attach)
return result
def _compute_checksum(self, attachment_dict):
if attachment_dict.get('res_model') == 'ir.ui.view'\
and not attachment_dict.get('res_id') and not attachment_dict.get('url')\
and attachment_dict.get('type', 'binary') == 'binary'\
and attachment_dict.get('datas'):
return hashlib.new('sha1', attachment_dict['datas']).hexdigest()
return None
def _datas_big(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, False)
if context and context.get('bin_size'):
return result
for record in self.browse(cr, uid, ids, context=context):
if record.res_model != 'ir.ui.view' or not record.datas: continue
try:
result[record.id] = openerp.tools.image_resize_image_big(record.datas)
except IOError: # apparently the error PIL.Image.open raises
pass
return result
_columns = {
'datas_checksum': fields.function(_datas_checksum, size=40,
string="Datas checksum", type='char', store=True, select=True),
'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
'datas_big': fields.function (_datas_big, type='binary', store=True,
string="Resized file content"),
'mimetype': fields.char('Mime Type', readonly=True),
}
def _add_mimetype_if_needed(self, values):
if values.get('datas_fname'):
values['mimetype'] = mimetypes.guess_type(values.get('datas_fname'))[0] or 'application/octet-stream'
def create(self, cr, uid, values, context=None):
chk = self._compute_checksum(values)
if chk:
match = self.search(cr, uid, [('datas_checksum', '=', chk)], context=context)
if match:
return match[0]
self._add_mimetype_if_needed(values)
return super(ir_attachment, self).create(
cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
self._add_mimetype_if_needed(values)
return super(ir_attachment, self).write(cr, uid, ids, values, context=context)
def try_remove(self, cr, uid, ids, context=None):
""" Removes a web-based image attachment if it is used by no view
(template)
Returns a dict mapping the attachments that would not be removed (if any)
to the views preventing their removal
"""
Views = self.pool['ir.ui.view']
attachments_to_remove = []
# views blocking removal of the attachment
removal_blocked_by = {}
for attachment in self.browse(cr, uid, ids, context=context):
# in-document URLs are html-escaped, a straight search will not
# find them
url = escape(attachment.website_url)
ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)
if ids:
removal_blocked_by[attachment.id] = Views.read(
cr, uid, ids, ['name'], context=context)
else:
attachments_to_remove.append(attachment.id)
if attachments_to_remove:
self.unlink(cr, uid, attachments_to_remove, context=context)
return removal_blocked_by
class res_partner(osv.osv):
_inherit = "res.partner"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'size': "%sx%s" % (height, width),
'zoom': zoom,
'sensor': 'false',
}
return urlplus('//maps.googleapis.com/maps/api/staticmap' , params)
def google_map_link(self, cr, uid, ids, zoom=10, context=None):
partner = self.browse(cr, uid, ids[0], context=context)
params = {
'q': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
'z': zoom,
}
return urlplus('https://maps.google.com/maps' , params)
class res_company(osv.osv):
_inherit = "res.company"
def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_img(zoom, width, height, context=context) or None
def google_map_link(self, cr, uid, ids, zoom=8, context=None):
partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
return partner and partner.google_map_link(zoom, context=context) or None
class base_language_install(osv.osv_memory):
_inherit = "base.language.install"
_columns = {
'website_ids': fields.many2many('website', string='Websites to translate'),
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
website_id = context.get('params', {}).get('website_id')
if website_id:
if 'website_ids' not in defaults:
defaults['website_ids'] = []
defaults['website_ids'].append(website_id)
return defaults
def lang_install(self, cr, uid, ids, context=None):
if context is None:
context = {}
action = super(base_language_install, self).lang_install(cr, uid, ids, context)
language_obj = self.browse(cr, uid, ids)[0]
website_ids = [website.id for website in language_obj['website_ids']]
lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
if website_ids and lang_id:
data = {'language_ids': [(4, lang_id[0])]}
self.pool['website'].write(cr, uid, website_ids, data)
params = context.get('params', {})
if 'url_return' in params:
return {
'url': params['url_return'].replace('[lang]', language_obj['lang']),
'type': 'ir.actions.act_url',
'target': 'self'
}
return action
class website_seo_metadata(osv.Model):
_name = 'website.seo.metadata'
_description = 'SEO metadata'
_columns = {
'website_meta_title': fields.char("Website meta title", translate=True),
'website_meta_description': fields.text("Website meta description", translate=True),
'website_meta_keywords': fields.char("Website meta keywords", translate=True),
}
# vim:et:
|
liukaijv/XlsxWriter | refs/heads/master | xlsxwriter/test/worksheet/test_write_sheet_pr.py | 8 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteSheetPr(unittest.TestCase):
"""
Test the Worksheet _write_sheet_pr() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_pr_fit_to_page(self):
"""Test the _write_sheet_pr() method"""
self.worksheet.fit_to_pages(1, 1)
self.worksheet._write_sheet_pr()
exp = """<sheetPr><pageSetUpPr fitToPage="1"/></sheetPr>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_pr_tab_color(self):
"""Test the _write_sheet_pr() method"""
self.worksheet.set_tab_color('red')
self.worksheet._write_sheet_pr()
exp = """<sheetPr><tabColor rgb="FFFF0000"/></sheetPr>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_pr_both(self):
"""Test the _write_sheet_pr() method"""
self.worksheet.set_tab_color('red')
self.worksheet.fit_to_pages(1, 1)
self.worksheet._write_sheet_pr()
exp = """<sheetPr><tabColor rgb="FFFF0000"/><pageSetUpPr fitToPage="1"/></sheetPr>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
johmathe/keras | refs/heads/master | keras/models.py | 1 | from __future__ import absolute_import
from __future__ import print_function
import cPickle as pickle
import numpy as np
import theano
import theano.tensor as T
import warnings, time, copy, pprint
from six.moves import range
import six
from . import optimizers
from . import objectives
from . import regularizers
from . import constraints
from . import callbacks as cbks
from .utils.layer_utils import container_from_config
from .utils.generic_utils import Progbar, printv
from .layers import containers
def standardize_y(y):
if not hasattr(y, 'shape'):
y = np.asarray(y)
if len(y.shape) == 1:
y = np.expand_dims(y, 1)
return y
def batch_shuffle(index_array, batch_size):
batch_count = int(len(index_array)/batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count*batch_size:]
index_array = index_array[:batch_count*batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def make_batches(size, batch_size):
nb_batch = int(np.ceil(size/float(batch_size)))
return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(0, nb_batch)]
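# Example (follows directly from the definition above): make_batches(10, 4) == [(0, 4), (4, 8), (8, 10)]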
def standardize_X(X):
if type(X) == list:
return X
else:
return [X]
def slice_X(X, start=None, stop=None):
if type(X) == list:
if hasattr(start, '__len__'):
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
return X[start]
else:
return X[start:stop]
def weighted_objective(fn):
def weighted(y_true, y_pred, weights, mask=None):
# it's important that 0 * Inf == 0, not NaN, so we need to filter
# those out first
filtered_y_true = y_true[weights.nonzero()[:-1]]
filtered_y_pred = y_pred[weights.nonzero()[:-1]]
filtered_weights = weights[weights.nonzero()]
obj_output = fn(filtered_y_true, filtered_y_pred)
weighted = filtered_weights * obj_output
if mask is None:
# Instead of calling mean() here, we divide by the sum of filtered_weights.
return weighted.sum() / filtered_weights.sum()
else:
filtered_mask = mask[weights.nonzero()[:-1]]
return weighted.sum() / (filtered_mask * filtered_weights).sum()
return weighted
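# Note (descriptive comment, added for clarity): weighted_objective drops samples whose
# weight is zero before computing the loss, so 0 * Inf never produces NaN, and then
# normalises by the sum of the remaining weights (times the mask, when one is given).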
def standardize_weights(y, sample_weight=None, class_weight=None):
if sample_weight is not None:
return standardize_y(sample_weight)
elif isinstance(class_weight, dict):
if len(y.shape) > 3:
raise Exception('class_weight not supported for 4+ dimensional targets.')
yshape = y.shape
y = np.reshape(y, (-1, yshape[-1])) # for time-distributed data, collapse time and sample
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
class_weights = np.asarray([class_weight[cls] for cls in y_classes])
return np.reshape(class_weights, yshape[:-1] + (1,)) # uncollapse initial dimensions
else:
return np.ones(y.shape[:-1] + (1,))
def model_from_yaml(yaml_string, custom_layers={}):
'''
Returns a model generated from a YAML string, which is either written
by hand or produced by the to_yaml method of Sequential or Graph
'''
import yaml
config = yaml.load(yaml_string)
return model_from_config(config, custom_layers=custom_layers)
def model_from_json(json_string, custom_layers={}):
import json
config = json.loads(json_string)
return model_from_config(config, custom_layers=custom_layers)
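# Illustrative round-trip (a hedged sketch, not part of this file; assumes `model` is an
# already-built Sequential or Graph instance):
#   json_string = model.to_json()
#   model_copy = model_from_json(json_string)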
def model_from_config(config, custom_layers={}):
model_name = config.get('name')
if model_name not in {'Graph', 'Sequential'}:
raise Exception('Unrecognized model:', model_name)
# Create a container then set class to appropriate model
model = container_from_config(config, custom_layers=custom_layers)
if model_name == 'Graph':
model.__class__ = Graph
elif model_name == 'Sequential':
model.__class__ = Sequential
if 'optimizer' in config:
# if it has an optimizer, the model is assumed to be compiled
loss = config.get('loss')
class_mode = config.get('class_mode')
theano_mode = config.get('theano_mode')
optimizer_params = dict([(k, v) for k, v in config.get('optimizer').items()])
optimizer_name = optimizer_params.pop('name')
optimizer = optimizers.get(optimizer_name, optimizer_params)
if model_name == 'Sequential':
model.compile(loss=loss, optimizer=optimizer, class_mode=class_mode, theano_mode=theano_mode)
elif model_name == 'Graph':
model.compile(loss=loss, optimizer=optimizer, theano_mode=theano_mode)
return model
def get_function_name(o):
if isinstance(o, six.string_types):
return o
else:
return o.__name__
class Model(object):
def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True, metrics=[]):
'''
Abstract fit function for f(*ins). Assume that f returns a list, labelled by out_labels.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
nb_train_sample = len(ins[0])
index_array = np.arange(nb_train_sample)
history = cbks.History()
if verbose:
callbacks = [history, cbks.BaseLogger()] + callbacks
else:
callbacks = [history] + callbacks
callbacks = cbks.CallbackList(callbacks)
callbacks._set_model(self)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': metrics,
})
callbacks.on_train_begin()
self.stop_training = False
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
ins_batch = slice_X(ins, batch_ids)
except TypeError as err:
raise Exception('TypeError while preparing batch. \
If using HDF5 input data, pass shuffle="batch".\n')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(*ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
epoch_logs = {}
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if self.stop_training:
break
callbacks.on_train_end()
return history
def _predict_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
return outs
def _test_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for batch_out in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
return outs
def get_config(self, verbose=0):
config = super(Model, self).get_config()
for p in ['class_mode', 'theano_mode']:
if hasattr(self, p):
config[p] = getattr(self, p)
if hasattr(self, 'optimizer'):
config['optimizer'] = self.optimizer.get_config()
if hasattr(self, 'loss'):
if type(self.loss) == dict:
config['loss'] = dict([(k, get_function_name(v)) for k, v in self.loss.items()])
else:
config['loss'] = get_function_name(self.loss)
if verbose:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
return config
def to_yaml(self, **kwargs):
# dump model configuration to yaml string
import yaml
config = self.get_config()
return yaml.dump(config, **kwargs)
def to_json(self, **kwargs):
# dump model configuration to json string
import json
config = self.get_config()
return json.dumps(config, **kwargs)
class Sequential(Model, containers.Sequential):
'''
Inherits from Model the following methods:
- _fit
- _predict
- _evaluate
Inherits from containers.Sequential the following methods:
- __init__
- add
- get_output
- get_input
- get_weights
- set_weights
'''
def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
self.optimizer = optimizers.get(optimizer)
self.loss = objectives.get(loss)
weighted_loss = weighted_objective(objectives.get(loss))
# input of model
self.X_train = self.get_input(train=True)
self.X_test = self.get_input(train=False)
self.y_train = self.get_output(train=True)
self.y_test = self.get_output(train=False)
# target of model
self.y = T.zeros_like(self.y_train)
self.weights = T.ones_like(self.y_train)
if hasattr(self.layers[-1], "get_output_mask"):
mask = self.layers[-1].get_output_mask()
else:
mask = None
train_loss = weighted_loss(self.y, self.y_train, self.weights, mask)
test_loss = weighted_loss(self.y, self.y_test, self.weights, mask)
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
self.y.name = 'y'
if class_mode == "categorical":
train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))
elif class_mode == "binary":
train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
else:
raise Exception("Invalid class mode:" + str(class_mode))
self.class_mode = class_mode
self.theano_mode = theano_mode
for r in self.regularizers:
train_loss = r(train_loss)
updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
updates += self.updates
if type(self.X_train) == list:
train_ins = self.X_train + [self.y, self.weights]
test_ins = self.X_test + [self.y, self.weights]
predict_ins = self.X_test
else:
train_ins = [self.X_train, self.y, self.weights]
test_ins = [self.X_test, self.y, self.weights]
predict_ins = [self.X_test]
self._train = theano.function(train_ins, train_loss, updates=updates,
allow_input_downcast=True, mode=theano_mode)
self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy], updates=updates,
allow_input_downcast=True, mode=theano_mode)
self._predict = theano.function(predict_ins, self.y_test,
allow_input_downcast=True, mode=theano_mode)
self._test = theano.function(test_ins, test_loss,
allow_input_downcast=True, mode=theano_mode)
self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
allow_input_downcast=True, mode=theano_mode)
def train_on_batch(self, X, y, accuracy=False, class_weight=None, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)
ins = X + [y, sample_weight]
if accuracy:
return self._train_with_acc(*ins)
else:
return self._train(*ins)
def test_on_batch(self, X, y, accuracy=False, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, sample_weight=sample_weight)
ins = X + [y, sample_weight]
if accuracy:
return self._test_with_acc(*ins)
else:
return self._test(*ins)
def predict_on_batch(self, X):
ins = standardize_X(X)
return self._predict(*ins)
def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True, show_accuracy=False,
class_weight=None, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
val_f = None
val_ins = None
if validation_data or validation_split:
if show_accuracy:
val_f = self._test_with_acc
else:
val_f = self._test
if validation_data:
if len(validation_data) == 2:
X_val, y_val = validation_data
X_val = standardize_X(X_val)
y_val = standardize_y(y_val)
sample_weight_val = np.ones(y_val.shape[:-1] + (1,))
elif len(validation_data) == 3:
X_val, y_val, sample_weight_val = validation_data
X_val = standardize_X(X_val)
y_val = standardize_y(y_val)
sample_weight_val = standardize_weights(y_val, sample_weight=sample_weight_val)
else:
raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val) or (X_val, y_val, sample_weight). \
X_val may be a numpy array or a list of numpy arrays depending on your model input.")
val_ins = X_val + [y_val, sample_weight_val]
elif 0 < validation_split < 1:
split_at = int(len(X[0]) * (1 - validation_split))
X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
if sample_weight is not None:
sample_weight, sample_weight_val = (slice_X(sample_weight, 0, split_at), slice_X(sample_weight, split_at))
sample_weight_val = standardize_weights(y_val, sample_weight=sample_weight_val)
else:
sample_weight_val = np.ones(y_val.shape[:-1] + (1,))
val_ins = X_val + [y_val, sample_weight_val]
if show_accuracy:
f = self._train_with_acc
out_labels = ['loss', 'acc']
else:
f = self._train
out_labels = ['loss']
sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)
ins = X + [y, sample_weight]
metrics = ['loss', 'acc', 'val_loss', 'val_acc']
return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins,
shuffle=shuffle, metrics=metrics)
def predict(self, X, batch_size=128, verbose=0):
X = standardize_X(X)
return self._predict_loop(self._predict, X, batch_size, verbose)[0]
def predict_proba(self, X, batch_size=128, verbose=1):
preds = self.predict(X, batch_size, verbose)
if preds.min() < 0 or preds.max() > 1:
warnings.warn("Network returning invalid probability values.")
return preds
def predict_classes(self, X, batch_size=128, verbose=1):
proba = self.predict(X, batch_size=batch_size, verbose=verbose)
if self.class_mode == "categorical":
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1, sample_weight=None):
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, sample_weight=sample_weight)
ins = X + [y, sample_weight]
if show_accuracy:
f = self._test_with_acc
else:
f = self._test
outs = self._test_loop(f, ins, batch_size, verbose)
if show_accuracy:
return outs
else:
return outs[0]
def save_weights(self, filepath, overwrite=False):
# Save weights from all layers to HDF5
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
f.attrs['nb_layers'] = len(self.layers)
for k, l in enumerate(self.layers):
g = f.create_group('layer_{}'.format(k))
weights = l.get_weights()
g.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
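# For reference, the HDF5 layout written above (sketch derived from the code;
# the same names are read back by load_weights below):
#
#   /                  attrs: nb_layers
#   /layer_0           attrs: nb_params
#   /layer_0/param_0   (weight array), /layer_0/param_1, ...
#   /layer_1           ...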
def load_weights(self, filepath):
'''
This method does not make use of Sequential.set_weights()
for backwards compatibility.
'''
# Loads weights from HDF5 file
import h5py
f = h5py.File(filepath)
for k in range(f.attrs['nb_layers']):
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
self.layers[k].set_weights(weights)
f.close()
def synchronize_weights(self, socket):
w_layers = []
for k, l in enumerate([l for l in self.layers if l.has_consensus()]):
w_layers.append(l.get_weights())
# TODO(johmathe): Smarter serialization
weights = pickle.dumps(w_layers)
socket.send(weights)
# Receive and deserialize new weights
unpickled = socket.recv()
consensus_vector = pickle.loads(unpickled)
for k, l in enumerate([l for l in self.layers if l.has_consensus()]):
l.set_consensus(consensus_vector[k])
l.dual_update()
class Graph(Model, containers.Graph):
def compile(self, optimizer, loss, theano_mode=None):
# loss is a dictionary mapping output name to loss functions
ys = []
ys_train = []
ys_test = []
weights = []
train_loss = 0.
test_loss = 0.
for output_name in self.output_order:
loss_fn = loss[output_name]
output = self.outputs[output_name]
y_train = output.get_output(True)
y_test = output.get_output(False)
y = T.zeros_like(y_test)
ys.append(y)
ys_train.append(y_train)
ys_test.append(y_test)
if hasattr(output, "get_output_mask"):
mask = output.get_output_mask()
else:
mask = None
weight = T.ones_like(y_test)
weights.append(weight)
weighted_loss = weighted_objective(objectives.get(loss_fn))
train_loss += weighted_loss(y, y_train, weight, mask)
test_loss += weighted_loss(y, y_test, weight, mask)
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
ins = [self.inputs[name].input for name in self.input_order]
train_ins = ins + ys + weights
test_ins = ins + ys + weights
for r in self.regularizers:
train_loss = r(train_loss)
self.optimizer = optimizers.get(optimizer)
updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
updates += self.updates
self.theano_mode = theano_mode
self.loss = loss
self._train = theano.function(train_ins, train_loss, updates=updates,
allow_input_downcast=True, mode=theano_mode)
self._test = theano.function(test_ins, test_loss,
allow_input_downcast=True, mode=theano_mode)
self._predict = theano.function(inputs=ins, outputs=ys_test,
allow_input_downcast=True, mode=theano_mode)
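# Hedged sketch of how a compiled Graph is typically driven; 'in' and 'out'
# are assumed names and must match the input/output names the graph container
# was built with:
#
#   graph.compile(optimizer='rmsprop', loss={'out': 'mse'})
#   graph.fit({'in': X_train, 'out': y_train}, nb_epoch=10)
#   preds = graph.predict({'in': X_test})['out']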
def train_on_batch(self, data, class_weight={}, sample_weight={}):
# data is a dictionary mapping output and input names to arrays
sample_weight = [standardize_weights(data[name],
sample_weight=sample_weight.get(name),
class_weight=class_weight.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
return self._train(*ins)
def test_on_batch(self, data, sample_weight={}):
# data is a dictionary mapping input names to arrays
sample_weight = [standardize_weights(data[name],
sample_weight=sample_weight.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
return self._test(*ins)
def predict_on_batch(self, data):
# data is a dictionary mapping input names to arrays
ins = [data[name] for name in self.input_order]
return self._predict(*ins)
def fit(self, data, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True, class_weight={}, sample_weight={}):
X = [data[name] for name in self.input_order]
y = [standardize_y(data[name]) for name in self.output_order]
sample_weight_list = [standardize_weights(y[i],
sample_weight=sample_weight.get(self.output_order[i])) for i in range(len(self.output_order))]
class_weight_list = [class_weight.get(name) for name in self.output_order]
val_f = None
val_ins = None
if validation_data or validation_split:
val_f = self._test
if validation_data:
# can't use sample weights with validation data at this point
sample_weight = [standardize_weights(validation_data[name]) for name in self.output_order]
val_ins = [validation_data[name] for name in self.input_order] + [standardize_y(validation_data[name]) for name in self.output_order] + sample_weight
elif 0 < validation_split < 1:
split_at = int(len(X[0]) * (1 - validation_split))
X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weight_list, sample_weight_list_val = (slice_X(sample_weight_list, 0, split_at), slice_X(sample_weight_list, split_at))
val_ins = X_val + y_val + sample_weight_list_val
f = self._train
out_labels = ['loss']
metrics = ['loss', 'val_loss']
sample_weight_list = [standardize_weights(y[i],
sample_weight=sample_weight_list[i],
class_weight=class_weight_list[i]) for i in range(len(self.output_order))]
ins = X + y + sample_weight_list
history = self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins,
shuffle=shuffle, metrics=metrics)
return history
def evaluate(self, data, batch_size=128, verbose=0, sample_weight={}):
sample_weight = [standardize_weights(data[name],
sample_weight=sample_weight.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
outs = self._test_loop(self._test, ins, batch_size, verbose)
return outs[0]
def predict(self, data, batch_size=128, verbose=0):
ins = [data[name] for name in self.input_order]
outs = self._predict_loop(self._predict, ins, batch_size, verbose)
return dict(zip(self.output_order, outs))
def save_weights(self, filepath, overwrite=False):
# Save weights from all layers to HDF5
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
g = f.create_group('graph')
weights = self.get_weights()
g.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
def load_weights(self, filepath):
# Loads weights from HDF5 file
import h5py
f = h5py.File(filepath)
g = f['graph']
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
self.set_weights(weights)
f.close()
|
AunShiLord/sympy | refs/heads/master | sympy/matrices/expressions/tests/test_slice.py | 119 | from sympy.matrices.expressions.slice import MatrixSlice
from sympy.matrices.expressions import MatrixSymbol
from sympy.abc import a, b, c, d, k, l, m, n
from sympy.utilities.pytest import raises, XFAIL
from sympy.functions.elementary.integers import floor
from sympy.assumptions import assuming, Q
X = MatrixSymbol('X', n, m)
Y = MatrixSymbol('Y', m, k)
def test_shape():
B = MatrixSlice(X, (a, b), (c, d))
assert B.shape == (b - a, d - c)
def test_entry():
B = MatrixSlice(X, (a, b), (c, d))
assert B[0,0] == X[a, c]
assert B[k,l] == X[a+k, c+l]
raises(IndexError, lambda : MatrixSlice(X, 1, (2, 5))[1, 0])
assert X[1::2, :][1, 3] == X[1+2, 3]
assert X[:, 1::2][3, 1] == X[3, 1+2]
def test_on_diag():
assert not MatrixSlice(X, (a, b), (c, d)).on_diag
assert MatrixSlice(X, (a, b), (a, b)).on_diag
def test_inputs():
assert MatrixSlice(X, 1, (2, 5)) == MatrixSlice(X, (1, 2), (2, 5))
assert MatrixSlice(X, 1, (2, 5)).shape == (1, 3)
def test_slicing():
assert X[1:5, 2:4] == MatrixSlice(X, (1, 5), (2, 4))
assert X[1, 2:4] == MatrixSlice(X, 1, (2, 4))
assert X[1:5, :].shape == (4, X.shape[1])
assert X[:, 1:5].shape == (X.shape[0], 4)
assert X[::2, ::2].shape == (floor(n/2), floor(m/2))
assert X[2, :] == MatrixSlice(X, 2, (0, m))
assert X[k, :] == MatrixSlice(X, k, (0, m))
def test_exceptions():
X = MatrixSymbol('x', 10, 20)
raises(IndexError, lambda: X[0:12, 2])
raises(IndexError, lambda: X[0:9, 22])
raises(IndexError, lambda: X[-1:5, 2])
@XFAIL
def test_symmetry():
X = MatrixSymbol('x', 10, 10)
Y = X[:5, 5:]
with assuming(Q.symmetric(X)):
assert Y.T == X[5:, :5]
def test_slice_of_slice():
X = MatrixSymbol('x', 10, 10)
assert X[2, :][:, 3][0, 0] == X[2, 3]
assert X[:5, :5][:4, :4] == X[:4, :4]
assert X[1:5, 2:6][1:3, 2] == X[2:4, 4]
assert X[1:9:2, 2:6][1:3, 2] == X[3:7:2, 4]
def test_negative_index():
X = MatrixSymbol('x', 10, 10)
assert X[-1, :] == X[9, :]
|
DESHRAJ/fjord | refs/heads/master | vendor/packages/polib/tests/tests.py | 3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import subprocess
import sys
import tempfile
import unittest
sys.path.insert(1, os.path.abspath('.'))
import polib
from polib import u
class TestFunctions(unittest.TestCase):
def test_pofile_and_mofile1(self):
"""
Test bad usage of pofile/mofile.
"""
data = u('''# test for pofile/mofile with string buffer
msgid ""
msgstr ""
"Project-Id-Version: django\n"
msgid "foo"
msgstr "bar"
''')
po = polib.pofile(data)
self.assertTrue(isinstance(po, polib.POFile))
self.assertEqual(po.encoding, 'utf-8')
self.assertEqual(po[0].msgstr, u("bar"))
def test_indented_pofile(self):
"""
Test that an indented pofile returns a POFile instance.
"""
po = polib.pofile('tests/test_indented.po')
self.assertTrue(isinstance(po, polib.POFile))
def test_pofile_and_mofile2(self):
"""
Test that the pofile function returns a POFile instance.
"""
po = polib.pofile('tests/test_utf8.po')
self.assertTrue(isinstance(po, polib.POFile))
def test_pofile_and_mofile3(self):
"""
Test that the mofile function returns a MOFile instance.
"""
mo = polib.mofile('tests/test_utf8.mo')
self.assertTrue(isinstance(mo, polib.MOFile))
def test_pofile_and_mofile4(self):
"""
Test that check_for_duplicates is passed to the instance.
"""
po = polib.pofile('tests/test_iso-8859-15.po', check_for_duplicates=True,
autodetect_encoding=False, encoding='iso-8859-15')
self.assertTrue(po.check_for_duplicates == True)
def test_pofile_and_mofile5(self):
"""
Test that detect_encoding works as expected.
"""
po = polib.pofile('tests/test_iso-8859-15.po')
self.assertTrue(po.encoding == 'ISO_8859-15')
def test_pofile_and_mofile6(self):
"""
Test that encoding is default_encoding when detect_encoding is False.
"""
po = polib.pofile('tests/test_noencoding.po')
self.assertTrue(po.encoding == 'utf-8')
def test_pofile_and_mofile7(self):
"""
Test that encoding is ok when the encoding is explicitly given.
"""
po = polib.pofile('tests/test_iso-8859-15.po', encoding='iso-8859-15')
self.assertTrue(po.encoding == 'iso-8859-15')
def test_pofile_and_mofile8(self):
"""
Test that weird occurrences are correctly parsed.
"""
po = polib.pofile('tests/test_weird_occurrences.po')
self.assertEqual(len(po), 46)
def test_pofile_and_mofile9(self):
"""
Test that obsolete previous msgids are ignored.
"""
po = polib.pofile('tests/test_obsolete_previousmsgid.po')
self.assertTrue(isinstance(po, polib.POFile))
def test_previous_msgid_1(self):
"""
Test previous msgid multiline.
"""
po = polib.pofile('tests/test_previous_msgid.po')
expected = "\nPartition table entries are not in disk order\n"
self.assertEquals(
po[0].previous_msgid,
expected
)
def test_previous_msgid_2(self):
"""
Test previous msgid single line.
"""
po = polib.pofile('tests/test_previous_msgid.po')
expected = "Partition table entries are not in disk order2\n"
self.assertEquals(
po[1].previous_msgid,
expected
)
def test_previous_msgctxt_1(self):
"""
Test previous msgctxt multiline.
"""
po = polib.pofile('tests/test_previous_msgid.po')
expected = "\nSome message context"
self.assertEquals(
po[0].previous_msgctxt,
expected
)
def test_previous_msgctxt_2(self):
"""
Test previous msgctxt single line.
"""
po = polib.pofile('tests/test_previous_msgid.po')
expected = "Some message context"
self.assertEquals(
po[1].previous_msgctxt,
expected
)
def test_unescaped_double_quote1(self):
"""
Test that polib reports an error when unescaped double quote is found.
"""
data = r'''
msgid "Some msgid with \"double\" quotes"
msgid "Some msgstr with "double\" quotes"
'''
try:
po = polib.pofile(data)
self.fail("Unescaped quote not detected")
except IOError:
exc = sys.exc_info()[1]
msg = 'Syntax error in po file None (line 3): unescaped double quote found'
self.assertEqual(str(exc), msg)
def test_unescaped_double_quote2(self):
"""
Test that polib reports an error when unescaped double quote is found.
"""
data = r'''
msgid "Some msgid with \"double\" quotes"
msgstr ""
"Some msgstr with "double\" quotes"
'''
try:
po = polib.pofile(data)
self.fail("Unescaped quote not detected")
except IOError:
exc = sys.exc_info()[1]
msg = 'Syntax error in po file None (line 4): unescaped double quote found'
self.assertEqual(str(exc), msg)
def test_unescaped_double_quote3(self):
"""
Test that polib reports an error when unescaped double quote is found at the beginning of the string.
"""
data = r'''
msgid "Some msgid with \"double\" quotes"
msgid ""Some msgstr with double\" quotes"
'''
try:
po = polib.pofile(data)
self.fail("Unescaped quote not detected")
except IOError:
exc = sys.exc_info()[1]
msg = 'Syntax error in po file None (line 3): unescaped double quote found'
self.assertEqual(str(exc), msg)
def test_unescaped_double_quote4(self):
"""
Test that polib reports an error when unescaped double quote is found at the beginning of the string.
"""
data = r'''
msgid "Some msgid with \"double\" quotes"
msgstr ""
""Some msgstr with double\" quotes"
'''
try:
po = polib.pofile(data)
self.fail("Unescaped quote not detected")
except IOError:
exc = sys.exc_info()[1]
msg = 'Syntax error in po file None (line 4): unescaped double quote found'
self.assertEqual(str(exc), msg)
def test_detect_encoding1(self):
"""
Test that the given encoding is returned when the file has no encoding defined.
"""
self.assertEqual(polib.detect_encoding('tests/test_noencoding.po'), 'utf-8')
def test_detect_encoding2(self):
"""
Test with a .pot file.
"""
self.assertEqual(polib.detect_encoding('tests/test_merge.pot'), 'utf-8')
def test_detect_encoding3(self):
"""
Test with an utf8 .po file.
"""
self.assertEqual(polib.detect_encoding('tests/test_utf8.po'), 'UTF-8')
def test_detect_encoding4(self):
"""
Test with utf8 data (no file).
"""
if polib.PY3:
f = open('tests/test_utf8.po', 'rb')
data = str(f.read(), 'utf-8')
else:
f = open('tests/test_utf8.po', 'r')
data = f.read()
try:
self.assertEqual(polib.detect_encoding(data), 'UTF-8')
finally:
f.close()
def test_detect_encoding5(self):
"""
Test with utf8 .mo file.
"""
self.assertEqual(polib.detect_encoding('tests/test_utf8.mo', True), 'UTF-8')
def test_detect_encoding6(self):
"""
Test with iso-8859-15 .po file.
"""
self.assertEqual(polib.detect_encoding('tests/test_iso-8859-15.po'), 'ISO_8859-15')
def test_detect_encoding7(self):
"""
Test with iso-8859-15 .mo file.
"""
self.assertEqual(polib.detect_encoding('tests/test_iso-8859-15.mo', True), 'ISO_8859-15')
def test_escape(self):
"""
Tests the escape function.
"""
self.assertEqual(
polib.escape('\\t and \\n and \\r and " and \\ and \\\\'),
'\\\\t and \\\\n and \\\\r and \\" and \\\\ and \\\\\\\\'
)
def test_unescape(self):
"""
Tests the unescape function.
"""
self.assertEqual(
polib.unescape('\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\'),
'\\t and \\n and \\r and \\" and \\\\'
)
def test_pofile_with_subclass(self):
"""
Test that the pofile function correctly returns an instance of the
passed in class
"""
class CustomPOFile(polib.POFile):
pass
pofile = polib.pofile('tests/test_indented.po', klass=CustomPOFile)
self.assertEqual(pofile.__class__, CustomPOFile)
def test_mofile_with_subclass(self):
"""
Test that the mofile function correctly returns an instance of the
passed in class
"""
class CustomMOFile(polib.MOFile):
pass
mofile = polib.mofile('tests/test_utf8.mo', klass=CustomMOFile)
self.assertEqual(mofile.__class__, CustomMOFile)
def test_empty(self):
po = polib.pofile('')
self.assertEqual(po.__unicode__(), '# \nmsgid ""\nmsgstr ""\n')
def test_linenum_1(self):
po = polib.pofile('tests/test_utf8.po')
self.assertEqual(po[0].linenum, 18)
def test_linenum_2(self):
po = polib.pofile('tests/test_utf8.po')
self.assertEqual(po.find('XML text').linenum, 1799)
def test_linenum_3(self):
po = polib.pofile('tests/test_utf8.po')
self.assertEqual(po[-1].linenum, 3478)
class TestBaseFile(unittest.TestCase):
"""
Tests for the _BaseFile class.
"""
def test_append1(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = polib.POEntry(msgid="Foo", msgstr="Bar", msgctxt="Some context")
pofile.append(entry)
self.assertTrue(entry in pofile)
def test_append2(self):
def add_duplicate():
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
pofile.append(polib.POEntry(msgid="and"))
self.assertRaises(ValueError, add_duplicate)
def test_append3(self):
def add_duplicate():
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
pofile.append(polib.POEntry(msgid="and", msgctxt="some context"))
self.assertRaises(ValueError, add_duplicate)
def test_append4(self):
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
entry = polib.POEntry(msgid="and", msgctxt="some different context")
pofile.append(entry)
self.assertTrue(entry in pofile)
def test_insert1(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = polib.POEntry(msgid="Foo", msgstr="Bar", msgctxt="Some context")
pofile.insert(0, entry)
self.assertEqual(pofile[0], entry)
def test_insert2(self):
def add_duplicate():
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
pofile.insert(0, polib.POEntry(msgid="and", msgstr="y"))
self.assertRaises(ValueError, add_duplicate)
def test_metadata_as_entry(self):
pofile = polib.pofile('tests/test_fuzzy_header.po')
f = open('tests/test_fuzzy_header.po')
lines = f.readlines()[2:]
f.close()
self.assertEqual(pofile.metadata_as_entry().__unicode__(), "".join(lines))
def test_find1(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = pofile.find('and')
self.assertEqual(entry.msgstr, u('y'))
def test_find2(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = pofile.find('pacote', by="msgstr")
self.assertEqual(entry, None)
def test_find3(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = pofile.find('package', include_obsolete_entries=True)
self.assertEqual(entry.msgstr, u('pacote'))
def test_find4(self):
pofile = polib.pofile('tests/test_utf8.po')
entry1 = pofile.find('test context', msgctxt='@context1')
entry2 = pofile.find('test context', msgctxt='@context2')
self.assertEqual(entry1.msgstr, u('test context 1'))
self.assertEqual(entry2.msgstr, u('test context 2'))
def test_save1(self):
pofile = polib.POFile()
self.assertRaises(IOError, pofile.save)
def test_save2(self):
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
try:
pofile = polib.POFile()
pofile.save(tmpfile)
pofile.save()
self.assertTrue(os.path.isfile(tmpfile))
finally:
os.remove(tmpfile)
def test_ordered_metadata(self):
pofile = polib.pofile('tests/test_fuzzy_header.po')
f = open('tests/test_fuzzy_header.po')
lines = f.readlines()[2:]
f.close()
mdata = [
('Project-Id-Version', u('PACKAGE VERSION')),
('Report-Msgid-Bugs-To', u('')),
('POT-Creation-Date', u('2010-02-08 16:57+0100')),
('PO-Revision-Date', u('YEAR-MO-DA HO:MI+ZONE')),
('Last-Translator', u('FULL NAME <EMAIL@ADDRESS>')),
('Language-Team', u('LANGUAGE <[email protected]>')),
('MIME-Version', u('1.0')),
('Content-Type', u('text/plain; charset=UTF-8')),
('Content-Transfer-Encoding', u('8bit'))
]
self.assertEqual(pofile.ordered_metadata(), mdata)
def test_unicode1(self):
pofile = polib.pofile('tests/test_merge_after.po')
f = codecs.open('tests/test_merge_after.po', encoding='utf8')
expected = f.read()
f.close()
self.assertEqual(pofile.__unicode__(), expected)
def test_unicode2(self):
pofile = polib.pofile('tests/test_iso-8859-15.po')
f = codecs.open('tests/test_iso-8859-15.po', encoding='iso-8859-15')
expected = f.read()
f.close()
self.assertEqual(pofile.__unicode__(), expected)
def test_str(self):
pofile = polib.pofile('tests/test_iso-8859-15.po')
if polib.PY3:
f = codecs.open('tests/test_iso-8859-15.po', encoding='iso-8859-15')
else:
f = open('tests/test_iso-8859-15.po')
expected = f.read()
f.close()
self.assertEqual(str(pofile), expected)
def test_wrapping(self):
pofile = polib.pofile('tests/test_wrap.po', wrapwidth=50)
expected = r'''# test wrapping
msgid ""
msgstr ""
msgid "This line will not be wrapped"
msgstr ""
msgid ""
"Some line that contain special characters \" and"
" that \t is very, very, very long...: %s \n"
msgstr ""
msgid ""
"Some line that contain special characters "
"\"foobar\" and that contains whitespace at the "
"end "
msgstr ""
'''
self.assertEqual(str(pofile), expected)
def test_sort(self):
a1 = polib.POEntry(msgid='a1', occurrences=[('b.py', 1), ('b.py', 3)])
a2 = polib.POEntry(msgid='a2')
a3 = polib.POEntry(msgid='a1', occurrences=[('b.py', 1), ('b.py', 3)], obsolete=True)
b1 = polib.POEntry(msgid='b1', occurrences=[('b.py', 1), ('b.py', 3)])
b2 = polib.POEntry(msgid='b2', occurrences=[('d.py', 3), ('b.py', 1)])
c1 = polib.POEntry(msgid='c1', occurrences=[('a.py', 1), ('b.py', 1)])
c2 = polib.POEntry(msgid='c2', occurrences=[('a.py', 1), ('a.py', 3)])
pofile = polib.POFile()
pofile.append(b1)
pofile.append(a3)
pofile.append(a2)
pofile.append(a1)
pofile.append(b2)
pofile.append(c1)
pofile.append(c2)
pofile.sort()
expected = u('''#
msgid ""
msgstr ""
msgid "a2"
msgstr ""
#: a.py:1 a.py:3
msgid "c2"
msgstr ""
#: a.py:1 b.py:1
msgid "c1"
msgstr ""
#: b.py:1 b.py:3
msgid "a1"
msgstr ""
#: b.py:1 b.py:3
msgid "b1"
msgstr ""
#: d.py:3 b.py:1
msgid "b2"
msgstr ""
#~ msgid "a1"
#~ msgstr ""
''')
self.assertEqual(pofile.__unicode__(), expected)
def test_trailing_comment(self):
pofile = polib.pofile('tests/test_trailing_comment.po')
expected = r'''#
msgid ""
msgstr "Content-Type: text/plain; charset=UTF-8\n"
msgid "foo"
msgstr "oof"
'''
self.assertEqual(str(pofile), expected)
class TestPoFile(unittest.TestCase):
"""
Tests for PoFile class.
"""
def test_save_as_mofile(self):
"""
Test for the POFile.save_as_mofile() method.
"""
import distutils.spawn
msgfmt = distutils.spawn.find_executable('msgfmt')
if msgfmt is None:
try:
return unittest.skip('msgfmt is not installed')
except AttributeError:
return
reffiles = ['tests/test_utf8.po', 'tests/test_iso-8859-15.po']
encodings = ['utf-8', 'iso-8859-15']
for reffile, encoding in zip(reffiles, encodings):
fd, tmpfile1 = tempfile.mkstemp()
os.close(fd)
fd, tmpfile2 = tempfile.mkstemp()
os.close(fd)
po = polib.pofile(reffile, autodetect_encoding=False, encoding=encoding)
po.save_as_mofile(tmpfile1)
subprocess.call([msgfmt, '--no-hash', '-o', tmpfile2, reffile])
try:
f = open(tmpfile1, 'rb')
s1 = f.read()
f.close()
f = open(tmpfile2, 'rb')
s2 = f.read()
f.close()
self.assertEqual(s1, s2)
finally:
os.remove(tmpfile1)
os.remove(tmpfile2)
def test_merge(self):
refpot = polib.pofile('tests/test_merge.pot')
po = polib.pofile('tests/test_merge_before.po')
po.merge(refpot)
expected_po = polib.pofile('tests/test_merge_after.po')
self.assertEqual(po, expected_po)
def test_percent_translated(self):
po = polib.pofile('tests/test_pofile_helpers.po')
self.assertEqual(po.percent_translated(), 53)
po = polib.POFile()
self.assertEqual(po.percent_translated(), 100)
def test_translated_entries(self):
po = polib.pofile('tests/test_pofile_helpers.po')
self.assertEqual(len(po.translated_entries()), 7)
def test_untranslated_entries(self):
po = polib.pofile('tests/test_pofile_helpers.po')
self.assertEqual(len(po.untranslated_entries()), 4)
def test_fuzzy_entries(self):
po = polib.pofile('tests/test_pofile_helpers.po')
self.assertEqual(len(po.fuzzy_entries()), 2)
def test_obsolete_entries(self):
po = polib.pofile('tests/test_pofile_helpers.po')
self.assertEqual(len(po.obsolete_entries()), 4)
def test_unusual_metadata_location(self):
po = polib.pofile('tests/test_unusual_metadata_location.po')
self.assertNotEqual(po.metadata, {})
self.assertEqual(po.metadata['Content-Type'], 'text/plain; charset=UTF-8')
def test_comment_starting_with_two_hashes(self):
po = polib.pofile('tests/test_utf8.po')
e = po.find("Some comment starting with two '#'", by='tcomment')
self.assertTrue(isinstance(e, polib.POEntry))
def test_word_garbage(self):
po = polib.pofile('tests/test_word_garbage.po')
e = po.find("Whatever", by='msgid')
self.assertTrue(isinstance(e, polib.POEntry))
class TestMoFile(unittest.TestCase):
"""
Tests for MoFile class.
"""
def test_dummy_methods(self):
"""
This is stupid and just here for code coverage.
"""
mo = polib.MOFile()
self.assertEqual(mo.percent_translated(), 100)
self.assertEqual(mo.translated_entries(), mo)
self.assertEqual(mo.untranslated_entries(), [])
self.assertEqual(mo.fuzzy_entries(), [])
self.assertEqual(mo.obsolete_entries(), [])
def test_save_as_pofile(self):
"""
Test for the MOFile.save_as_pofile() method.
"""
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
mo = polib.mofile('tests/test_utf8.mo', wrapwidth=78)
mo.save_as_pofile(tmpfile)
try:
if polib.PY3:
f = open(tmpfile, encoding='utf-8')
else:
f = open(tmpfile)
s1 = f.read()
f.close()
if polib.PY3:
f = open('tests/test_save_as_pofile.po', encoding='utf-8')
else:
f = open('tests/test_save_as_pofile.po')
s2 = f.read()
f.close()
self.assertEqual(s1, s2)
finally:
os.remove(tmpfile)
def test_msgctxt(self):
#import pdb; pdb.set_trace()
mo = polib.mofile('tests/test_msgctxt.mo')
expected = u('''msgid ""
msgstr "Content-Type: text/plain; charset=UTF-8\u005cn"
msgctxt "Some message context"
msgid "some string"
msgstr "une cha\u00eene"
msgctxt "Some other message context"
msgid "singular"
msgid_plural "plural"
msgstr[0] "singulier"
msgstr[1] "pluriel"
''')
self.assertEqual(mo.__unicode__(), expected)
def test_invalid_version(self):
self.assertRaises(IOError, polib.mofile, 'tests/test_invalid_version.mo')
def test_no_header(self):
mo = polib.mofile('tests/test_no_header.mo')
expected = u('''msgid ""
msgstr ""
msgid "bar"
msgstr "rab"
msgid "foo"
msgstr "oof"
''')
self.assertEqual(mo.__unicode__(), expected)
class TestTextWrap(unittest.TestCase):
def test_wrap1(self):
text = ' Some line that is longer than fifteen characters (whitespace will not be preserved) '
ret = polib.TextWrapper(width=15).wrap(text)
expected = [
' Some line', 'that is longer', 'than fifteen', 'characters',
'(whitespace', 'will not be', 'preserved)'
]
self.assertEqual(ret, expected)
def test_wrap2(self):
text = ' Some line that is longer than fifteen characters (whitespace will be preserved) '
ret = polib.TextWrapper(width=15, drop_whitespace=False).wrap(text)
expected = [
' Some line ', 'that is longer ', 'than fifteen ', 'characters ',
'(whitespace ', 'will be ', 'preserved) '
]
self.assertEqual(ret, expected)
if __name__ == '__main__':
unittest.main()
|
JT5D/scikit-learn | refs/heads/master | examples/tree/plot_tree_regression.py | 8 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and fits the noise, i.e. it overfits.
"""
print(__doc__)
import numpy as np
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
from sklearn.tree import DecisionTreeRegressor
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
# Plot the results
import pylab as pl
pl.figure()
pl.scatter(X, y, c="k", label="data")
pl.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
pl.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
pl.xlabel("data")
pl.ylabel("target")
pl.title("Decision Tree Regression")
pl.legend()
pl.show()
|
dfdx2/django | refs/heads/master | tests/migrations/migrations_test_apps/lookuperror_c/migrations/0002_c2.py | 133 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lookuperror_a', '0002_a2'),
('lookuperror_c', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='C2',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('a1', models.ForeignKey('lookuperror_a.A1', models.CASCADE)),
],
),
]
|
georgeyk/quickstartup | refs/heads/master | testproject/testapp.py | 1 | # coding: utf-8
from django.conf.urls import url
from django.http.response import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, "apps/index.html")
urlpatterns = [
url(r"^", index, name="index"),
]
|
smuzaffar/root | refs/heads/master | interpreter/llvm/src/utils/lint/common_lint.py | 147 | #!/usr/bin/python
#
# Common lint functions applicable to multiple types of files.
import re
def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint
def VerifyTabs(filename, lines):
"""Checks to make sure the file has no tab characters.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line_number, msg), ...] with any violations
found.
"""
lint = []
tab_re = re.compile(r'\t')
line_num = 1
for line in lines:
if tab_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Tab found instead of whitespace'))
line_num += 1
return lint
def VerifyTrailingWhitespace(filename, lines):
"""Checks to make sure the file has no lines with trailing whitespace.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
trailing_whitespace_re = re.compile(r'\s+$')
line_num = 1
for line in lines:
if trailing_whitespace_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Trailing whitespace'))
line_num += 1
return lint
class BaseLint:
def RunOnFile(self, filename, lines):
raise Exception('RunOnFile() unimplemented')
def RunLintOverAllFiles(linter, filenames):
"""Runs linter over the contents of all files.
Args:
linter: subclass of BaseLint, implementing RunOnFile()
filenames: list of all files whose contents will be linted
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
for filename in filenames:
file = open(filename, 'r')
if not file:
print 'Could not open %s' % filename
continue
lines = file.readlines()
lint.extend(linter.RunOnFile(filename, lines))
return lint
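# Hedged usage sketch: wiring the helpers above into a concrete linter.
# MyLint and the file list are invented for illustration.
#
#   class MyLint(BaseLint):
#       def RunOnFile(self, filename, lines):
#           lint = []
#           lint.extend(VerifyLineLength(filename, lines, 80))
#           lint.extend(VerifyTabs(filename, lines))
#           lint.extend(VerifyTrailingWhitespace(filename, lines))
#           return lint
#
#   problems = RunLintOverAllFiles(MyLint(), ['foo.cpp', 'bar.h'])
#   for filename, line_num, msg in problems:
#       print '%s:%d: %s' % (filename, line_num, msg)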
|
macobo/documentation | refs/heads/master | code_snippets/results/result.api-monitor-create.py | 1 | {'creator': {'email': '[email protected]',
'handle': '[email protected]',
'id': 1896,
'name': u'Matt'},
'id': 2081,
'message': 'We may need to add web hosts if this is consistently high.',
'name': 'Bytes received on host0',
'tags': ['app:webserver', 'frontend'],
'options': {'no_data_timeframe': 20,
'notify_audit': False,
'notify_no_data': True,
'silenced': {}},
'org_id': 2,
'overall_state': 'No Data',
'query': 'avg(last_1h):sum:system.net.bytes_rcvd{host:host0} > 100',
'type': 'metric alert',
'multi': False}
|
gnowxilef/Wox | refs/heads/master | PythonHome/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py | 1731 | from __future__ import absolute_import, division, unicode_literals
from . import _base
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class Filter(_base.Filter):
def __iter__(self):
for token in _base.Filter.__iter__(self):
if token["type"] in ("StartTag", "EmptyTag"):
attrs = OrderedDict()
for name, value in sorted(token["data"].items(),
key=lambda x: x[0]):
attrs[name] = value
token["data"] = attrs
yield token
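# Hedged usage sketch (not part of the original module): the filter wraps a
# tree-walker token stream ahead of serialization. The parser/walker/serializer
# calls below follow html5lib's public API as I understand it.
#
#   import html5lib
#   from html5lib.serializer import HTMLSerializer
#   doc = html5lib.parse('<p b="2" a="1">hi</p>')
#   walker = html5lib.getTreeWalker('etree')
#   HTMLSerializer().render(Filter(walker(doc)))  # attributes come out sorted: a, b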
|
globau/servo | refs/heads/master | tests/wpt/web-platform-tests/2dcontext/tools/specextract.py | 75 | import html5lib
import html5lib.treebuilders.dom
# Expected use:
# curl --compressed http://www.whatwg.org/specs/web-apps/current-work/ >current-work
# python specextract.py
#
# Generates current-work-canvas.xhtml, for use by gentest.py to create the annotated spec document
def extract():
parser = html5lib.html5parser.HTMLParser(tree=html5lib.treebuilders.dom.TreeBuilder)
doc = parser.parse(open('current-work', "r"), encoding='utf-8')
head = doc.getElementsByTagName('head')[0]
for n in head.childNodes:
if n.tagName == 'script':
head.removeChild(n)
header = doc.getElementsByTagName('header')[0]
#thecanvas = doc.getElementById('the-canvas') # doesn't work (?!)
thecanvas = [ n for n in doc.getElementsByTagName('h4') if n.getAttribute('id') == 'the-canvas-element' ][0]
keep = [header, thecanvas]
node = thecanvas.nextSibling
while node.nodeName != 'h4':
keep.append(node)
node = node.nextSibling
p = thecanvas.parentNode
for n in p.childNodes[:]:
if n not in keep:
p.removeChild(n)
for n in header.childNodes[3:-4]:
header.removeChild(n)
def make_absolute(uri):
if uri.startswith('data:'):
return uri
elif uri[0] == '/':
return 'http://www.whatwg.org' + uri
else:
return 'http://www.whatwg.org/specs/web-apps/current-work/' + uri
# Fix the stylesheet, icon and image references
for e in doc.getElementsByTagName('link'):
e.setAttribute('href', make_absolute(e.getAttribute('href')))
for img in doc.getElementsByTagName('img'):
img.setAttribute('src', make_absolute(img.getAttribute('src')))
# Convert to XHTML, because it's quicker to re-parse than HTML5
doc.documentElement.setAttribute('xmlns', 'http://www.w3.org/1999/xhtml')
doc.documentElement.setAttribute('xml:lang', doc.documentElement.getAttribute('lang'))
doc.removeChild(doc.firstChild) # remove the DOCTYPE
open('current-work-canvas.xhtml', 'w').write(doc.toxml(encoding = 'UTF-8'))
extract()
|
Alberto-Beralix/Beralix | refs/heads/master | i386-squashfs-root/usr/lib/python2.7/dist-packages/telepathy/_generated/Connection_Interface_Service_Point.py | 2 | ../../../../../share/pyshared/telepathy/_generated/Connection_Interface_Service_Point.py |
alexcuellar/odoo | refs/heads/8.0 | openerp/report/render/rml.py | 457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
import rml2pdf
import rml2html as htmlizer
import rml2txt as txtizer
import odt2odt as odt
import html2html as html
import makohtml2html as makohtml
class rml(render.render):
def __init__(self, rml, localcontext = None, datas=None, path='.', title=None):
render.render.__init__(self, datas, path)
self.localcontext = localcontext
self.rml = rml
self.output_type = 'pdf'
self.title=title
def _render(self):
return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title)
class rml2html(render.render):
def __init__(self, rml,localcontext = None, datas=None):
super(rml2html, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return htmlizer.parseString(self.rml,self.localcontext)
class rml2txt(render.render):
def __init__(self, rml, localcontext= None, datas=None):
super(rml2txt, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'txt'
def _render(self):
return txtizer.parseString(self.rml, self.localcontext)
class odt2odt(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'odt'
def _render(self):
return odt.parseNode(self.rml_dom,self.localcontext)
class html2html(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return html.parseString(self.rml_dom,self.localcontext)
class makohtml2html(render.render):
def __init__(self, html, localcontext = None):
render.render.__init__(self)
self.html = html
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return makohtml.parseNode(self.html,self.localcontext)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mgit-at/ansible | refs/heads/devel | test/sanity/code-smell/no-underscore-variable.py | 23 | #!/usr/bin/env python
# Only needed until we can enable a pylint test for this. We may have to write
# one or add it to another existing test (like the one to warn on inappropriate
# variable names). Adding to an existing test may be hard as we may have many
# other things that are not compliant with that test.
import os
import re
import sys
def main():
skip = set([
'test/sanity/code-smell/%s' % os.path.basename(__file__),
# These files currently use _ as a variable. Fix them and then remove them
# from this list. Note that we're not sure if we'll translate module return
# values. If we decide never to do that, then we can stop checking for those.
'contrib/inventory/gce.py',
'lib/ansible/cli/console.py',
'lib/ansible/compat/selectors/_selectors2.py',
'lib/ansible/executor/playbook_executor.py',
'lib/ansible/executor/task_queue_manager.py',
'lib/ansible/module_utils/facts/network/linux.py',
'lib/ansible/module_utils/urls.py',
'lib/ansible/modules/cloud/amazon/data_pipeline.py',
'lib/ansible/modules/cloud/amazon/ec2_group_facts.py',
'lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py',
'lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py',
'lib/ansible/modules/cloud/amazon/efs.py',
'lib/ansible/modules/cloud/amazon/efs_facts.py',
'lib/ansible/modules/cloud/amazon/kinesis_stream.py',
'lib/ansible/modules/cloud/amazon/route53_zone.py',
'lib/ansible/modules/cloud/amazon/s3_sync.py',
'lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py',
'lib/ansible/modules/cloud/docker/docker_service.py',
'lib/ansible/modules/cloud/google/gce.py',
'lib/ansible/modules/cloud/google/gce_eip.py',
'lib/ansible/modules/cloud/google/gce_img.py',
'lib/ansible/modules/cloud/google/gce_instance_template.py',
'lib/ansible/modules/cloud/google/gce_lb.py',
'lib/ansible/modules/cloud/google/gce_mig.py',
'lib/ansible/modules/cloud/google/gce_net.py',
'lib/ansible/modules/cloud/google/gce_pd.py',
'lib/ansible/modules/cloud/google/gce_snapshot.py',
'lib/ansible/modules/cloud/google/gce_tag.py',
'lib/ansible/modules/cloud/google/gcp_backend_service.py',
'lib/ansible/modules/cloud/google/gcp_healthcheck.py',
'lib/ansible/modules/cloud/lxc/lxc_container.py',
'lib/ansible/modules/files/copy.py',
'lib/ansible/modules/files/patch.py',
'lib/ansible/modules/files/synchronize.py',
'lib/ansible/modules/monitoring/statusio_maintenance.py',
'lib/ansible/modules/monitoring/zabbix/zabbix_maintenance.py',
'lib/ansible/modules/net_tools/basics/uri.py',
'lib/ansible/modules/network/cloudengine/ce_acl.py',
'lib/ansible/modules/network/cloudengine/ce_command.py',
'lib/ansible/modules/network/cloudengine/ce_dldp_interface.py',
'lib/ansible/modules/network/cloudengine/ce_mlag_interface.py',
'lib/ansible/modules/network/cloudvision/cv_server_provision.py',
'lib/ansible/modules/network/f5/bigip_remote_syslog.py',
'lib/ansible/modules/network/illumos/dladm_etherstub.py',
'lib/ansible/modules/network/illumos/dladm_iptun.py',
'lib/ansible/modules/network/illumos/dladm_linkprop.py',
'lib/ansible/modules/network/illumos/dladm_vlan.py',
'lib/ansible/modules/network/illumos/dladm_vnic.py',
'lib/ansible/modules/network/illumos/flowadm.py',
'lib/ansible/modules/network/illumos/ipadm_addr.py',
'lib/ansible/modules/network/illumos/ipadm_addrprop.py',
'lib/ansible/modules/network/illumos/ipadm_if.py',
'lib/ansible/modules/network/illumos/ipadm_ifprop.py',
'lib/ansible/modules/network/illumos/ipadm_prop.py',
'lib/ansible/modules/network/vyos/vyos_command.py',
'lib/ansible/modules/packaging/language/pip.py',
'lib/ansible/modules/packaging/os/yum.py',
'lib/ansible/modules/source_control/git.py',
'lib/ansible/modules/system/alternatives.py',
'lib/ansible/modules/system/beadm.py',
'lib/ansible/modules/system/cronvar.py',
'lib/ansible/modules/system/dconf.py',
'lib/ansible/modules/system/filesystem.py',
'lib/ansible/modules/system/gconftool2.py',
'lib/ansible/modules/system/interfaces_file.py',
'lib/ansible/modules/system/iptables.py',
'lib/ansible/modules/system/java_cert.py',
'lib/ansible/modules/system/lvg.py',
'lib/ansible/modules/system/lvol.py',
'lib/ansible/modules/system/parted.py',
'lib/ansible/modules/system/timezone.py',
'lib/ansible/modules/system/ufw.py',
'lib/ansible/modules/utilities/logic/wait_for.py',
'lib/ansible/modules/web_infrastructure/rundeck_acl_policy.py',
'lib/ansible/parsing/vault/__init__.py',
'lib/ansible/playbook/base.py',
'lib/ansible/playbook/helpers.py',
'lib/ansible/playbook/role/__init__.py',
'lib/ansible/playbook/taggable.py',
'lib/ansible/plugins/callback/hipchat.py',
'lib/ansible/plugins/connection/lxc.py',
'lib/ansible/plugins/filter/core.py',
'lib/ansible/plugins/lookup/sequence.py',
'lib/ansible/plugins/strategy/__init__.py',
'lib/ansible/plugins/strategy/linear.py',
'test/legacy/cleanup_gce.py',
'test/legacy/gce_credentials.py',
'test/runner/lib/cloud/cs.py',
'test/runner/lib/core_ci.py',
'test/runner/lib/delegation.py',
'test/runner/lib/docker_util.py',
'test/runner/lib/executor.py',
'test/runner/lib/http.py',
'test/runner/lib/import_analysis.py',
'test/runner/lib/manage_ci.py',
'test/runner/lib/target.py',
'test/runner/lib/util.py',
'test/sanity/import/importer.py',
'test/sanity/validate-modules/main.py',
'test/units/executor/test_play_iterator.py',
'test/units/module_utils/basic/test_run_command.py',
'test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py',
'test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py',
'test/units/modules/system/interfaces_file/test_interfaces_file.py',
])
for path in sys.argv[1:] or sys.stdin.read().splitlines():
if path in skip:
continue
with open(path, 'r') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'(?: |[^C]\()(_)(?:[ ,)])', text)
if match:
print('%s:%d:%d: use `dummy` instead of `_` for a variable name' % (
path, line + 1, match.start(1) + 1))
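# Illustrative examples (not from the original file) of lines the regex above
# would flag:
#   for _ in range(3):           ->  use `dummy` instead of `_`
#   key, _ = line.split('=')     ->  use `dummy` instead of `_`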
if __name__ == '__main__':
main()
|
mandli/coastal | refs/heads/master | examples/geoclaw/benchmark_2/setplot.py | 1 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
import os
from numpy import loadtxt
slope = 1. / 19.85
print "slope = ",slope
E = loadtxt("file2.txt",skiprows=5)
compare = False
if compare:
print "Comparing results..."
outdir1 = os.path.abspath('_output_50')
outdir2 = os.path.abspath('_output_200')
outdir3 = os.path.abspath('_output_400')
else:
outdir1 = os.path.abspath('_output')
outdir2 = os.path.abspath('_output')
outdir3 = os.path.abspath('_output')
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
plotdata.clearfigures() # clear any old figures,axes,items data
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
from clawpack.visclaw import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
def save(current_data):
from pylab import figure, savefig
frameno = current_data.frameno
figure(2)
savefig('canonical-%s.png' % frameno)
plotdata.afterframe = save
#-----------------------------------------
# Figure for line plot
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='line', figno=2)
#plotfigure.show = False
def eta_slice(current_data):
x = current_data.x[:,0]
q = current_data.q
eta = q[3,:,3]
return x,eta
def B_slice(current_data):
x = current_data.x[:,0]
q = current_data.q
h = q[0,:,3]
eta = q[3,:,3]
B = eta - h
return x,B
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('line')
plotaxes.axescmd = 'subplot(211)'
plotaxes.title = 'Surface'
# plotaxes.xlimits = [-5,20]
plotaxes.xlimits = [-5,60]
#plotaxes.ylimits = [-1.2,0.6]
plotaxes.ylimits = [-0.04,0.3]
# Water
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = eta_slice
plotitem.color = 'b'
plotitem.kwargs = {'linewidth':2}
plotitem.outdir = outdir1
# Water
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = eta_slice
plotitem.color = 'r'
plotitem.kwargs = {'linewidth':2}
plotitem.outdir = outdir2
# Water
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = eta_slice
plotitem.color = 'g'
plotitem.plotstyle = '--'
plotitem.kwargs = {'linewidth':2}
plotitem.outdir = outdir3
# Topography
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = B_slice
plotitem.color = 'k'
plotitem.outdir = outdir3
def afteraxes(current_data):
from pylab import plot, legend, loadtxt
t = current_data.t
plot(t, 0*t, 'k')
frameno = current_data.frameno
if frameno in [1,2,3,4]:
plot(E[:,(frameno-1)*2],E[:,(frameno-1)*2+1],'bo')
plotaxes.afteraxes = afteraxes
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('linezoom')
#plotaxes.show = False
plotaxes.axescmd = 'subplot(212)'
plotaxes.title = 'Surface'
plotaxes.xlimits = [-4.0,3.0]
#plotaxes.ylimits = [-0.1,0.4]
plotaxes.ylimits = [-0.04,0.3]
# Water
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = eta_slice
plotitem.color = 'b'
plotitem.plotstyle = 'o-'
plotitem.kwargs = {'linewidth':2}
plotitem.outdir = outdir1
# Water
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = eta_slice
plotitem.color = 'r'
plotitem.kwargs = {'linewidth':2}
plotitem.outdir = outdir2
# Water
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = eta_slice
plotitem.color = 'g'
plotitem.plotstyle = '--'
plotitem.kwargs = {'linewidth':2}
plotitem.outdir = outdir3
# Topography
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
plotitem.map_2d_to_1d = B_slice
plotitem.color = 'k'
def afteraxes(current_data):
from pylab import plot, legend, loadtxt
t = current_data.t
plot(t, 0*t, 'k')
frameno = current_data.frameno
if frameno in [1,2,3,4]:
plot(E[:,(frameno-1)*2],E[:,(frameno-1)*2+1],'bo')
plotaxes.afteraxes = afteraxes
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
type='each_gauge')
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,60]
plotaxes.ylimits = [-0.02, 0.05]
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
# Plot topo as green curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
def gaugetopo(current_data):
q = current_data.q
h = q[:,0]
eta = q[:,3]
topo = eta - h
return topo
plotitem.plot_var = gaugetopo
# plotitem.clf_each_gauge = False
plotitem.plotstyle = 'g-'
def afteraxes(current_data):
from pylab import plot, legend, loadtxt
t = current_data.t
plot(t, 0*t, 'k')
try:
labgage = loadtxt('WaveGages.txt',skiprows=1)
except:
print "*** Did not find WaveGages.txt from benchmark"
gaugeno = current_data.gaugeno
if gaugeno in [1,2,3]:
plot(labgage[:,0],0.01*labgage[:,gaugeno],'r')
legend(('GeoClaw','topography','sea level','lab data'),loc='upper right')
else:
legend(('GeoClaw','topography','sea level'),loc='upper right')
plotaxes.afteraxes = afteraxes
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = [] # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
|
durden/frappy | refs/heads/master | frappy/core/api.py | 1 | """
Base implementation of Frappy framework.
"""
from frappy.core.auth import NoAuth
import requests
try:
import json
except ImportError:
import simplejson as json
class APIHTTPError(Exception):
"""
Base Exception thrown by the APICall object when there is a
general error interacting with the API.
"""
def __init__(self, status_code, uri):
"""Initalize error object"""
self.status_code = status_code
self.uri = uri
super(Exception, self).__init__()
def __str__(self):
"""Stringify error"""
return ("API sent status %i for URL: %s " % (self.status_code,
self.uri))
class APICall(object):
"""
Base implementation of API call.
This class is very generic and should provide most of the send/retrieve
functionality for API access. Thus, you should be able to subclass it,
and provide a basic __init__ method.
"""
def __init__(self, auth, req_format, domain, secure=True):
"""Initialize call API object"""
self.auth = auth
if auth is None:
self.auth = NoAuth()
self.req_format = req_format
secure_str = ''
if secure:
secure_str = 's'
self.base_uri = "http%s://%s/" % (secure_str, domain)
self.uri = self.base_uri
self.requested_uri = ""
self.method = "get"
self.response = None
self.headers = {'request': {}, 'response': {}}
self.missing_attrs = ()
def __getattr__(self, k):
"""
        Look for attribute k in the base object; otherwise append it to the uri.
        This allows for a very powerful and expressive syntax for creating
API calls that map closely to the uri they query. For example a
Twitter call: <object>.statuses.public_timeline() will map to
<domain>/statuses/public_timeline.
"""
self.missing_attrs += (k,)
return self
def service_build_uri(self, *args, **kwargs):
"""
Service specific build uri
        This method is meant to be overridden by child classes to have the last
opportunity to verify self.uri and add additional elements to it, etc.
        NOTE: Make sure to pop all arguments off the list if you use them;
        otherwise they will be appended twice, since all leftovers are
        eventually added to the request uri.
Also, don't forget to call this base method after doing service
specific alterations!
"""
# Don't use join here b/c not all arguments are required to be strings
for arg in args:
self.uri += '/%s' % (arg)
return kwargs
def _build_uri(self, **kwargs):
"""
Build uri for request with any missing attribute accesses that have
accumulated and any arguments or keyword arguments and return any
leftover keyword arguments
"""
uriparts = []
# Search all missing attributes for matching keyword argument
for uripart in self.missing_attrs:
# If keyword argument matches missing attribute use the value of
# keyword argument, otherwise just append the missing attribute
# This allows for putting keyword arguments in the middle of a uri
            # string instead of at the end
# For example:
# myobject.test.id.details(id=1) maps to domain/test/1/details/
uriparts.append(str(kwargs.pop(uripart, uripart)))
self.uri += '/'.join(uriparts)
# Return leftover keyword arguments for service specific code to use,
# otherwise they'll just be appended at the end later
return kwargs
def _handle_auth(self):
"""
        Set up authentication headers for the request.
"""
if self.auth is None:
raise ValueError('Authentication is None')
self.headers['request'].clear()
self.headers['response'].clear()
self.headers['request'].update(self.auth.generate_headers())
def _set_request_method(self, **kwargs):
"""Set request method for response by passing in 'method' kwarg"""
self.method = kwargs.pop('method', 'get')
def __call__(self, *args, **kwargs):
"""
Finish building uri with leftover arguments, append authentication, and
send off request
"""
kwargs = self._build_uri(**kwargs)
# Wrapper for child classes to customize creation of the uri
kwargs = self.service_build_uri(*args, **kwargs)
self._set_request_method(**kwargs)
# Append any authentication specified to request
self._handle_auth()
resp = self._send_request(**kwargs)
return self._handle_response(resp)
def _prepare_request_params(self, **kwargs):
"""Handle encoding or any special processing of request parameters"""
return kwargs
def request_method_is_safe(self):
"""
Determines if request is 'safe' in REST terminology (aka doesn't
change data, just requests it)
"""
return self.method == 'get' or self.method == 'head'
def _send_request(self, **kwargs):
"""Send request to self.uri with associated (encoded) data"""
        # Make it lowercase b/c the methods in the requests module are lowercase
self.method = self.method.lower()
method_call = getattr(requests, self.method, None)
if method_call is None:
raise AttributeError(
'%s not a supported HTTP method' % (self.method))
arg_data = self._prepare_request_params(**kwargs)
# 'get' and 'head' take params to put in query string
if self.request_method_is_safe():
resp = method_call(self.uri, params=arg_data,
headers=self.headers['request'])
# Update uri with full location (including query params encoded)
self.uri = resp.url
else:
resp = method_call(self.uri, data=arg_data,
headers=self.headers['request'])
return resp
def _handle_response(self, resp):
"""Verify response code and format data accordingly"""
self.headers['response'] = resp.headers
# Roll over request to prepare for new one
self._reset_uri()
# 200 - ok, 201 - created
if resp.status_code != 200 and resp.status_code != 201:
if (resp.status_code == 304):
return []
else:
raise APIHTTPError(resp.status_code, self.requested_uri)
if "json" == self.req_format:
self.response = json.loads(resp.content.decode('utf8'))
self.response_json = json.dumps(self.response)
else:
self.response = resp.content.decode('utf8')
return self
def _reset_uri(self):
"""Clear active request uri to make way for another request"""
# Save off the current uri request just for testing and inspection
self.requested_uri = self.uri
self.uri = self.base_uri
self.missing_attrs = ()
@property
def rate_limit_remaining(self):
"""
Remaining requests in the current rate-limit.
"""
try:
return int(self.headers['response']['x-ratelimit-remaining'])
except KeyError:
return 0
@property
def rate_limit(self):
"""
Max number of requests allowed.
"""
try:
return int(self.headers['response']['x-ratelimit-limit'])
except KeyError:
return 0
@property
def rate_limit_reset(self):
"""
Time in UTC epoch seconds when the rate limit will reset.
"""
try:
return int(self.headers['response']['x-ratelimit-reset'])
except KeyError:
return 0
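# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity, not part of the original
# module).  The domain and endpoint below are hypothetical; attribute access
# accumulates URI parts and calling the object sends the request via
# `requests`:
#
#     api = APICall(auth=None, req_format='json', domain='api.example.com')
#     api.statuses.public_timeline()   # GET https://api.example.com/statuses/public_timeline
#     print(api.requested_uri)         # full URI of the last request
#     print(api.response)              # decoded JSON payload
# ---------------------------------------------------------------------------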
|
ol-loginov/intellij-community | refs/heads/master | python/testData/inspections/AugmentAssignmentWithContext.py | 83 | class A:
x = 3
a = A()
<weak_warning descr="Assignment can be replaced with augmented assignment">a.x = a.x +<caret> 1</weak_warning> |
MHatmaker/maplinkr | refs/heads/master | node_modules/meanio/node_modules/npm/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
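# Illustrative invocation (the exact flags are an assumption, not taken from
# the gyp documentation): generate dump.json with the dump_dependency_json
# generator, then pipe this script's output to graphviz's dot:
#
#     gyp -f dump_dependency_json my_project.gyp    # writes dump.json
#     python graphviz.py my_project.gyp:my_target | dot -Tpng -o deps.png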
|
kiwitcms/Kiwi | refs/heads/master | tcms/rpc/api/tag.py | 2 | # -*- coding: utf-8 -*-
from django.conf import settings
from modernrpc.core import rpc_method
from tcms.management.models import Tag
from tcms.rpc.decorators import permissions_required
@permissions_required("management.view_tag")
@rpc_method(name="Tag.filter")
def filter(query): # pylint: disable=redefined-builtin
"""
.. function:: RPC Tag.filter(query)
Search and return a list of tags
:param query: Field lookups for :class:`tcms.management.models.Tag`
:type query: dict
:return: Serialized list of :class:`tcms.management.models.Tag` objects
:rtype: list(dict)
"""
fields_list = ["id", "name", "case", "plan", "run"]
if "tcms.bugs.apps.AppConfig" in settings.INSTALLED_APPS:
fields_list.append("bugs")
return list(Tag.objects.filter(**query).values(*fields_list).distinct())
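# Illustrative call (assumption: an RPC client that exposes methods by name,
# e.g. an XML-RPC proxy):
#     rpc.Tag.filter({'name__startswith': 'regression'})
# which maps to Tag.objects.filter(name__startswith='regression') above and
# returns a list of dicts with the fields listed in `fields_list`.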
|
hansenmakangiras/disperindag | refs/heads/master | static/assets/node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
abaditsegay/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/build/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
frishberg/django | refs/heads/master | django/conf/locale/el/formats.py | 58 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd/m/Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'd/m/Y P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', '%Y-%m-%d', # '25/10/2006', '25/10/06', '2006-10-25',
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
GHackAnonymous/ninja-ide | refs/heads/master | ninja_ide/intellisensei/analyzer/analyzer_daemon.py | 7 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
import os
import time
from threading import Thread, Lock
from multiprocessing import Process, Queue
from ninja_ide.intellisensei.analyzer import model
from ninja_ide.intellisensei.analyzer import analyzer
try:
unicode
except NameError:
# Python 3
basestring = unicode = str # lint:ok
__completion_daemon_instance = None
WAITING_BEFORE_START = 5
PROJECTS = {}
def CompletionDaemon():
global __completion_daemon_instance
if __completion_daemon_instance is None:
__completion_daemon_instance = __CompletionDaemon()
__completion_daemon_instance.start()
return __completion_daemon_instance
class __CompletionDaemon(Thread):
def __init__(self):
Thread.__init__(self)
self.analyzer = analyzer.Analyzer()
self.modules = {}
self.projects_modules = {}
self._relations = {}
self.reference_counter = 0
self.keep_alive = True
self.lock = Lock()
self.queue_receive = Queue()
self.queue_send = Queue()
self.daemon = _DaemonProcess(self.queue_send, self.queue_receive)
self.daemon.start()
def run(self):
global WAITING_BEFORE_START
time.sleep(WAITING_BEFORE_START)
while self.keep_alive:
path_id, module, resolve = self.queue_receive.get()
if path_id is None:
continue
self.lock.acquire()
self.modules[path_id] = module
self.lock.release()
if resolve:
resolution = self._resolve_with_other_modules(resolve)
self._relations[path_id] = []
for package in resolution:
self._relations[path_id].append(resolution[package])
self.queue_send.put((path_id, module, False, resolution))
def _resolve_with_other_modules(self, packages):
resolution = {}
for package in packages:
if package.find('(') != -1:
package = package[:package.index('(')]
if self.projects_modules.get(package, False):
folder = self.projects_modules[package]
filename = os.path.join(folder, '__init__.py')
if self._analyze_file(filename):
resolution[package] = filename
elif self.projects_modules.get(package.rsplit('.', 1)[0], False):
name = package.rsplit('.', 1)
folder = self.projects_modules[name[0]]
filename = "%s.py" % os.path.join(folder, name[1])
if os.path.isfile(filename):
if self._analyze_file(filename):
resolution[package] = filename
elif self.projects_modules.get(package.rsplit('.', 2)[0], False):
name = package.rsplit('.', 2)
folder = self.projects_modules[name[0]]
filename = "%s.py" % os.path.join(folder, name[1])
if os.path.isfile(filename):
if self._analyze_file(filename):
resolution[package.rsplit('.', 1)[0]] = filename
return resolution
def _analyze_file(self, filename):
try:
if filename not in self.modules:
source = ''
with open(filename) as f:
source = f.read()
module = self.analyzer.analyze(source)
self.inspect_module(filename, module, False)
return True
except Exception as reason:
print(reason)
return False
def unload_module(self, path_id):
relations = self._relations.pop(path_id, None)
if relations is not None:
relations.append(path_id)
for module in relations:
valid = False
for rel in self._relations:
other_modules = self._relations[rel]
if module in other_modules:
valid = True
if not valid:
self.modules.pop(module, None)
def process_path(self):
for project in PROJECTS:
if PROJECTS[project]:
continue
project = os.path.abspath(project)
package = os.path.basename(project)
self.projects_modules[package] = project
for root, dirs, files in os.walk(project, followlinks=True):
if '__init__.py' in files:
package = root[len(project) + 1:].replace(
os.path.sep, '.')
self.projects_modules[package] = root
def inspect_module(self, path_id, module, recursive=True):
self.lock.acquire()
self.modules[path_id] = module
self.lock.release()
self.queue_send.put((path_id, module, recursive, None))
def get_module(self, path_id):
return self.modules.get(path_id, None)
def _shutdown_process(self):
self.queue_send.put((None, None, None, None))
self.daemon.terminate()
self.queue_receive.put((None, None, None))
def force_stop(self):
self.keep_alive = False
self._shutdown_process()
for project in PROJECTS:
PROJECTS[project] = False
if self.is_alive():
self.join()
class _DaemonProcess(Process):
def __init__(self, queue_receive, queue_send):
super(_DaemonProcess, self).__init__()
self.queue_receive = queue_receive
self.queue_send = queue_send
self.iteration = 0
self.packages = []
def run(self):
while True:
self.iteration = 0
path_id, module, recursive, resolution = self.queue_receive.get()
if path_id is None and module is None:
break
try:
if resolution is not None:
self.packages = resolution
self.iteration = 2
self._resolve_module(module)
elif module.need_resolution():
self._resolve_module(module)
self.iteration = 1
self._resolve_module(module)
else:
continue
if self.packages and recursive:
self.queue_send.put((path_id, module, self.packages))
else:
self.queue_send.put((path_id, module, []))
except Exception as reason:
                # Try not to die, whatever happened
                message = 'Daemon Fail with: %r' % reason
print(message)
raise
finally:
self.packages = []
def _resolve_module(self, module):
self._resolve_attributes(module, module)
self._resolve_functions(module, module)
for cla in module.classes:
clazz = module.classes[cla]
self._resolve_inheritance(clazz, module)
self._resolve_attributes(clazz, module)
self._resolve_functions(clazz, module)
def _resolve_inheritance(self, clazz, module):
for base in clazz.bases:
name = base.split('.', 1)
main_attr = name[0]
child_attrs = ''
if len(name) == 2:
child_attrs = name[1]
result = module.get_type(main_attr, child_attrs)
data = model.late_resolution
if result.get('found', True):
data_type = module.imports[main_attr].get_data_type()
if child_attrs:
child_attrs = '.%s' % child_attrs
name = '%s%s().' % (data_type, child_attrs)
imports = module.get_imports()
imports = [imp.split('.')[0] for imp in imports]
data = completer.get_all_completions(name, imports)
data = (name, data)
elif result.get('object', False).__class__ is model.Clazz:
data = result['object']
clazz.bases[base] = data
clazz.update_with_parent_data()
def _resolve_functions(self, structure, module):
if structure.__class__ is model.Assign:
return
for func in structure.functions:
function = structure.functions[func]
self._resolve_attributes(function, module)
self._resolve_functions(function, module)
self._resolve_returns(function, module)
def _resolve_returns(self, structure, module):
if structure.__class__ is model.Assign:
return
self._resolve_types(structure.return_type, module, structure, 'return')
def _resolve_attributes(self, structure, module):
if structure.__class__ is model.Assign:
return
for attr in structure.attributes:
assign = structure.attributes[attr]
self._resolve_types(assign.data, module, assign)
def _resolve_types(self, types, module, structure=None, split_by='='):
if self.iteration == 0:
self._resolve_with_imports(types, module, split_by)
self._resolve_with_local_names(types, module, split_by)
elif self.iteration == 1:
self._resolve_with_local_vars(types, module, split_by, structure)
else:
self._resolve_with_linked_modules(types, module, structure)
def _resolve_with_linked_modules(self, types, module, structure):
for data in types:
name = data.data_type
if not isinstance(name, basestring):
continue
for package in self.packages:
if name.startswith(package):
to_resolve = name[len(package):]
if to_resolve and to_resolve[0] == '.':
to_resolve = to_resolve[1:]
path = self.packages[package]
linked = model.LinkedModule(path, to_resolve)
data.data_type = linked
break
def _resolve_with_imports(self, types, module, splitby):
for data in types:
if data.data_type != model.late_resolution:
continue
line = data.line_content
value = line.split(splitby)[1].strip().split('.')
name = value[0]
extra = ''
if name.find('(') != -1:
extra = name[name.index('('):]
name = name[:name.index('(')]
if name in module.imports:
value[0] = module.imports[name].data_type
package = '.'.join(value)
resolve = "%s%s" % (package, extra)
data.data_type = resolve
self.packages.append(package)
def _resolve_with_local_names(self, types, module, splitby):
        # TODO: resolve with function returns
for data in types:
if data.data_type != model.late_resolution:
continue
line = data.line_content
value = line.split(splitby)[1].split('(')[0].strip()
if value in module.classes:
clazz = module.classes[value]
data.data_type = clazz
def _resolve_with_local_vars(self, types, module, splitby, structure=None):
for data in types:
if data.data_type != model.late_resolution:
continue
line = data.line_content
value = line.split(splitby)[1].split('(')[0].strip()
sym = value.split('.')
if len(sym) != 0:
main_attr = sym[0]
if len(sym) > 2:
child_attr = '.'.join(sym[1:])
elif len(sym) == 2:
child_attr = sym[1]
else:
child_attr = ''
scope = []
self._get_scope(structure, scope)
if structure.__class__ is model.Assign:
scope.pop(0)
scope.reverse()
result = module.get_type(main_attr, child_attr, scope)
data_type = model.late_resolution
if isinstance(result['type'], basestring) and len(result) < 3:
if child_attr and \
structure.__class__ is not model.Function:
data_type = "%s.%s" % (result['type'], child_attr)
else:
data_type = result['type']
elif result.get('object', False):
data_type = result['object']
if data is not None:
data.data_type = data_type
def _get_scope(self, structure, scope):
if structure.__class__ not in (None, model.Module):
scope.append(structure.name)
self._get_scope(structure.parent, scope)
def shutdown_daemon():
daemon = CompletionDaemon()
daemon.force_stop()
global __completion_daemon_instance
__completion_daemon_instance = None
def add_project_folder(project_path):
global PROJECTS
if project_path not in PROJECTS:
PROJECTS[project_path] = False
daemon = CompletionDaemon()
daemon.process_path()
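# Illustrative flow (paths and `source_code` are placeholders): register a
# project folder so its packages can be resolved, hand a parsed module to the
# daemon, then fetch the (eventually resolved) model back by its path id:
#
#     add_project_folder('/home/user/myproject')
#     daemon = CompletionDaemon()
#     parsed = daemon.analyzer.analyze(source_code)
#     daemon.inspect_module('/home/user/myproject/foo.py', parsed)
#     module = daemon.get_module('/home/user/myproject/foo.py')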
|
GlobalBoost/GlobalBoost | refs/heads/master | test/functional/feature_config_args.py | 3 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def test_config_file_parser(self):
# Assume node is stopped
inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('-dash=1\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
def run_test(self):
self.stop_node(0)
self.test_config_file_parser()
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = self.nodes[0].datadir
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "bitcoin.conf")
# datadir needs to be set before [regtest] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
# Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
#self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
# Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
#self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
#self.stop_node(0)
#assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'blocks'))
#if self.is_wallet_compiled():
#assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'blocks'))
if self.is_wallet_compiled():
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
|
vibhorag/scikit-learn | refs/heads/master | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
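###############################################################################
# Hyper-parameter search (sketch)
# The docstring notes that the hyper-parameters were tuned by grid search but
# that the search is omitted for runtime reasons.  A sketch of such a search,
# kept commented out so this example stays fast (grid values are illustrative):
#
# from sklearn.grid_search import GridSearchCV
#
# param_grid = {
#     'rbm__learning_rate': [0.01, 0.06, 0.1],
#     'rbm__n_components': [50, 100, 200],
#     'logistic__C': [1000.0, 6000.0, 10000.0],
# }
# search = GridSearchCV(classifier, param_grid, n_jobs=-1, verbose=1)
# search.fit(X_train, Y_train)
# print(search.best_params_)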
|
adityaduggal/erpnext | refs/heads/develop | erpnext/domains/retail.py | 12 | data = {
'desktop_icons': [
'POS',
'Item',
'Customer',
'Sales Invoice',
'Purchase Order',
'Accounts',
'Task',
'ToDo'
],
'set_value': [
['Stock Settings', None, 'show_barcode_field', 1]
],
'default_portal_role': 'Customer'
}
|
JamesonFinney/librenms | refs/heads/master | poller-service.py | 11 | #! /usr/bin/env python
"""
poller-service A service to wrap SNMP polling. It will poll up to $threads devices at a time, and will not re-poll
devices that have been polled within the last $poll_frequency seconds. It will prioritize devices based
on the last time polled. If resources are sufficient, this service should poll every device every
$poll_frequency seconds, but should gracefully degrade if resources are inefficient, polling devices as
frequently as possible. This service is based on Job Snijders' poller-wrapper.py.
Author: Clint Armstrong <[email protected]>
Date: July 2015
License: BSD 2-Clause
Copyright (c) 2015, Clint Armstrong
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
import os
import subprocess
import sys
import threading
import time
import MySQLdb
import logging
import logging.handlers
from datetime import datetime, timedelta
from collections import namedtuple
log = logging.getLogger('poller-service')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('poller-service: %(message)s')
handler = logging.handlers.SysLogHandler(address='/dev/log')
handler.setFormatter(formatter)
log.addHandler(handler)
install_dir = os.path.dirname(os.path.realpath(__file__))
config_file = install_dir + '/config.php'
log.info('INFO: Starting poller-service')
class DB:
conn = None
def __init__(self):
self.in_use = threading.Lock()
self.connect()
def connect(self):
self.in_use.acquire(True)
while True:
try:
self.conn.close()
except:
pass
try:
if db_port == 0:
self.conn = MySQLdb.connect(host=db_server, user=db_username, passwd=db_password, db=db_dbname)
else:
self.conn = MySQLdb.connect(host=db_server, port=db_port, user=db_username, passwd=db_password, db=db_dbname)
break
except (AttributeError, MySQLdb.OperationalError):
log.warning('WARNING: MySQL Error, reconnecting.')
time.sleep(.5)
self.conn.autocommit(True)
self.conn.ping(True)
self.in_use.release()
def query(self, sql):
self.in_use.acquire(True)
while True:
try:
cursor = self.conn.cursor()
cursor.execute(sql)
ret = cursor.fetchall()
cursor.close()
self.in_use.release()
return ret
except (AttributeError, MySQLdb.OperationalError):
log.warning('WARNING: MySQL Operational Error during query, reconnecting.')
self.in_use.release()
self.connect()
except (AttributeError, MySQLdb.ProgrammingError):
log.warning('WARNING: MySQL Programming Error during query, attempting query again.')
cursor.close()
def get_config_data():
config_cmd = ['/usr/bin/env', 'php', '%s/config_to_json.php' % install_dir]
try:
proc = subprocess.Popen(config_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except:
log.critical("ERROR: Could not execute: %s" % config_cmd)
sys.exit(2)
return proc.communicate()[0].decode()
try:
with open(config_file) as f:
pass
except IOError as e:
log.critical("ERROR: Oh dear... %s does not seem readable" % config_file)
sys.exit(2)
try:
config = json.loads(get_config_data())
except:
log.critical("ERROR: Could not load or parse configuration, are PATHs correct?")
sys.exit(2)
try:
loglevel = int(config['poller_service_loglevel'])
except KeyError:
loglevel = 20
except ValueError:
loglevel = logging.getLevelName(config['poller_service_loglevel'])
try:
log.setLevel(loglevel)
except ValueError:
log.warning('ERROR: {0} is not a valid log level. If using python 3.4.0-3.4.1 you must specify loglevel by number'.format(str(loglevel)))
log.setLevel(20)
poller_path = config['install_dir'] + '/poller.php'
discover_path = config['install_dir'] + '/discovery.php'
db_username = config['db_user']
db_password = config['db_pass']
if config['db_host'][:5].lower() == 'unix:':
db_server = config['db_host']
db_port = 0
elif ':' in config['db_host']:
db_server = config['db_host'].rsplit(':')[0]
db_port = int(config['db_host'].rsplit(':')[1])
else:
db_server = config['db_host']
db_port = 0
db_dbname = config['db_name']
try:
amount_of_workers = int(config['poller_service_workers'])
if amount_of_workers == 0:
amount_of_workers = 16
except KeyError:
amount_of_workers = 16
try:
poll_frequency = int(config['poller_service_poll_frequency'])
if poll_frequency == 0:
poll_frequency = 300
except KeyError:
poll_frequency = 300
try:
discover_frequency = int(config['poller_service_discover_frequency'])
if discover_frequency == 0:
discover_frequency = 21600
except KeyError:
discover_frequency = 21600
try:
down_retry = int(config['poller_service_down_retry'])
if down_retry == 0:
down_retry = 60
except KeyError:
down_retry = 60
try:
retry_query = int(config['poller_service_retry_query'])
if retry_query == 0:
retry_query = 1
except KeyError:
retry_query = 1
try:
single_connection = bool(config['poller_service_single_connection'])
except KeyError:
single_connection = False
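# For reference, the corresponding config.php entries read above look like
# this (values shown are the fallback defaults, purely illustrative):
#   $config['poller_service_workers'] = 16;
#   $config['poller_service_poll_frequency'] = 300;
#   $config['poller_service_discover_frequency'] = 21600;
#   $config['poller_service_down_retry'] = 60;
#   $config['poller_service_retry_query'] = 1;
#   $config['poller_service_single_connection'] = false;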
db = DB()
def lockFree(lock, db=db):
query = "SELECT IS_FREE_LOCK('{0}')".format(lock)
return db.query(query)[0][0] == 1
def getLock(lock, db=db):
query = "SELECT GET_LOCK('{0}', 0)".format(lock)
return db.query(query)[0][0] == 1
def releaseLock(lock, db=db):
query = "SELECT RELEASE_LOCK('{0}')".format(lock)
    result = db.query(query)
    return result[0][0] == 1
def sleep_until(timestamp):
now = datetime.now()
if timestamp > now:
sleeptime = (timestamp - now).seconds
else:
sleeptime = 0
time.sleep(sleeptime)
poller_group = ('and poller_group IN({0}) '
.format(str(config['distributed_poller_group'])) if 'distributed_poller_group' in config else '')
# Add last_polled and last_polled_timetaken so we can sort by the time the last poll started, with the goal
# of having each device complete a poll within the given time range.
dev_query = ('SELECT device_id, status, '
'CAST( '
' DATE_ADD( '
' DATE_SUB( '
' last_polled, '
' INTERVAL last_polled_timetaken SECOND '
' ), '
' INTERVAL {0} SECOND) '
' AS DATETIME '
') AS next_poll, '
'CAST( '
' DATE_ADD( '
' DATE_SUB( '
' last_discovered, '
' INTERVAL last_discovered_timetaken SECOND '
' ), '
' INTERVAL {1} SECOND) '
' AS DATETIME '
') as next_discovery '
'FROM devices WHERE '
'disabled = 0 '
'AND IS_FREE_LOCK(CONCAT("poll.", device_id)) '
'AND IS_FREE_LOCK(CONCAT("discovery.", device_id)) '
'AND IS_FREE_LOCK(CONCAT("queue.", device_id)) '
'AND ( last_poll_attempted < DATE_SUB(NOW(), INTERVAL {2} SECOND ) '
' OR last_poll_attempted IS NULL ) '
'{3} '
'ORDER BY next_poll asc '
'LIMIT 1 ').format(poll_frequency,
discover_frequency,
down_retry,
poller_group)
next_update = datetime.now() + timedelta(minutes=1)
devices_scanned = 0
dont_query_until = datetime.fromtimestamp(0)
def poll_worker():
global dev_query
global devices_scanned
global dont_query_until
global single_connection
thread_id = threading.current_thread().name
if single_connection:
global db
else:
db = DB()
while True:
if datetime.now() < dont_query_until:
time.sleep(1)
continue
dev_row = db.query(dev_query)
if len(dev_row) < 1:
dont_query_until = datetime.now() + timedelta(seconds=retry_query)
time.sleep(1)
continue
device_id, status, next_poll, next_discovery = dev_row[0]
if not getLock('queue.{0}'.format(device_id), db):
releaseLock('queue.{0}'.format(device_id), db)
continue
if next_poll and next_poll > datetime.now():
log.debug('DEBUG: Thread {0} Sleeping until {1} before polling {2}'.format(thread_id, next_poll, device_id))
sleep_until(next_poll)
action = 'poll'
if (not next_discovery or next_discovery < datetime.now()) and status == 1:
action = 'discovery'
log.debug('DEBUG: Thread {0} Starting {1} of device {2}'.format(thread_id, action, device_id))
devices_scanned += 1
db.query('UPDATE devices SET last_poll_attempted = NOW() WHERE device_id = {0}'.format(device_id))
if not getLock('{0}.{1}'.format(action, device_id), db):
releaseLock('{0}.{1}'.format(action, device_id), db)
releaseLock('queue.{0}'.format(device_id), db)
continue
releaseLock('queue.{0}'.format(device_id), db)
try:
start_time = time.time()
path = poller_path
if action == 'discovery':
path = discover_path
command = "/usr/bin/env php %s -h %s >> /dev/null 2>&1" % (path, device_id)
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
if elapsed_time < 300:
log.debug("DEBUG: Thread {0} finished {1} of device {2} in {3} seconds".format(thread_id, action, device_id, elapsed_time))
else:
log.warning("WARNING: Thread {0} finished {1} of device {2} in {3} seconds".format(thread_id, action, device_id, elapsed_time))
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
finally:
releaseLock('{0}.{1}'.format(action, device_id), db)
for i in range(0, amount_of_workers):
t = threading.Thread(target=poll_worker)
t.name = i
t.daemon = True
t.start()
while True:
sleep_until(next_update)
seconds_taken = (datetime.now() - (next_update - timedelta(minutes=1))).seconds
update_query = ('INSERT INTO pollers(poller_name, '
' last_polled, '
' devices, '
' time_taken) '
' values("{0}", NOW(), "{1}", "{2}") '
'ON DUPLICATE KEY UPDATE '
' last_polled=values(last_polled), '
' devices=values(devices), '
' time_taken=values(time_taken) ').format(config['distributed_poller_name'].strip(),
devices_scanned,
seconds_taken)
try:
db.query(update_query)
except:
log.critical('ERROR: MySQL query error. Is your schema up to date?')
sys.exit(2)
log.info('INFO: {0} devices scanned in the last minute'.format(devices_scanned))
devices_scanned = 0
next_update = datetime.now() + timedelta(minutes=1)
|
AnhellO/DAS_Sistemas | refs/heads/development | Ago-Dic-2019/Ejemplos/Design Patterns/Simple Factory/Client.py | 1 | from SimpleFactory import SimpleFactory
def main():
instancia = SimpleFactory.makeConcreteClassPersonaje(
tipo='Mago',
nombre='Merlin',
hab_especial='Transmutación'
)
print(instancia)
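# Illustrative note (hypothetical, not part of the original exercise): the
# factory imported above is expected to map `tipo` to a concrete Personaje
# class, roughly like:
#
#     class SimpleFactory:
#         @staticmethod
#         def makeConcreteClassPersonaje(tipo, nombre, hab_especial):
#             if tipo == 'Mago':
#                 return Mago(nombre, hab_especial)
#             raise ValueError('tipo desconocido: %s' % tipo)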
if __name__ == '__main__':
main() |
onyxfish/csvkit | refs/heads/master | tests/test_utilities/test_in2csv.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import six
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.in2csv import In2CSV, launch_new_instance
from tests.utils import CSVKitTestCase, EmptyFileTests, stdin_as_string
class TestIn2CSV(CSVKitTestCase, EmptyFileTests):
Utility = In2CSV
default_args = ['-f', 'csv']
def assertConverted(self, input_format, input_filename, output_filename, additional_args=[]):
output = self.get_output(['-f', input_format, input_filename] + additional_args)
with open(output_filename, 'r') as f:
self.assertEqual(output, f.read())
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']):
launch_new_instance()
def test_version(self):
with self.assertRaises(SystemExit) as e:
self.get_output(['-V'])
self.assertEqual(e.exception.code, 0)
def test_locale(self):
self.assertConverted('csv', 'examples/test_locale.csv', 'examples/test_locale_converted.csv', ['--locale', 'de_DE'])
def test_no_blanks(self):
self.assertConverted('csv', 'examples/blanks.csv', 'examples/blanks_converted.csv')
def test_blanks(self):
self.assertConverted('csv', 'examples/blanks.csv', 'examples/blanks.csv', ['--blanks'])
def test_date_format(self):
self.assertConverted('csv', 'examples/test_date_format.csv', 'examples/test_date_format_converted.csv', ['--date-format', '%d/%m/%Y'])
def test_numeric_date_format(self):
self.assertConverted('csv', 'examples/test_numeric_date_format.csv', 'examples/test_date_format_converted.csv', ['--date-format', '%Y%m%d'])
def test_convert_csv(self):
self.assertConverted('csv', 'examples/testfixed_converted.csv', 'examples/testfixed_converted.csv')
def test_convert_csv_with_skip_lines(self):
self.assertConverted('csv', 'examples/test_skip_lines.csv', 'examples/dummy.csv', ['--skip-lines', '3', '--no-inference'])
def test_convert_tsv(self):
self.assertConverted('csv', 'examples/dummy.tsv', 'examples/dummy.csv', ['--no-inference'])
def test_convert_tsv_streaming(self):
self.assertConverted('csv', 'examples/dummy.tsv', 'examples/dummy.csv', ['--no-inference', '--snifflimit', '0', '--tabs'])
def test_convert_dbf(self):
self.assertConverted('dbf', 'examples/testdbf.dbf', 'examples/testdbf_converted.csv')
def test_convert_json(self):
self.assertConverted('json', 'examples/testjson.json', 'examples/testjson_converted.csv')
def test_convert_geojson(self):
self.assertConverted('geojson', 'examples/test_geojson.json', 'examples/test_geojson.csv')
def test_convert_ndjson(self):
self.assertConverted('ndjson', 'examples/testjson_multiline.json', 'examples/testjson_multiline_converted.csv')
def test_convert_nested_json(self):
self.assertConverted('json', 'examples/testjson_nested.json', 'examples/testjson_nested_converted.csv')
def test_convert_xls(self):
self.assertConverted('xls', 'examples/test.xls', 'examples/testxls_converted.csv')
def test_convert_xls_with_sheet(self):
self.assertConverted('xls', 'examples/sheets.xls', 'examples/testxls_converted.csv', ['--sheet', 'data'])
def test_convert_xls_with_unicode_sheet(self):
self.assertLines(['--sheet', 'ʤ', 'examples/sheets.xls'], [
'a,b,c',
'1.0,2.0,3.0',
])
def test_convert_xls_with_skip_lines(self):
self.assertConverted('xls', 'examples/test_skip_lines.xls', 'examples/testxls_converted.csv', ['--skip-lines', '3'])
def test_convert_xlsx(self):
self.assertConverted('xlsx', 'examples/test.xlsx', 'examples/testxlsx_converted.csv')
def test_convert_xlsx_with_sheet(self):
self.assertConverted('xlsx', 'examples/sheets.xlsx', 'examples/testxlsx_converted.csv', ['--sheet', 'data'])
def test_convert_xlsx_with_unicode_sheet(self):
self.assertLines(['--sheet', 'ʤ', '--no-inference', 'examples/sheets.xlsx'], [
'a,b,c',
'1,2,3',
])
def test_convert_xlsx_with_skip_lines(self):
self.assertConverted('xlsx', 'examples/test_skip_lines.xlsx', 'examples/testxlsx_converted.csv', ['--skip-lines', '3'])
def test_names(self):
self.assertLines(['--names', 'examples/sheets.xlsx'], [
'not this one',
'data',
u'ʤ',
])
def test_csv_no_headers(self):
self.assertConverted('csv', 'examples/no_header_row.csv', 'examples/dummy.csv', ['--no-header-row', '--no-inference'])
def test_csv_no_headers_streaming(self):
self.assertConverted('csv', 'examples/no_header_row.csv', 'examples/dummy.csv', ['--no-header-row', '--no-inference', '--snifflimit', '0'])
def test_csv_datetime_inference(self):
input_file = six.StringIO('a\n2015-01-01T00:00:00Z')
with stdin_as_string(input_file):
self.assertLines(['-f', 'csv'], [
'a',
'2015-01-01T00:00:00+00:00',
])
input_file.close()
def test_csv_no_inference(self):
self.assertLines(['--no-inference', 'examples/dummy.csv'], [
'a,b,c',
'1,2,3',
])
def test_xls_no_inference(self):
self.assertLines(['--no-inference', 'examples/dummy.xls'], [
'a,b,c',
'1.0,2.0,3.0',
])
def test_xlsx_no_inference(self):
self.assertLines(['--no-inference', 'examples/dummy.xlsx'], [
'a,b,c',
'1,2,3',
])
def test_geojson_no_inference(self):
input_file = six.StringIO('{"a": 1, "b": 2, "type": "FeatureCollection", "features": [{"geometry": {}, "properties": {"a": 1, "b": 2, "c": 3}}]}')
with stdin_as_string(input_file):
self.assertLines(['--no-inference', '-f', 'geojson'], [
'id,a,b,c,geojson,type,longitude,latitude',
',1,2,3,{},,,',
])
input_file.close()
def test_json_no_inference(self):
input_file = six.StringIO('[{"a": 1, "b": 2, "c": 3}]')
with stdin_as_string(input_file):
self.assertLines(['--no-inference', '-f', 'json'], [
'a,b,c',
'1,2,3',
])
input_file.close()
def test_ndjson_no_inference(self):
input_file = six.StringIO('{"a": 1, "b": 2, "c": 3}')
with stdin_as_string(input_file):
self.assertLines(['--no-inference', '-f', 'ndjson'], [
'a,b,c',
'1,2,3',
])
input_file.close()
def test_names_xls(self):
output = self.get_output_as_io(['-n', 'examples/sheets.xls'])
self.assertEqual(next(output), 'not this one\n')
self.assertEqual(next(output), 'data\n')
def test_names_xlsx(self):
output = self.get_output_as_io(['-n', 'examples/sheets.xlsx'])
self.assertEqual(next(output), 'not this one\n')
self.assertEqual(next(output), 'data\n')
def test_convert_xls_with_write_sheets(self):
try:
self.assertConverted('xls', 'examples/sheets.xls', 'examples/testxls_converted.csv', ['--sheet', 'data', '--write-sheets', "ʤ,1"])
with open('examples/sheets_0.csv', 'r') as f:
with open('examples/testxls_unicode_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
with open('examples/sheets_1.csv', 'r') as f:
with open('examples/testxls_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
self.assertFalse(os.path.exists('examples/sheets_2.csv'))
finally:
for suffix in (0, 1):
path = 'examples/sheets_%d.csv' % suffix
if os.path.exists(path):
os.remove(path)
def test_convert_xlsx_with_write_sheets(self):
try:
self.assertConverted('xlsx', 'examples/sheets.xlsx', 'examples/testxlsx_noinference_converted.csv', ['--no-inference', '--sheet', 'data', '--write-sheets', "ʤ,1"])
with open('examples/sheets_0.csv', 'r') as f:
with open('examples/testxlsx_unicode_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
with open('examples/sheets_1.csv', 'r') as f:
with open('examples/testxlsx_noinference_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
self.assertFalse(os.path.exists('examples/sheets_2.csv'))
finally:
for suffix in (0, 1):
path = 'examples/sheets_%d.csv' % suffix
if os.path.exists(path):
os.remove(path)
|
pbrod/numpy | refs/heads/master | numpy/typing/tests/data/fail/arrayprint.py | 9 | from typing import Callable, Any
import numpy as np
AR: np.ndarray
func1: Callable[[Any], str]
func2: Callable[[np.integer[Any]], str]
np.array2string(AR, style=None) # E: Unexpected keyword argument
np.array2string(AR, legacy="1.14") # E: incompatible type
np.array2string(AR, sign="*") # E: incompatible type
np.array2string(AR, floatmode="default") # E: incompatible type
np.array2string(AR, formatter={"A": func1}) # E: incompatible type
np.array2string(AR, formatter={"float": func2}) # E: Incompatible types
|
jarvys/django-1.7-jdb | refs/heads/master | tests/model_options/test_tablespaces.py | 34 | from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.core.management.color import no_style
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models.tablespaces import (Article, ArticleRef, Authors, Reviewers,
Scientist, ScientistRef)
def sql_for_table(model):
return '\n'.join(connection.creation.sql_create_model(model,
no_style())[0])
def sql_for_index(model):
return '\n'.join(connection.creation.sql_indexes_for_model(model,
no_style()))
# We can't test the DEFAULT_TABLESPACE and DEFAULT_INDEX_TABLESPACE settings
# because they're evaluated when the model class is defined. As a consequence,
# @override_settings doesn't work, and the tests depend on the settings used
# to run the test suite.
class TablespacesTests(TestCase):
def setUp(self):
# The unmanaged models need to be removed after the test in order to
# prevent bad interactions with the flush operation in other tests.
self._old_models = apps.app_configs['model_options'].models.copy()
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = True
def tearDown(self):
for model in Article, Authors, Reviewers, Scientist:
model._meta.managed = False
apps.app_configs['model_options'].models = self._old_models
apps.all_models['model_options'] = self._old_models
apps.clear_cache()
def assertNumContains(self, haystack, needle, count):
real_count = haystack.count(needle)
self.assertEqual(real_count, count, "Found %d instances of '%s', "
"expected %d" % (real_count, needle, count))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_model(self):
sql = sql_for_table(Scientist).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the index on the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
@skipIfDBFeature('supports_tablespaces')
def test_tablespace_ignored_for_model(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Scientist),
sql_for_table(ScientistRef))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_indexed_field(self):
sql = sql_for_table(Article).lower()
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
# 1 for the table + 1 for the primary key + 1 for the index on code
self.assertNumContains(sql, 'tbl_tbsp', 3)
# 1 for the index on reference
self.assertNumContains(sql, 'idx_tbsp', 1)
@skipIfDBFeature('supports_tablespaces')
def test_tablespace_ignored_for_indexed_field(self):
# No tablespace-related SQL
self.assertEqual(sql_for_table(Article),
sql_for_table(ArticleRef))
@skipUnlessDBFeature('supports_tablespaces')
def test_tablespace_for_many_to_many_field(self):
sql = sql_for_table(Authors).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_index(Authors).lower()
# The ManyToManyField declares no db_tablespace, its indexes go to
# the model's tablespace, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
else:
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_table(Reviewers).lower()
# The join table of the ManyToManyField goes to the model's tablespace,
# and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
if settings.DEFAULT_INDEX_TABLESPACE:
# 1 for the table
self.assertNumContains(sql, 'tbl_tbsp', 1)
# 1 for the primary key
self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
else:
# 1 for the table + 1 for the index on the primary key
self.assertNumContains(sql, 'tbl_tbsp', 2)
self.assertNumContains(sql, 'idx_tbsp', 0)
sql = sql_for_index(Reviewers).lower()
# The ManyToManyField declares db_tablespace, its indexes go there.
self.assertNumContains(sql, 'tbl_tbsp', 0)
self.assertNumContains(sql, 'idx_tbsp', 2)
|
holmes/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/baseStringCheck.py | 78 | def repl(s):
if not isinstance(s, basestring):
return s
return s.replace(s.replace('a', 'b'), s)
|
DarrienG/Server-Automation | refs/heads/master | automate.py | 1 | #!/usr/bin/env python
__author__ = 'Darrien'
# sys is needed to skip the python script when updating
# os is needed for changing the working directory, and making folders
import sys
import os
# Kills the script in order to return to the bash script that called it.
def update_kill():
print "Killing main script..."
sys.exit("Updating... ")
# Creates a folder if there is none already there
# Also returns the path to the final folder created
def folder_creation(sub_dir, appender):
# Using the subdirectory obtained in the main method, gets the full directory path
filename = "/var/www/F14Courses/" + str(sub_dir)
filename += "/Assignment" + appender
# Checks to see if the folder already exists
if not os.path.exists(filename):
os.makedirs(filename)
print "Folder: \'Assignment" + appender + "\' created."
return filename
else:
print "Folder already exists. Folder not created."
return filename
# Creates how_many number of assignments using an outline
# how_many = number of assignments wanted, a_num = assignment number
def assignment_creator(how_many, a_num, script_ver):
thing = int(how_many)
safe_text = open("safe_text.txt", "w")
safe_text.write(how_many)
start = "Assignment" + a_num + "pe"
# Loop to make assignments until how_many
for i in range(thing):
thing = i + 1
if thing < 10:
ender = "0" + str(thing)
else:
ender = str(thing)
file_name = str(start + ender + ".c")
# Checking to see if file exists - will not overwrite files already there
if os.path.exists(file_name):
print "%s exists. New file not created." % file_name
else:
line = "/*****************************************************/\n"
filler = "/*" + " "*51 + "*/\n"
new_file = open(file_name, "w")
new_file.write(line)
new_file.write("/* Programmer: Darrien Bradley Glasser */\n")
new_file.write(filler)
new_file.write("/* Program: Assignment " + str(i + 1) + " */\n")
new_file.write(filler)
new_file.write("/* Approximate Completion Time: TO BE FILLED */\n")
new_file.write(line)
new_file.write("\n/*\nEnter description of program here.\n*/\n")
new_file.write("\n#include <stdio.h>\n\n")
new_file.write("int main(int argc, char*argv[]){\n")
new_file.write("\n\n\treturn 0;\n}")
new_file.write("\n\n/* Headers autogenerated by Darrien's Server Automation Script version: " + script_ver + " */")
new_file.close()
print "New file " + file_name + " created."
# Modifies existing HTML files, and generates new ones in order to display all files on darrieng.raspctl.com
# how_many = num of assignments, a_num = assignment number, direc = 101, 102, etc.
def html_gen(how_many, a_num, direc, script_ver):
# 'big_file' is the file that will contain all parts of the final HTML file
# 'top' contains the headers, 'links' contain links to the assignment folders, 'bottom' contains the closing tags
# VERY IMPORTANT: links must not be opened in a+, will be filled with garbage otherwise. See C: I/O bug
direc = str(direc)
big_file = open(direc + ".html", "w")
top = open("top.html", "r")
links = open("links.html", "a")
bottom = open("bottom.html", "r")
print "Modding: " + direc + ".html"
# Appending new assignment to links.html
links.write("<li><a href=\"/F14Courses/" + direc + "/Assignment" + a_num + ".html\">Assignment" + a_num + "</a></li>")
links.close()
links = open("links.html", "r")
# Adding top, links, and bottom together to make the final file
big_file.write(top.read())
big_file.write(links.read())
big_file.write(bottom.read())
big_file.close(); top.close(); links.close(); bottom.close()
print "File modifications completed."
# Move to directory with new assignments in it
os.chdir("Assignment" + a_num)
# Generating new HTML file in AssignmentXX folder pointing towards assignments
# Printing periods signifies that the server has not frozen
print "Creating Assignment" + a_num + ".html"
new_assignment = open("Assignment" + a_num + ".html", "w")
print ".",
new_assignment.write("<!DOCTYPE html PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n")
new_assignment.write("<html>\n")
new_assignment.write(" <head>\n")
new_assignment.write(" <title>\n")
new_assignment.write(" Assignment" + a_num)
new_assignment.write(" </title>\n")
new_assignment.write(" </head>\n")
new_assignment.write(" <body>\n")
new_assignment.write(" <h1>\n")
new_assignment.write(" Assignment" + a_num + "\n")
new_assignment.write(" </h1>\n")
new_assignment.write(" <nav>\n")
new_assignment.write(" <ul>")
print ".",
# Adding as many links as the user asked for
    for i in range(int(how_many)):
        thing = i + 1
        # Zero-pad single-digit numbers and link into the course directory (direc)
        if thing < 10:
            num = "0" + str(thing)
        else:
            num = str(thing)
        new_assignment.write("\n")
        new_assignment.write("<li><a href=\"/F14Courses/" + direc + "/Assignment" + a_num + "pe" + num +
                             ".c\">Assignment" + a_num + "pe" + num + "</a></li>")
        new_assignment.write("\n")
        print ".",
new_assignment.write("\n </ul>\n")
new_assignment.write(" <nav>\n")
new_assignment.write("<p>Page autogenerated by Darrien's Server Automation Script version: " + script_ver + "</p>\n")
new_assignment.write(" </body>\n")
new_assignment.write("</html>\n")
new_assignment.close()
print "\nAssignment" + a_num + ".html created."
print "HTML file generation completed."
if __name__ == "__main__":
# Opening print statements
script_ver = "0.2.7"
print "\n\n#######################################\n"
print "Server Automation Script version: " + script_ver
print "BETA VERSION: Script works, but may have odd quirks."
print "\n#######################################\n"
print "Welcome back Darrien!"
# Setting directory to root for easy switching
# retval will also save this directory for later
os.chdir("..")
retval = os.getcwd()
# 'u' will kill the script, and go straight to the bash script for updating all files
# 'F1' corresponds to Freshman year semester I. This pattern is expected to be carried forward
while True:
folder = raw_input("'u' = update or Y/Semester #\n> ")
if folder not in ("u", "F1", "f1", "F2", "f2"):
print "Typo. Please enter input again."
else:
break
if folder == 'u':
update_kill()
print "Please enter the assignment number."
a_num = raw_input("> ")
    if int(a_num) < 10:
appender = "0" + str(a_num)
else:
appender = str(a_num)
print "How many assignments?"
how_many = raw_input("> ")
if folder in ("f1", "F1"):
direc = 101
elif folder in ("f2", "F2"):
direc = 102
else:
f_path = "NOOOOOOOOOOOOOOO"
print "This should not happen. Spontaneous failure"
sys.exit("FAILURE")
# Creating a folder for the new assignment, and then returning the path to it in f_path (file_path)
f_path = folder_creation(direc, appender)
# Change directory to inside newly created folder, then create as many files as the user asked for
os.chdir(f_path)
assignment_creator(how_many, appender, script_ver)
# A file read/created in assignment_creator
# Assures that an HTML file with more assignments is not overwritten with an HTML file with fewer assignments
safe_text = open("safe_text.txt")
occupied = safe_text.read()
    if int(occupied) < int(how_many):
print "Number of assignments wanted is less than number of assignments created."
sys.exit("Skipping HTML gen script.")
# Moving up one directory from AssignmentXXpeXX to 101 or 102 or 1XX
os.chdir("..")
# Running the HTML file generation/modification method
html_gen(how_many, appender, direc, script_ver)
|
Vladimirek/DPexamples | refs/heads/master | omronDemo/py/www2plc.py | 1 | from dataplicity.client.task import Task, onsignal
from omronTcpFins import OmronPLC
class Www2plc(Task):
"""PLC data writer"""
def pre_startup(self):
"""Called prior to running the project"""
# self.conf contains the data- constants from the conf
self.livecfg = self.conf.get('valsetconfig')
@onsignal('settings_update', 'valueset')
def on_settings_update(self, name, settings):
"""Catches the 'settings_update' signal for 'valueset'"""
# This signal is sent on startup and whenever settings are changed by the server
self.plcip = settings.get(self.livecfg, 'splcip')
self.plcport = settings.get_integer(self.livecfg, 'splcport', 9600)
self.memadr = settings.get(self.livecfg, 'smemaddr', "A0")
self.savevalue = settings.get_float(self.livecfg, 'savevalue', 0.0)
self.log.debug(" SettingValue updated: valueset {}:{}".format(self.memadr, self.savevalue))
#write data to Omron PLC:
plc = OmronPLC( )
plc.openFins( self.plcip, self.plcport)
plc.writeFloat( self.memadr, self.savevalue)
plc.close()
def poll(self):
"""Called on a schedule defined in dataplicity.conf"""
pass #nothing to do regullary
|
ashemedai/ansible | refs/heads/devel | lib/ansible/modules/utilities/logic/async_wrapper.py | 89 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import json
except ImportError:
import simplejson as json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
PY3 = sys.version_info[0] == 3
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
# decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
os.setsid()
os.umask(int('022', 8))
# do second fork
try:
pid = os.fork()
if pid > 0:
# print "Daemon PID %d" % pid
sys.exit(0)
except OSError:
e = sys.exc_info()[1]
sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
dev_null = open('/dev/null', 'w')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
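# Note: the fork / setsid / fork sequence above is the classic Unix double-fork
# daemonization recipe: the first fork lets the original parent exit, setsid()
# detaches the process from its controlling terminal, and the second fork ensures
# the daemon can never reacquire one; stdio is then redirected to /dev/null.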
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
    tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
    Filters leading lines before the first line-starting occurrence of '{' or '[', and filters all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
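# Illustrative example of the filtering above (made-up data, not from a real run):
# for '\n'.join(['dropbear MOTD banner', '{"changed": false}', 'trailing noise']),
# _filter_non_json_lines() returns ('{"changed": false}', [one warning noting the
# trailing junk]).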
def _get_interpreter(module_path):
module_fd = open(module_path, 'rb')
try:
head = module_fd.read(1024)
if head[0:2] != '#!':
return None
return head[2:head.index('\n')].strip().split(' ')
finally:
module_fd.close()
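# For example (illustrative), a module whose first line is "#!/usr/bin/python -tt"
# yields ['/usr/bin/python', '-tt'] from _get_interpreter(), while a binary module
# without a shebang yields None.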
def _run_module(wrapped_cmd, jid, job_path):
tmp_job_path = job_path + ".tmp"
jobfile = open(tmp_job_path, "w")
jobfile.write(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid }))
jobfile.close()
os.rename(tmp_job_path, job_path)
jobfile = open(tmp_job_path, "w")
result = {}
outdata = ''
filtered_outdata = ''
stderr = ''
try:
cmd = shlex.split(wrapped_cmd)
# call the module interpreter directly (for non-binary modules)
# this permits use of a script for an interpreter on non-Linux platforms
interpreter = _get_interpreter(cmd[0])
if interpreter:
cmd = interpreter + cmd
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(outdata, stderr) = script.communicate()
if PY3:
outdata = outdata.decode('utf-8', 'surrogateescape')
stderr = stderr.decode('utf-8', 'surrogateescape')
(filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)
result = json.loads(filtered_outdata)
if json_warnings:
# merge JSON junk warnings with any existing module warnings
module_warnings = result.get('warnings', [])
if not isinstance(module_warnings, list):
module_warnings = [module_warnings]
module_warnings.extend(json_warnings)
result['warnings'] = module_warnings
if stderr:
result['stderr'] = stderr
jobfile.write(json.dumps(result))
except (OSError, IOError):
e = sys.exc_info()[1]
result = {
"failed": 1,
"cmd" : wrapped_cmd,
"msg": str(e),
"outdata": outdata, # temporary notice only
"stderr": stderr
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
except (ValueError, Exception):
result = {
"failed" : 1,
"cmd" : wrapped_cmd,
"data" : outdata, # temporary notice only
"stderr": stderr,
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
jobfile.write(json.dumps(result))
jobfile.close()
os.rename(tmp_job_path, job_path)
####################
## main ##
####################
if __name__ == '__main__':
if len(sys.argv) < 5:
print(json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
"Humans, do not call directly!"
}))
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
if '-tmp-' not in os.path.dirname(wrapped_module):
preserve_tmp = True
elif len(sys.argv) > 5:
preserve_tmp = sys.argv[5] == '-preserve_tmp'
else:
preserve_tmp = False
# consider underscore as no argsfile so we can support passing of additional positional parameters
if argsfile != '_':
cmd = "%s %s" % (wrapped_module, argsfile)
else:
cmd = wrapped_module
step = 5
# setup job output directory
jobdir = os.path.expanduser("~/.ansible_async")
job_path = os.path.join(jobdir, jid)
if not os.path.exists(jobdir):
try:
os.makedirs(jobdir)
except:
print(json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % jobdir
}))
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid, "results_file" : job_path,
"_ansible_suppress_tmpdir_delete": not preserve_tmp}))
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
notice("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
notice("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(step)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
notice("%s still running (%s)"%(sub_pid, remaining))
time.sleep(step)
remaining = remaining - step
if remaining <= 0:
notice("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s"%sub_pid)
time.sleep(1)
if not preserve_tmp:
shutil.rmtree(os.path.dirname(wrapped_module), True)
sys.exit(0)
notice("Done in kid B.")
if not preserve_tmp:
shutil.rmtree(os.path.dirname(wrapped_module), True)
sys.exit(0)
else:
# the child process runs the actual module
notice("Start module (%s)"%os.getpid())
_run_module(cmd, jid, job_path)
notice("Module complete (%s)"%os.getpid())
sys.exit(0)
except SystemExit:
# On python2.4, SystemExit is a subclass of Exception.
# This block makes python2.4 behave the same as python2.5+
raise
except Exception:
e = sys.exc_info()[1]
notice("error: %s"%(e))
print(json.dumps({
"failed" : True,
"msg" : "FATAL ERROR: %s" % str(e)
}))
sys.exit(1)
|
vitaly4uk/django | refs/heads/master | tests/m2o_recursive/models.py | 282 | """
Relating an object to itself, many-to-one
To define a many-to-one relationship between a model and itself, use
``ForeignKey('self', ...)``.
In this example, a ``Category`` is related to itself. That is, each
``Category`` has a parent ``Category``.
Set ``related_name`` to designate what the reverse relationship is called.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
parent = models.ForeignKey('self', models.SET_NULL, blank=True, null=True, related_name='child_set')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Person(models.Model):
full_name = models.CharField(max_length=20)
mother = models.ForeignKey('self', models.SET_NULL, null=True, related_name='mothers_child_set')
father = models.ForeignKey('self', models.SET_NULL, null=True, related_name='fathers_child_set')
def __str__(self):
return self.full_name
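# Illustrative usage of the self-referential relations above (sketch only, not part
# of the original test models):
#
#   tools = Category.objects.create(name='Tools')
#   hammers = Category.objects.create(name='Hammers', parent=tools)
#   tools.child_set.all()            # reverse FK via related_name='child_set'
#   mom = Person.objects.create(full_name='Mom')
#   dad = Person.objects.create(full_name='Dad')
#   kid = Person.objects.create(full_name='Kid', mother=mom, father=dad)
#   mom.mothers_child_set.all()      # reverse relation named 'mothers_child_set'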
|
FHannes/intellij-community | refs/heads/master | python/testData/mover/simple_afterUp.py | 91 | a = 1
b = 2
|
oseledets/pybtex | refs/heads/master | pybtex/tests/plugin_test.py | 1 | # Copyright (c) 2014 Matthias C. M. Troffaes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
import nose.tools
import pybtex.database.input.bibtex
import pybtex.plugin
import pybtex.style.formatting.plain
def test_plugin_loader():
"""Check that all enumerated plugins can be imported."""
for group in pybtex.plugin._DEFAULT_PLUGINS:
for name in pybtex.plugin.enumerate_plugin_names(group):
pybtex.plugin.find_plugin(group, name)
class TestPlugin1(pybtex.plugin.Plugin):
pass
class TestPlugin2(pybtex.plugin.Plugin):
pass
class TestPlugin3(pybtex.plugin.Plugin):
pass
class TestPlugin4(pybtex.plugin.Plugin):
pass
def test_register_plugin_1():
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'yippikayee', TestPlugin1))
nose.tools.assert_is(
TestPlugin1, pybtex.plugin.find_plugin(
'pybtex.style.formatting', 'yippikayee'))
nose.tools.assert_false(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'yippikayee', TestPlugin2))
nose.tools.assert_is(
TestPlugin1, pybtex.plugin.find_plugin(
'pybtex.style.formatting', 'yippikayee'))
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'yippikayee', TestPlugin2, force=True))
nose.tools.assert_is(
TestPlugin2, pybtex.plugin.find_plugin(
'pybtex.style.formatting', 'yippikayee'))
def test_register_plugin_2():
nose.tools.assert_false(
pybtex.plugin.register_plugin(
'pybtex.style.formatting', 'plain', TestPlugin2))
plugin = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')
nose.tools.assert_is_not(plugin, TestPlugin2)
nose.tools.assert_is(plugin, pybtex.style.formatting.plain.Style)
def test_register_plugin_3():
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.style.formatting.suffixes', '.woo', TestPlugin3))
plugin = pybtex.plugin.find_plugin(
'pybtex.style.formatting', filename='test.woo')
nose.tools.assert_is(plugin, TestPlugin3)
def test_bad_find_plugin():
nose.tools.assert_raises(
pybtex.plugin.PluginGroupNotFound,
lambda: pybtex.plugin.find_plugin("pybtex.invalid.group", "__oops"))
nose.tools.assert_raises_regexp(
pybtex.plugin.PluginNotFound,
re.escape('plugin pybtex.style.formatting.__oops not found'),
lambda: pybtex.plugin.find_plugin("pybtex.style.formatting", "__oops"))
nose.tools.assert_raises(
pybtex.plugin.PluginNotFound,
lambda: pybtex.plugin.find_plugin("pybtex.style.formatting",
filename="oh.__oops"))
def test_bad_register_plugin():
nose.tools.assert_raises(
pybtex.plugin.PluginGroupNotFound,
lambda: pybtex.plugin.register_plugin(
"pybtex.invalid.group", "__oops", TestPlugin1))
nose.tools.assert_raises(
pybtex.plugin.PluginGroupNotFound,
lambda: pybtex.plugin.register_plugin(
"pybtex.invalid.group.suffixes", ".__oops", TestPlugin1))
# suffixes must start with a dot
nose.tools.assert_raises(
ValueError,
lambda: pybtex.plugin.register_plugin(
"pybtex.style.formatting.suffixes", "notasuffix", TestPlugin1))
def test_plugin_suffix():
plugin = pybtex.plugin.find_plugin(
"pybtex.database.input", filename="test.bib")
nose.tools.assert_is(plugin, pybtex.database.input.bibtex.Parser)
def test_plugin_alias():
pybtex.plugin._DEFAULT_PLUGINS['pybtex.legacy.input'] = 'punchcard'
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.legacy.input', 'punchcard', TestPlugin4))
nose.tools.assert_true(
pybtex.plugin.register_plugin(
'pybtex.legacy.input.aliases', 'punchedcard', TestPlugin4))
nose.tools.assert_equal(
list(pybtex.plugin.enumerate_plugin_names('pybtex.legacy.input')),
['punchcard']
)
plugin = pybtex.plugin.find_plugin("pybtex.legacy.input", 'punchedcard')
nose.tools.assert_equal(plugin, TestPlugin4)
del pybtex.plugin._DEFAULT_PLUGINS['pybtex.legacy.input']
def test_plugin_class():
"""If a plugin class is passed to find_plugin(), it shoud be returned back."""
plugin = pybtex.plugin.find_plugin("pybtex.database.input", 'bibtex')
plugin2 = pybtex.plugin.find_plugin("pybtex.database.input", plugin)
nose.tools.assert_equal(plugin, plugin2)
|
Juniper/contrail-dev-neutron | refs/heads/master | neutron/openstack/common/local.py | 378 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
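# Illustrative behaviour of the weak stores above (sketch, not part of the module):
#
#   class Context(object):
#       pass
#   ctx = Context()
#   weak_store.context = ctx   # only a weak reference is stored
#   weak_store.context         # -> ctx while the object is still alive
#   del ctx                    # after the object is garbage collected ...
#   weak_store.context         # -> None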
|
commonwealth-of-puerto-rico/lean | refs/heads/master | paart/apps/workflows/migrations/0008_auto__chg_field_workflowinstancehistory_workflow_instance.py | 2 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'WorkflowInstanceHistory.workflow_instance'
db.alter_column('workflows_workflowinstancehistory', 'workflow_instance_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['workflows.WorkflowInstance']))
def backwards(self, orm):
# Changing field 'WorkflowInstanceHistory.workflow_instance'
db.alter_column('workflows_workflowinstancehistory', 'workflow_instance_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['workflows.WorkflowType']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'workflows.workflowinstance': {
'Meta': {'object_name': 'WorkflowInstance'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 21, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'workflow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowType']"})
},
'workflows.workflowinstancehistory': {
'Meta': {'object_name': 'WorkflowInstanceHistory'},
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 21, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'workflow_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowInstance']"}),
'workflow_type_action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowTypeAction']"})
},
'workflows.workflowinstancestate': {
'Meta': {'object_name': 'WorkflowInstanceState'},
'datetime_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 21, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'workflow_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['workflows.WorkflowInstance']", 'unique': 'True'}),
'workflow_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowTypeState']"})
},
'workflows.workflowtype': {
'Meta': {'ordering': "['label']", 'object_name': 'WorkflowType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowTypeAction']", 'null': 'True', 'blank': 'True'}),
'initial_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowTypeState']", 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'workflows.workflowtypeaction': {
'Meta': {'ordering': "['label']", 'unique_together': "(('workflow_type', 'label'),)", 'object_name': 'WorkflowTypeAction'},
'allow_runtime_assignee_group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_runtime_assignee_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_assignee_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'auto_assignee_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'connections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['workflows.WorkflowTypeAction']", 'symmetrical': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowTypeState']", 'null': 'True', 'blank': 'True'}),
'workflow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowType']"})
},
'workflows.workflowtyperelation': {
'Meta': {'unique_together': "(('workflow_type', 'content_type'),)", 'object_name': 'WorkflowTypeRelation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'workflow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowType']"})
},
'workflows.workflowtypestate': {
'Meta': {'ordering': "['label']", 'unique_together': "(('workflow_type', 'label'),)", 'object_name': 'WorkflowTypeState'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'workflow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflows.WorkflowType']"})
}
}
    complete_apps = ['workflows']
|
anivk/riceai-traffic | refs/heads/master | speeds/json_to_mongo.py | 1 | from os import listdir
from os.path import isfile, join
mypath = "C:\\flow"
for folder in listdir(mypath):
if not isfile(join(mypath,folder)):
folder_path = join(mypath,folder)
for json_file in listdir(folder_path):
if json_file.endswith('.json'):
json_file_path = join(folder_path, json_file)
with open('script.cmd', 'a') as f:
f.write(r'"C:\Program Files\MongoDB\Server\3.2\bin\mongoimport.exe" --host localhost --port 27017 --collection flow --db traffic --file "'+json_file_path+ '" --numInsertionWorkers 16' + '\n')
|
WSDC-NITWarangal/django | refs/heads/master | django/conf/locale/nl/__init__.py | 12133432 | |
Kami/libcloud | refs/heads/trunk | integration/driver/__init__.py | 12133432 | |
rhertzog/django | refs/heads/master | django/contrib/sessions/management/commands/__init__.py | 12133432 | |
bols-blue/ansible | refs/heads/devel | test/units/__init__.py | 267 |
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
IronLanguages/ironpython2 | refs/heads/master | Src/StdLib/Lib/test/test_fcntl.py | 45 | """Test program for the fcntl C module.
OS/2+EMX doesn't support the file locking operations.
"""
import os
import struct
import sys
import unittest
from test.test_support import (verbose, TESTFN, unlink, run_unittest,
import_module, cpython_only)
# Skip test if no fcntl module.
fcntl = import_module('fcntl')
# TODO - Write tests for flock() and lockf().
def get_lockdata():
if sys.platform.startswith('atheos'):
start_len = "qq"
else:
try:
os.O_LARGEFILE
except AttributeError:
start_len = "ll"
else:
start_len = "qq"
if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd', 'bsdos'))
or sys.platform == 'darwin'):
if struct.calcsize('l') == 8:
off_t = 'l'
pid_t = 'i'
else:
off_t = 'lxxxx'
pid_t = 'l'
lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
fcntl.F_WRLCK, 0)
elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
elif sys.platform in ['os2emx']:
lockdata = None
else:
lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
if verbose:
print 'struct.pack: ', repr(lockdata)
return lockdata
lockdata = get_lockdata()
class BadFile:
def __init__(self, fn):
self.fn = fn
def fileno(self):
return self.fn
class TestFcntl(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f and not self.f.closed:
self.f.close()
unlink(TESTFN)
def test_fcntl_fileno(self):
# the example from the library docs
self.f = open(TESTFN, 'w')
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print 'Status from fcntl with O_NONBLOCK: ', rv
if sys.platform not in ['os2emx']:
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
if verbose:
print 'String from fcntl with F_SETLKW: ', repr(rv)
self.f.close()
def test_fcntl_file_descriptor(self):
# again, but pass the file rather than numeric descriptor
self.f = open(TESTFN, 'w')
rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
if sys.platform not in ['os2emx']:
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
self.f.close()
def test_fcntl_bad_file(self):
with self.assertRaises(ValueError):
fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(TypeError):
fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK)
@cpython_only
def test_fcntl_bad_file_overflow(self):
from _testcapi import INT_MAX, INT_MIN
# Issue 15989
with self.assertRaises(ValueError):
fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK)
with self.assertRaises(ValueError):
fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK)
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
try:
cmd = fcntl.F_NOTIFY
# This flag is larger than 2**31 in 64-bit builds
flags = fcntl.DN_MULTISHOT
except AttributeError:
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
# This will raise OverflowError if issue1309352 is present.
fcntl.fcntl(fd, cmd, flags)
except IOError:
pass # Running on a system that doesn't support these flags.
finally:
os.close(fd)
def test_main():
run_unittest(TestFcntl)
if __name__ == '__main__':
test_main()
|
skomski/duktape | refs/heads/master | examples/alloc-logging/pool_simulator.py | 11 | #!/usr/bin/python
#
# Simulate pool allocator behavior against a memory allocation log written
# by duk_alloc_logging.c or in a matching format. Provides commands to produce
# statistics and graphs, and to optimize pool counts for single or multiple
# application profiles.
#
# The pool allocator simulator incorporates quite basic pool features
# including "borrowing" from larger pool sizes. The behavior matches
# AllJoyn.js ajs_heap.c allocator:
#
# https://git.allseenalliance.org/cgit/core/alljoyn-js.git/tree/ajs_heap.c
#
# If your pool allocator has different behavior (e.g. ability to split and
# merge pool entries) you'll need to modify the simulator to properly
# optimize pool counts.
#
# Pool configuration and pool state are both expressed in JSON compatible
# form internally so that they can be read from and written to files easily.
#
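# For reference, the pool config JSON consumed below is assumed to look roughly
# like this (field names inferred from the code; sizes and counts are examples):
#
#   {
#       "pools": [
#           { "size": 8,   "count": 10, "borrow": true,  "heap_index": 0 },
#           { "size": 16,  "count": 10, "borrow": true,  "heap_index": 0 },
#           { "size": 256, "count": 2,  "borrow": false, "heap_index": 0 }
#       ]
#   }
#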
import os
import sys
import math
import json
import optparse
#---------------------------------------------------------------------------
#
# Various helpers
#
def dprint(x):
sys.stderr.write('%s\n' % x)
sys.stderr.flush()
def readJson(fn):
f = open(fn, 'rb')
d = f.read()
f.close()
return json.loads(d)
def readFile(fn):
f = open(fn, 'rb')
d = f.read()
f.close()
return d
def writeJson(fn, val):
f = open(fn, 'wb')
f.write(json.dumps(val, indent=4, ensure_ascii=True, sort_keys=True))
f.close()
def writeFile(fn, val):
f = open(fn, 'wb')
f.write(val)
f.close()
# Clone a pool config (state), with all runtime fields intact
def clonePool(pool):
return json.loads(json.dumps(pool))
# Clone a pool config, but clean it of any runtime fields
def clonePoolCleaned(pool):
p = json.loads(json.dumps(pool))
for k in [ 'entries', 'ajs_use', 'ajs_hwm', 'ajs_min', 'ajs_max' ]:
if p.has_key(k):
del p[k]
return p
#---------------------------------------------------------------------------
#
# Pool allocator simulator
#
# Pointers are represented simply as running numbers; 0 is NULL, and other
# numbers are simply alloc/realloc count.
nullPtr = 0
nextPtr = 1
HUGE = 0x100000000 # used for min()
class AllocFailedException(Exception):
pass
class PoolSimulator:
state = None
config = None
allow_borrow = True # matches ajs_heap.c
auto_extend = True # for getting hwm w/o borrowing
ignore_zero_alloc = False # matches ajs_heap.c
def __init__(self, config, borrow=True, extend=False):
global nextPtr
self.allow_borrow = borrow
self.auto_extend = extend
self.state = { 'pools': [] }
self.config = json.loads(json.dumps(config)) # verify and clone
for cfg in config['pools']:
st = json.loads(json.dumps(cfg))
st['entries'] = []
st['ajs_use'] = 0 # entries in use
st['ajs_hwm'] = 0 # max entries in use
#st['ajs_min'] = None # min alloc size
#st['ajs_max'] = None # max alloc size
st['heap_index'] = st.get('heap_index', 0) # ajs specific
for i in xrange(cfg['count']):
ent = { 'alloc_size': None,
'entry_size': st['size'],
'borrowed': False } # free
ent['pointer'] = nextPtr
nextPtr += 1
st['entries'].append(ent)
self.state['pools'].append(st)
def alloc(self, size):
global nextPtr
#print('alloc %d' % size)
if size == 0 and self.ignore_zero_alloc:
return nullPtr
borrowed = False
def alloc_match(e):
e['alloc_size'] = size
e['borrowed'] = borrowed
p['ajs_use'] += 1
p['ajs_hwm'] = max(p['ajs_use'], p['ajs_hwm'])
p['ajs_min'] = min(p.get('ajs_min', HUGE), size)
p['ajs_max'] = max(p.get('ajs_max', 0), size)
return e['pointer']
for p in self.state['pools']:
if p['size'] < size:
continue
for e in p['entries']:
if e['alloc_size'] is not None:
continue
return alloc_match(e)
# Auto extend for measuring pool hwm without borrowing
if self.auto_extend:
ent = { 'alloc_size': None,
'entry_size': p['size'],
'borrowed': False,
'extended': True }
ent['pointer'] = nextPtr
nextPtr += 1
p['entries'].append(ent)
return alloc_match(ent)
if not self.allow_borrow or not p['borrow']:
raise AllocFailedException('alloc failure for size %d: pool full, no borrow' % size)
borrowed = True
raise AllocFailedException('alloc failure for size %d: went through all pools, no space' % size)
def realloc(self, ptr, size):
#print('realloc %d %d' % (ptr, size))
if ptr == nullPtr:
return self.alloc(size)
if size == 0:
self.free(ptr)
return nullPtr
# ptr != NULL and size != 0 here
for idx in xrange(len(self.state['pools'])):
p = self.state['pools'][idx]
prev_p = None
if idx >= 0:
prev_p = self.state['pools'][idx - 1]
for e in p['entries']:
if e['pointer'] == ptr:
if e['alloc_size'] is None:
raise AllocFailedException('realloc failure for pointer %d: entry not allocated (double free)' % ptr)
fits_current = (size <= p['size'])
fits_previous = (prev_p is not None and size <= prev_p['size'])
if fits_current and not fits_previous:
# New alloc size fits current pool and won't fit into
# previous pool (so it could be shrunk).
p['ajs_max'] = max(p.get('ajs_max', 0), size)
return ptr
# Reallocate entry (smaller or larger).
# Note: when shrinking, ajs_heap.c doesn't make sure
# there's actually a free entry in the smaller pool.
# This affects only some corner cases, but match
# that behavior here.
newPtr = self.alloc(size)
self.free(ptr)
return newPtr
raise AllocFailedException('free failure for pointer %d: cannot find pointer' % ptr)
def free(self, ptr):
#print('free %d' % ptr)
if ptr == nullPtr:
return
for p in self.state['pools']:
for e in p['entries']:
if e['pointer'] == ptr:
if e['alloc_size'] is None:
raise AllocFailedException('free failure for pointer %d: entry not allocated (double free)' % ptr)
e['alloc_size'] = None
e['borrowed'] = False
p['ajs_use'] -= 1
return
raise AllocFailedException('free failure for pointer %d: cannot find pointer' % ptr)
# Get a list of pool byte sizes.
def getSizes(self):
res = []
for p in self.state['pools']:
res.append(p['size'])
return res
# Get stats from current allocation state.
def stats(self):
alloc_bytes = 0
waste_bytes = 0
free_bytes = 0
ajs_hwm_bytes = 0 # these correspond to runtime values from ajs_heap.c
ajs_use_bytes = 0 # and are approximate
ajs_waste_bytes = 0
by_pool = []
for p in self.state['pools']:
alloc_bytes_pool = 0
waste_bytes_pool = 0
free_bytes_pool = 0
for e in p['entries']:
if e['alloc_size'] is None:
free_bytes_pool += e['entry_size']
else:
alloc_bytes_pool += e['alloc_size']
waste_bytes_pool += e['entry_size'] - e['alloc_size']
ajs_use_count_pool = p['ajs_use']
ajs_hwm_count_pool = p['ajs_hwm']
ajs_min_bytes_pool = p.get('ajs_min', 0)
ajs_max_bytes_pool = p.get('ajs_max', 0)
ajs_hwm_bytes_pool = p['ajs_hwm'] * p['size']
ajs_use_bytes_pool = p['ajs_use'] * p['size']
ajs_waste_bytes_pool = p['ajs_hwm'] * (p['size'] - p.get('ajs_max', 0))
by_pool.append({
'size': p['size'],
'alloc': alloc_bytes_pool,
'waste': waste_bytes_pool,
'free': free_bytes_pool,
'ajs_use_count': ajs_use_count_pool,
'ajs_hwm_count': ajs_hwm_count_pool,
'ajs_min_bytes': ajs_min_bytes_pool,
'ajs_max_bytes': ajs_max_bytes_pool,
'ajs_hwm_bytes': ajs_hwm_bytes_pool,
'ajs_use_bytes': ajs_use_bytes_pool,
'ajs_waste_bytes': ajs_waste_bytes_pool
})
alloc_bytes += alloc_bytes_pool
waste_bytes += waste_bytes_pool
free_bytes += free_bytes_pool
ajs_hwm_bytes += ajs_hwm_bytes_pool
ajs_use_bytes += ajs_use_bytes_pool
ajs_waste_bytes += ajs_waste_bytes_pool
return {
'alloc_bytes': alloc_bytes,
'waste_bytes': waste_bytes,
'free_bytes': free_bytes,
'ajs_hwm_bytes': ajs_hwm_bytes,
'ajs_use_bytes': ajs_use_bytes,
'ajs_waste_bytes': ajs_waste_bytes,
'byPool': by_pool
}
# Get "tight" pool config based on hwm of each pool size.
def getTightHwmConfig(self):
pools = []
cfg = { 'pools': pools }
total_bytes = 0
for p in self.state['pools']:
pool = clonePoolCleaned(p)
pool['count'] = p['ajs_hwm']
pools.append(pool)
total_bytes += pool['size'] * pool['count']
cfg['total_bytes'] = total_bytes
return cfg
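# Illustrative PoolSimulator usage (sketch only; the commands below drive it through
# processAllocLog() instead, and 'pool_config.json' is just a placeholder name):
#
#   ps = PoolSimulator(readJson('pool_config.json'), borrow=True, extend=False)
#   ptr = ps.alloc(24)           # served from the smallest pool with size >= 24
#   ptr = ps.realloc(ptr, 200)   # may move the entry into a larger pool
#   ps.free(ptr)
#   print(ps.stats()['waste_bytes'])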
#---------------------------------------------------------------------------
#
# Simulation: replay an allocation log
#
xIndex = 0
def processAllocLog(ps, f_log, out_dir, throw_on_oom=True, emit_files=True):
# map native pointer to current simulator pointer
ptrmap = {}
def writeFile(fn, line):
f = open(fn, 'ab')
f.write(line + '\n')
f.close()
def emitStats():
global xIndex
if not emit_files:
return
stats = ps.stats()
writeFile(os.path.join(out_dir, 'alloc_bytes_all.txt'), '%d %d' % (xIndex, stats['alloc_bytes']))
writeFile(os.path.join(out_dir, 'waste_bytes_all.txt'), '%d %d' % (xIndex, stats['waste_bytes']))
writeFile(os.path.join(out_dir, 'free_bytes_all.txt'), '%d %d' % (xIndex, stats['free_bytes']))
writeFile(os.path.join(out_dir, 'ajs_hwm_bytes_all.txt'), '%d %d' % (xIndex, stats['ajs_hwm_bytes']))
writeFile(os.path.join(out_dir, 'ajs_use_bytes_all.txt'), '%d %d' % (xIndex, stats['ajs_use_bytes']))
writeFile(os.path.join(out_dir, 'ajs_waste_bytes_all.txt'), '%d %d' % (xIndex, stats['ajs_waste_bytes']))
for p in stats['byPool']:
writeFile(os.path.join(out_dir, 'alloc_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['alloc']))
writeFile(os.path.join(out_dir, 'waste_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['waste']))
writeFile(os.path.join(out_dir, 'free_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['free']))
writeFile(os.path.join(out_dir, 'ajs_use_count_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_use_count']))
writeFile(os.path.join(out_dir, 'ajs_hwm_count_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_hwm_count']))
writeFile(os.path.join(out_dir, 'ajs_min_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_min_bytes']))
writeFile(os.path.join(out_dir, 'ajs_max_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_max_bytes']))
writeFile(os.path.join(out_dir, 'ajs_hwm_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_hwm_bytes']))
writeFile(os.path.join(out_dir, 'ajs_use_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_use_bytes']))
writeFile(os.path.join(out_dir, 'ajs_waste_bytes_%d.txt' % p['size']), '%d %d' % (xIndex, p['ajs_waste_bytes']))
xIndex += 1
def emitSnapshot(count):
if not emit_files:
return
f = open(os.path.join(out_dir, 'state_%d.json' % count), 'wb')
f.write(json.dumps(ps.state, indent=4))
f.close()
stats = ps.stats()
for p in stats['byPool']:
logsize = math.log(p['size'], 2)
writeFile(os.path.join(out_dir, 'alloc_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['alloc'], p['size']))
writeFile(os.path.join(out_dir, 'waste_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['waste'], p['size']))
writeFile(os.path.join(out_dir, 'free_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['free'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_use_count_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_use_count'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_hwm_count_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_hwm_count'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_min_bytes_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_min_bytes'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_max_bytes_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_max_bytes'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_hwm_bytes_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_hwm_bytes'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_use_bytes_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_use_bytes'], p['size']))
writeFile(os.path.join(out_dir, 'ajs_waste_bytes_bypool_%d.txt' % count), '%f %d # size=%d' % (logsize, p['ajs_waste_bytes'], p['size']))
sys.stdout.write('Simulating...')
sys.stdout.flush()
success = False
try:
count = 0
for line in f_log:
count += 1
if (count % 1000) == 0:
sys.stdout.write('.')
sys.stdout.flush()
emitSnapshot(count)
emitStats()
line = line.strip()
parts = line.split(' ')
# A ptr/NULL/FAIL size
# F ptr/NULL size
# R ptr/NULL oldsize ptr/NULL/FAIL newsize
if len(parts) < 1:
pass # ignore
elif parts[0] == 'A':
if parts[1] == 'FAIL':
pass
elif parts[1] == 'NULL':
ps.alloc(nullPtr)
else:
ptrmap[parts[1]] = ps.alloc(long(parts[2]))
elif parts[0] == 'F':
if parts[1] == 'NULL':
ps.free(nullPtr)
else:
ptr = ptrmap[parts[1]]
ps.free(ptr)
del ptrmap[parts[1]]
elif parts[0] == 'R':
# oldsize is not needed; don't use because e.g. ajduk
# log stats don't provide it
if parts[1] == 'NULL':
oldptr = nullPtr
else:
oldptr = ptrmap[parts[1]]
if parts[3] == 'FAIL':
pass
else:
newsize = long(parts[4])
newptr = ps.realloc(oldptr, newsize)
if newptr == nullPtr and newsize > 0:
# Failed/freed, don't update pointers
pass
else:
if parts[1] != 'NULL' and ptrmap.has_key(parts[1]):
del ptrmap[parts[1]]
if parts[3] != 'NULL':
ptrmap[parts[3]] = newptr
else:
pass # ignore
sys.stdout.write(' done\n')
sys.stdout.flush()
success = True
except AllocFailedException:
sys.stdout.write(' failed, out of memory\n')
sys.stdout.flush()
if throw_on_oom:
raise Exception('out of memory')
emitSnapshot(count)
emitStats()
return success
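# Example allocation log excerpt in the format parsed above (pointer tokens are
# arbitrary strings; the hex values here are made up):
#
#   A 0x1f00 256
#   R 0x1f00 256 0x1f80 512
#   F 0x1f80 512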
#---------------------------------------------------------------------------
#
# Gnuplot helper
#
def gnuplotGraphs(ps, out_dir):
def plot(files, out_fn):
f = open('/tmp/gnuplot-commands', 'wb')
f.write('set terminal dumb\n')
for idx, fn in enumerate(files):
full_fn = os.path.join(out_dir, fn)
cmd = 'plot'
if idx > 0:
cmd = 'replot'
f.write('%s "%s" with lines\n' % (cmd, full_fn))
#f.write('%s "%s" with boxes\n' % (cmd, full_fn))
f.write('set terminal pngcairo size 1024,768\n')
f.write('set output "%s"\n' % os.path.join(out_dir, out_fn))
f.write('replot\n')
f.close()
os.system('gnuplot </tmp/gnuplot-commands >/dev/null 2>/dev/null')
plot([ 'alloc_bytes_all.txt',
'waste_bytes_all.txt',
'free_bytes_all.txt' ], 'alloc_waste_free_all.png')
plot([ 'alloc_bytes_all.txt',
'waste_bytes_all.txt',
'free_bytes_all.txt',
'ajs_hwm_bytes_all.txt',
'ajs_use_bytes_all.txt',
'ajs_waste_bytes_all.txt' ], 'alloc_waste_free_withajs_all.png')
plot([ 'alloc_bytes_all.txt',
'waste_bytes_all.txt' ], 'alloc_waste_all.png')
plot([ 'alloc_bytes_all.txt',
'waste_bytes_all.txt',
'ajs_hwm_bytes_all.txt',
'ajs_use_bytes_all.txt',
'ajs_waste_bytes_all.txt' ], 'alloc_waste_withajs_all.png')
for sz in ps.getSizes():
plot([ 'alloc_bytes_%d.txt' % sz,
'waste_bytes_%d.txt' % sz,
'free_bytes_%d.txt' % sz ], 'alloc_waste_free_%d.png' % sz)
plot([ 'alloc_bytes_%d.txt' % sz,
'waste_bytes_%d.txt' % sz,
'free_bytes_%d.txt' % sz,
'ajs_hwm_bytes_%d.txt' % sz,
'ajs_use_bytes_%d.txt' % sz,
'ajs_waste_bytes_%d.txt' % sz ], 'alloc_waste_free_withajs_%d.png' % sz)
plot([ 'alloc_bytes_%d.txt' % sz,
'waste_bytes_%d.txt' % sz ], 'alloc_waste_%d.png' % sz)
plot([ 'alloc_bytes_%d.txt' % sz,
'waste_bytes_%d.txt' % sz,
'ajs_hwm_bytes_%d.txt' % sz,
'ajs_use_bytes_%d.txt' % sz,
'ajs_waste_bytes_%d.txt' % sz ], 'alloc_waste_withajs_%d.png' % sz)
# plots containing all pool sizes in a timeline
for name in [ 'alloc', 'waste' ]:
files = []
for sz in ps.getSizes():
files.append('%s_bytes_%d.txt' % (name, sz))
plot(files, '%s_bytes_allpools.png' % name)
# autoplot for all data files
for fn in os.listdir(out_dir):
fn_txt = os.path.join(out_dir, fn)
if not fn_txt.endswith('.txt'):
continue
fn_png = os.path.splitext(fn_txt)[0] + '.png'
if os.path.exists(fn_png):
continue
plot([ fn ], fn_png)
# XXX: plots for snapshots
#---------------------------------------------------------------------------
#
# Pool optimization helpers
#
# Summary a pool config into a one-line string.
def configOneLiner(cfg):
total_bytes = 0
res = ''
for i in xrange(len(cfg['pools'])):
p1 = cfg['pools'][i]
total_bytes += p1['size'] * p1['count']
res += ' %r=%r' % (p1['size'], p1['count'])
res = ('total %d:' % total_bytes) + res
return res
# Convert a pool config into an ajs_heap.c AJS_HeapConfig initializer.
def configToAjsHeader(cfg):
ind = ' '
cfgName = 'heapConfig'
res = []
res.append('/* optimized using pool_simulator.py */')
res.append('static const AJS_HeapConfig %s[] = {' % cfgName)
res.append('%s/* %d bytes total */' % (ind, cfg['total_bytes']))
for i in xrange(len(cfg['pools'])):
p = cfg['pools'][i]
if p['count'] == 0:
continue
borrow = '0'
if p.get('borrow', False):
borrow = 'AJS_POOL_BORROW'
comma = ',' # could remove, need to know which line is last (zero counts affect it)
res.append('%s{ %-7d, %-5d, %-16s, %d }%s /* %7d bytes */' % \
(ind, p['size'], p['count'], borrow, p.get('heap_index', 0), comma,
p['size'] * p['count']))
res.append('};')
return '\n'.join(res) + '\n'
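# The generated initializer looks roughly like this (illustrative excerpt only):
#
#   /* optimized using pool_simulator.py */
#   static const AJS_HeapConfig heapConfig[] = {
#       /* ... bytes total */
#       { 8      , 32   , AJS_POOL_BORROW , 0 }, /*     256 bytes */
#       ...
#   };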
# Recompute 'total_bytes' of the pool (useful after modifications).
def recomputePoolTotal(cfg):
total_bytes = 0
for i in xrange(len(cfg['pools'])):
p1 = cfg['pools'][i]
total_bytes += p1['size'] * p1['count']
cfg['total_bytes'] = total_bytes
return cfg # in-place
# Create a new pool config with pool counts added together.
def addPoolCounts(cfg1, cfg2):
pools = []
cfg = { 'pools': pools }
if len(cfg1['pools']) != len(cfg2['pools']):
raise Exception('incompatible pool configs')
for i in xrange(len(cfg1['pools'])):
p1 = cfg1['pools'][i]
p2 = cfg2['pools'][i]
if p1['size'] != p2['size']:
raise Exception('incompatible pool configs')
p3 = clonePoolCleaned(p1)
p3['count'] = p1['count'] + p2['count']
pools.append(p3)
recomputePoolTotal(cfg)
return cfg
# Create a new pool config with pool counts subtracts (result = cfg1 - cfg2).
def subtractPoolCounts(cfg1, cfg2):
pools = []
cfg = { 'pools': pools }
if len(cfg1['pools']) != len(cfg2['pools']):
raise Exception('incompatible pool configs')
for i in xrange(len(cfg1['pools'])):
p1 = cfg1['pools'][i]
p2 = cfg2['pools'][i]
if p1['size'] != p2['size']:
raise Exception('incompatible pool configs')
p3 = clonePoolCleaned(p1)
p3['count'] = p1['count'] - p2['count']
if p3['count'] < 0:
print 'Warning: pool count went negative, replace with zero'
p3['count'] = 0
#raise Exception('pool count went negative')
pools.append(p3)
recomputePoolTotal(cfg)
return cfg
# Create a new pool config with pool count being the maximum of all input
# configs (for each pool size).
def maxPoolCounts(cfglist):
cfg1 = json.loads(json.dumps(cfglist[0])) # start from clone of first config
for cfg2 in cfglist:
if len(cfg1['pools']) != len(cfg2['pools']):
raise Exception('incompatible pool configs')
for i in xrange(len(cfg1['pools'])):
p1 = cfg1['pools'][i]
p2 = cfg2['pools'][i]
if p1['size'] != p2['size']:
raise Exception('incompatible pool configs')
p1['count'] = max(p1['count'], p2['count'])
recomputePoolTotal(cfg1)
return cfg1
# Scale pool counts with a factor, leaving pool counts fractional.
def scalePoolCountsFractional(cfg1, factor):
pools = []
cfg = { 'pools': pools }
for i in xrange(len(cfg1['pools'])):
p1 = cfg1['pools'][i]
p2 = clonePoolCleaned(p1)
p2['count'] = factor * p1['count'] # fractional
pools.append(p2)
recomputePoolTotal(cfg)
return cfg
# Round pool counts to integer values with a configurable threshold.
def roundPoolCounts(cfg1, threshold):
pools = []
cfg = { 'pools': pools }
for i in xrange(len(cfg1['pools'])):
p1 = cfg1['pools'][i]
count = math.floor(p1['count'])
if p1['count'] - count > threshold:
count += 1
p2 = clonePoolCleaned(p1)
p2['count'] = int(count)
pools.append(p2)
recomputePoolTotal(cfg)
return cfg
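# Illustrative sketch (assumed fractional counts): a fractional count c becomes
# floor(c), plus one when c - floor(c) exceeds the threshold.
#
#   roundPoolCounts(cfg, 0.5)  : 2.4 -> 2, 2.6 -> 3
#   roundPoolCounts(cfg, 0.0)  : 2.4 -> 3, 2.6 -> 3   (any nonzero fraction rounds up)
#   roundPoolCounts(cfg, 1.0)  : 2.4 -> 2, 2.6 -> 2   (always rounds down)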
def optimizePoolCountsForMemory(cfg_duktape, cfg_apps, target_memory):
print('Duktape baseline: %s' % configOneLiner(cfg_duktape))
# Subtract Duktape baseline from app memory usage
for i in xrange(len(cfg_apps)):
print('App with Duktape baseline: %s' % configOneLiner(cfg_apps[i]))
cfg = subtractPoolCounts(cfg_apps[i], cfg_duktape)
cfg_apps[i] = cfg
print('App minus Duktape baseline: %s' % configOneLiner(cfg))
# Normalize app memory usage
normalized_memory = 1024.0 * 1024.0 # number doesn't really matter, fractions used
for i in xrange(len(cfg_apps)):
cfg = cfg_apps[i]
factor = normalized_memory / cfg['total_bytes']
cfg = scalePoolCountsFractional(cfg, factor)
cfg_apps[i] = cfg
print('Scaled app %d: %s' % (i, configOneLiner(cfg)))
# Establish a representative profile over normalized application
# profiles (over Duktape baseline).
cfg_rep = maxPoolCounts(cfg_apps)
print('Representative: %s' % configOneLiner(cfg_rep))
# Scale (fractionally) to total bytes
factor = (target_memory - cfg_duktape['total_bytes']) / cfg_rep['total_bytes']
cfg_res = scalePoolCountsFractional(cfg_rep, factor)
cfg_res = addPoolCounts(cfg_duktape, cfg_res)
print('Fractional result: %s' % configOneLiner(cfg_res))
# Round to integer pool counts with a sliding rounding
# threshold so that we meet target memory as closely
# as possible
round_threshold = 1.0
round_step = 0.0001
round_threshold += round_step
while True:
cfg_tmp = roundPoolCounts(cfg_res, round_threshold - round_step)
#print('rounding... %f -> %d total bytes' % (round_threshold, cfg_tmp['total_bytes']))
if cfg_tmp['total_bytes'] > target_memory:
# previous value was good
break
round_threshold -= round_step
if round_threshold < 0.0:
print('should not happen')
round_threshold = 0.0
break
print('Final round threshold: %f' % round_threshold)
cfg_final = roundPoolCounts(cfg_res, round_threshold)
# XXX: negative pool counts
print('Final pools: %s' % configOneLiner(cfg_final))
return cfg_final
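# Worked sketch of the steps above with made-up numbers (not measured data):
# assume a Duktape baseline of 20 kB, two app profiles of 60 kB and 100 kB
# including that baseline, and a 96 kB target.
#   1. Subtract the 20 kB baseline from each app profile (40 kB and 80 kB).
#   2. Scale each result to the same normalized size so differently sized apps
#      get equal weight in the next step.
#   3. Take the per-pool-size maximum over the normalized profiles to get a
#      representative profile.
#   4. Scale the representative profile to (96 - 20) kB and add the baseline
#      back, giving fractional pool counts.
#   5. Sweep the rounding threshold downwards until rounding any further up
#      would exceed the 96 kB target.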
#---------------------------------------------------------------------------
#
# Main program
#
# Simulate an allocation log and write out a lot of statistics and graphs.
def cmd_simulate(opts, args):
dprint('Init pool simulator')
ps = PoolSimulator(readJson(opts.pool_config), borrow=True, extend=False)
dprint('Process allocation log')
f = open(opts.alloc_log)
processAllocLog(ps, f, opts.out_dir)
f.close()
dprint('Write tight pool config based on hwm')
cfg = ps.getTightHwmConfig()
f = open(os.path.join(opts.out_dir, 'config_tight.json'), 'wb')
f.write(json.dumps(cfg, indent=4))
f.close()
dprint('Plot graphs (gnuplot)')
gnuplotGraphs(ps, opts.out_dir)
dprint('Finished, output is in: ' + str(opts.out_dir))
#print(json.dumps(ps.state))
# Simulate an allocation log and optimize pool counts to tight values.
#
# If borrow_optimize=False, match pool counts to the high water mark with no
# borrowing. Input pool counts are ignored and pools are extended as needed.
#
# If borrow_optimize=True, match pool counts initially to high water mark
# as before, but then reduce pool counts iteratively to minimum values which
# still allow the allocation log to be replayed without out-of-memory. This
# results in pool counts which should be close to minimum values when
# borrowing behavior is taken into account.
def cmd_tight_counts(opts, args, borrow_optimize):
# Get hwm profile with "autoextend", i.e. no borrowing
print('Get hwm pool count profile with autoextend enabled (= no borrowing)')
ps = PoolSimulator(readJson(opts.pool_config), borrow=False, extend=True)
f = open(opts.alloc_log)
processAllocLog(ps, f, opts.out_dir, throw_on_oom=True, emit_files=False)
f.close()
cfg = ps.getTightHwmConfig()
print('Tight config based on hwm, no borrowing: %s' % configOneLiner(cfg))
f = open(os.path.join(opts.out_dir, 'config_tight.json'), 'wb')
f.write(json.dumps(cfg, indent=4))
f.close()
if not borrow_optimize:
return cfg
# Optimize pool counts taking borrowing into account. Not very
# optimal but step resizing ensures there shouldn't be pathological
# cases (which might happen if step was -1).
print('Optimizing pool counts taking borrowing into account (takes a while)...')
for i in xrange(len(cfg['pools']) - 1, -1, -1):
p = cfg['pools'][i]
step = 1
while step < p['count']:
step *= 2
highest_fail = -1
while p['count'] > 0 and step > 0:
prev_count = p['count']
p['count'] -= step
print('Reduce count for pool size %d bytes from %r to %r and resimulate' % (p['size'], prev_count, p['count']))
# XXX: emits unused snapshots, optimize
if p['count'] <= highest_fail:
# we know this will fail
success = False
else:
ps = PoolSimulator(cfg, borrow=True, extend=False)
f = open(opts.alloc_log)
success = processAllocLog(ps, f, opts.out_dir, throw_on_oom=False, emit_files=False)
f.close()
if not success:
highest_fail = max(highest_fail, p['count'])
p['count'] = prev_count
step /= 2
print('Pool config after size %d: %s' % (p['size'], configOneLiner(cfg)))
print('Tight config based on hwm and optimizing borrowing: %s' % configOneLiner(cfg))
return cfg
# Main entry point.
def main():
parser = optparse.OptionParser()
parser.add_option('--out-dir', dest='out_dir')
parser.add_option('--pool-config', dest='pool_config')
parser.add_option('--alloc-log', dest='alloc_log')
parser.add_option('--out-pool-config', dest='out_pool_config')
parser.add_option('--out-ajsheap-config', dest='out_ajsheap_config', default=None)
(opts, args) = parser.parse_args()
if not os.path.isdir(opts.out_dir):
raise Exception('--out-dir argument is not a directory')
if len(os.listdir(opts.out_dir)) > 0:
raise Exception('--out-dir argument is not empty')
def writeOutputs(cfg):
writeJson(opts.out_pool_config, cfg)
if opts.out_ajsheap_config is not None:
writeFile(opts.out_ajsheap_config, configToAjsHeader(cfg))
cmd = args[0]
if cmd == 'simulate':
cmd_simulate(opts, args)
elif cmd == 'tight_counts_noborrow':
cfg = cmd_tight_counts(opts, args, False)
writeOutputs(cfg)
elif cmd == 'tight_counts_borrow':
cfg = cmd_tight_counts(opts, args, True)
writeOutputs(cfg)
elif cmd == 'subtract_pool_counts':
# XXX: unused
cfg1 = readJson(args[1])
cfg2 = readJson(args[2])
cfg3 = subtractPoolCounts(cfg1, cfg2)
writeOutputs(cfg3)
elif cmd == 'max_pool_counts':
# XXX: unused
# Not very useful without normalization.
cfg = maxPoolCounts(args[1:])
writeOutputs(cfg)
elif cmd == 'pool_counts_for_memory':
target_memory = long(args[1])
cfg_duktape = readJson(args[2])
print('Duktape baseline: %d bytes' % cfg_duktape['total_bytes'])
cfg_apps = []
for arg in args[3:]:
cfg = readJson(arg)
cfg_apps.append(cfg)
print('Application: %d bytes' % cfg['total_bytes'])
cfg = optimizePoolCountsForMemory(cfg_duktape, cfg_apps, target_memory)
writeOutputs(cfg)
else:
raise Exception('invalid command ' + str(cmd))
if __name__ == '__main__':
main()
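# Illustrative invocation (file names are hypothetical; the flags and commands
# are the ones defined in main() above, and --out-dir must be an existing,
# empty directory):
#
#   python pool_simulator.py --pool-config pools.json --alloc-log alloc.log \
#       --out-dir ./out --out-pool-config pools_tight.json \
#       --out-ajsheap-config ajs_heap_config.h tight_counts_borrow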
|
mlf4aiur/payslip | refs/heads/master | libs/xlrd/timemachine.py | 64 | # -*- coding: cp1252 -*-
##
# <p>Copyright © 2006-2008 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
# timemachine.py -- adaptation for earlier Pythons e.g. 2.1
# usage: from timemachine import *
# 2008-02-08 SJM Generalised method of detecting IronPython
import sys
python_version = sys.version_info[:2] # e.g. version 2.4 -> (2, 4)
CAN_PICKLE_ARRAY = python_version >= (2, 5)
CAN_SUBCLASS_BUILTIN = python_version >= (2, 2)
if sys.version.find("IronPython") >= 0:
array_array = None
else:
from array import array as array_array
if python_version < (2, 2):
class object:
pass
False = 0
True = 1
def int_floor_div(x, y):
return divmod(x, y)[0]
def intbool(x):
if x:
return 1
return 0
if python_version < (2, 3):
def sum(sequence, start=0):
tot = start
        for item in sequence:
tot += item
return tot
|
216software/Profiles | refs/heads/dev | communityprofiles/profiles/oldmigrations/0055_auto__add_flatvalue.py | 2 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FlatValue'
db.create_table(u'profiles_flatvalue', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'])),
('display_title', self.gf('django.db.models.fields.CharField')(max_length='255', db_index=True)),
('geography', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.GeoRecord'])),
('value_type', self.gf('django.db.models.fields.CharField')(max_length='100')),
('time_key', self.gf('django.db.models.fields.CharField')(max_length='255')),
('number', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('percent', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('moe', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2, blank=True)),
('f_number', self.gf('django.db.models.fields.CharField')(max_length='255', null=True, blank=True)),
('f_percent', self.gf('django.db.models.fields.CharField')(max_length='255', null=True, blank=True)),
('f_moe', self.gf('django.db.models.fields.CharField')(max_length='255', null=True, blank=True)),
))
db.send_create_signal(u'profiles', ['FlatValue'])
def backwards(self, orm):
# Deleting model 'FlatValue'
db.delete_table(u'profiles_flatvalue')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 16, 12, 4, 30, 380754)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 16, 12, 4, 30, 380329)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
Lujeni/ansible | refs/heads/devel | lib/ansible/modules/system/parted.py | 16 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Fabrizio Colonna <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- Fabrizio Colonna (@ColOfAbRiX)
module: parted
short_description: Configure block device partitions
version_added: "2.3"
description:
- This module allows configuring block device partition using the C(parted)
command line tool. For a full description of the fields and the options
check the GNU parted manual.
requirements:
- This module requires parted version 1.8.3 and above.
- If the version of parted is below 3.1, it requires a Linux version running
the sysfs file system C(/sys/).
options:
device:
description: The block device (disk) where to operate.
type: str
required: True
align:
description: Set alignment for newly created partitions.
type: str
choices: [ cylinder, minimal, none, optimal ]
default: optimal
number:
description:
- The number of the partition to work with or the number of the partition
that will be created.
- Required when performing any action on the disk, except fetching information.
type: int
unit:
description:
- Selects the current default unit that Parted will use to display
locations and capacities on the disk and to interpret those given by the
user if they are not suffixed by an unit.
- When fetching information about a disk, it is always recommended to specify a unit.
type: str
choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
default: KiB
label:
description: Creates a new disk label.
type: str
choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
default: msdos
part_type:
description:
- May be specified only with 'msdos' or 'dvh' partition tables.
- A C(name) must be specified for a 'gpt' partition table.
- Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
type: str
choices: [ extended, logical, primary ]
default: primary
part_start:
description:
- Where the partition will start as offset from the beginning of the disk,
that is, the "distance" from the start of the disk.
- The distance can be specified with all the units supported by parted
(except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
type: str
default: 0%
  part_end:
description:
- Where the partition will end as offset from the beginning of the disk,
that is, the "distance" from the start of the disk.
- The distance can be specified with all the units supported by parted
(except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
type: str
default: 100%
name:
description:
- Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
type: str
flags:
description: A list of the flags that has to be set on the partition.
type: list
state:
description:
- Whether to create or delete a partition.
- If set to C(info) the module will only return the device information.
type: str
choices: [ absent, present, info ]
default: info
notes:
- When fetching information about a new disk and when the version of parted
installed on the system is before version 3.1, the module queries the kernel
through C(/sys/) to obtain disk information. In this case the units CHS and
CYL are not supported.
'''
RETURN = r'''
partition_info:
description: Current partition information
returned: success
type: complex
contains:
device:
description: Generic device information.
type: dict
partitions:
description: List of device partitions.
type: list
sample: {
"disk": {
"dev": "/dev/sdb",
"logical_block": 512,
"model": "VMware Virtual disk",
"physical_block": 512,
"size": 5.0,
"table": "msdos",
"unit": "gib"
},
"partitions": [{
"begin": 0.0,
"end": 1.0,
"flags": ["boot", "lvm"],
"fstype": "",
"name": "",
"num": 1,
"size": 1.0
}, {
"begin": 1.0,
"end": 5.0,
"flags": [],
"fstype": "",
"name": "",
"num": 2,
"size": 4.0
}]
}
'''
EXAMPLES = r'''
- name: Create a new primary partition
parted:
device: /dev/sdb
number: 1
state: present
- name: Remove partition number 1
parted:
device: /dev/sdb
number: 1
state: absent
- name: Create a new primary partition with a size of 1GiB
parted:
device: /dev/sdb
number: 1
state: present
part_end: 1GiB
- name: Create a new primary partition for LVM
parted:
device: /dev/sdb
number: 2
flags: [ lvm ]
state: present
part_start: 1GiB
# Example on how to read info and reuse it in subsequent task
- name: Read device information (always use unit when probing)
parted: device=/dev/sdb unit=MiB
register: sdb_info
- name: Remove all partitions from disk
parted:
device: /dev/sdb
number: '{{ item.num }}'
state: absent
loop: '{{ sdb_info.partitions }}'
'''
from ansible.module_utils.basic import AnsibleModule
import math
import re
import os
# Reference prefixes (International System of Units and IEC)
units_si = ['B', 'KB', 'MB', 'GB', 'TB']
units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
def parse_unit(size_str, unit=''):
"""
Parses a string containing a size of information
"""
matches = re.search(r'^([\d.]+)([\w%]+)?$', size_str)
if matches is None:
# "<cylinder>,<head>,<sector>" format
matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
if matches is None:
module.fail_json(
msg="Error interpreting parted size output: '%s'" % size_str
)
size = {
'cylinder': int(matches.group(1)),
'head': int(matches.group(2)),
'sector': int(matches.group(3))
}
unit = 'chs'
else:
# Normal format: "<number>[<unit>]"
if matches.group(2) is not None:
unit = matches.group(2)
size = float(matches.group(1))
return size, unit
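# Illustrative behaviour of parse_unit() (input strings chosen for the example):
#
#   parse_unit('1.5GiB')        -> (1.5, 'GiB')
#   parse_unit('5242880', 'B')  -> (5242880.0, 'B')    # falls back to the given unit
#   parse_unit('121,32,63')     -> ({'cylinder': 121, 'head': 32, 'sector': 63}, 'chs')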
def parse_partition_info(parted_output, unit):
"""
Parses the output of parted and transforms the data into
a dictionary.
Parted Machine Parseable Output:
See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00
0573.html
- All lines end with a semicolon (;)
- The first line indicates the units in which the output is expressed.
CHS, CYL and BYT stands for CHS, Cylinder and Bytes respectively.
- The second line is made of disk information in the following format:
"path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
e":"partition-table-type":"model-name";
- If the first line was either CYL or CHS, the next line will contain
information on no. of cylinders, heads, sectors and cylinder size.
- Partition information begins from the next line. This is of the format:
(for BYT)
"number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s
et";
(for CHS/CYL)
"number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
"""
lines = [x for x in parted_output.split('\n') if x.strip() != '']
# Generic device info
generic_params = lines[1].rstrip(';').split(':')
# The unit is read once, because parted always returns the same unit
size, unit = parse_unit(generic_params[1], unit)
generic = {
'dev': generic_params[0],
'size': size,
'unit': unit.lower(),
'table': generic_params[5],
'model': generic_params[6],
'logical_block': int(generic_params[3]),
'physical_block': int(generic_params[4])
}
# CYL and CHS have an additional line in the output
if unit in ['cyl', 'chs']:
chs_info = lines[2].rstrip(';').split(':')
cyl_size, cyl_unit = parse_unit(chs_info[3])
generic['chs_info'] = {
'cylinders': int(chs_info[0]),
'heads': int(chs_info[1]),
'sectors': int(chs_info[2]),
'cyl_size': cyl_size,
'cyl_size_unit': cyl_unit.lower()
}
lines = lines[1:]
parts = []
for line in lines[2:]:
part_params = line.rstrip(';').split(':')
# CHS use a different format than BYT, but contrary to what stated by
# the author, CYL is the same as BYT. I've tested this undocumented
# behaviour down to parted version 1.8.3, which is the first version
# that supports the machine parseable output.
if unit != 'chs':
size = parse_unit(part_params[3])[0]
fstype = part_params[4]
name = part_params[5]
flags = part_params[6]
else:
size = ""
fstype = part_params[3]
name = part_params[4]
flags = part_params[5]
parts.append({
'num': int(part_params[0]),
'begin': parse_unit(part_params[1])[0],
'end': parse_unit(part_params[2])[0],
'size': size,
'fstype': fstype,
'name': name,
'flags': [f.strip() for f in flags.split(', ') if f != ''],
'unit': unit.lower(),
})
return {'generic': generic, 'partitions': parts}
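# Illustrative input for parse_partition_info() (a made-up dump following the
# machine-parseable format described in the docstring above, not real output):
#
#   BYT;
#   /dev/sdb:5.00GiB:scsi:512:512:msdos:VMware Virtual disk;
#   1:0.00GiB:1.00GiB:1.00GiB:ext4::boot;
#
# which would parse into a 'generic' dict for /dev/sdb (size 5.0, unit 'gib',
# table 'msdos') and one partition entry with num=1, size=1.0, fstype='ext4'
# and flags=['boot'].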
def format_disk_size(size_bytes, unit):
"""
Formats a size in bytes into a different unit, like parted does. It doesn't
manage CYL and CHS formats, though.
This function has been adapted from https://github.com/Distrotech/parted/blo
b/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
"""
global units_si, units_iec
unit = unit.lower()
# Shortcut
if size_bytes == 0:
return 0.0, 'b'
# Cases where we default to 'compact'
if unit in ['', 'compact', 'cyl', 'chs']:
index = max(0, int(
(math.log10(size_bytes) - 1.0) / 3.0
))
unit = 'b'
if index < len(units_si):
unit = units_si[index]
# Find the appropriate multiplier
multiplier = 1.0
if unit in units_si:
multiplier = 1000.0 ** units_si.index(unit)
elif unit in units_iec:
multiplier = 1024.0 ** units_iec.index(unit)
output = size_bytes // multiplier * (1 + 1E-16)
# Corrections to round up as per IEEE754 standard
if output < 10:
w = output + 0.005
elif output < 100:
w = output + 0.05
else:
w = output + 0.5
if w < 10:
precision = 2
elif w < 100:
precision = 1
else:
precision = 0
# Round and return
return round(output, precision), unit
def get_unlabeled_device_info(device, unit):
"""
    Fetches device information directly from the kernel; used when parted
    cannot work because of a missing label. It always returns an 'unknown'
    label.
"""
device_name = os.path.basename(device)
base = "/sys/block/%s" % device_name
vendor = read_record(base + "/device/vendor", "Unknown")
model = read_record(base + "/device/model", "model")
logic_block = int(read_record(base + "/queue/logical_block_size", 0))
phys_block = int(read_record(base + "/queue/physical_block_size", 0))
size_bytes = int(read_record(base + "/size", 0)) * logic_block
size, unit = format_disk_size(size_bytes, unit)
return {
'generic': {
'dev': device,
'table': "unknown",
'size': size,
'unit': unit,
'logical_block': logic_block,
'physical_block': phys_block,
'model': "%s %s" % (vendor, model),
},
'partitions': []
}
def get_device_info(device, unit):
"""
    Fetches information about a disk and its partitions and returns a
    dictionary.
"""
global module, parted_exec
# If parted complains about missing labels, it means there are no partitions.
# In this case only, use a custom function to fetch information and emulate
# parted formats for the unit.
label_needed = check_parted_label(device)
if label_needed:
return get_unlabeled_device_info(device, unit)
command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
rc, out, err = module.run_command(command)
if rc != 0 and 'unrecognised disk label' not in err:
module.fail_json(msg=(
"Error while getting device information with parted "
"script: '%s'" % command),
rc=rc, out=out, err=err
)
return parse_partition_info(out, unit)
def check_parted_label(device):
"""
Determines if parted needs a label to complete its duties. Versions prior
to 3.1 don't return data when there is no label. For more information see:
http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html
"""
global parted_exec
# Check the version
parted_major, parted_minor, _ = parted_version()
if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
return False
# Older parted versions return a message in the stdout and RC > 0.
rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
if rc != 0 and 'unrecognised disk label' in out.lower():
return True
return False
def parted_version():
"""
Returns the major and minor version of parted installed on the system.
"""
global module, parted_exec
rc, out, err = module.run_command("%s --version" % parted_exec)
if rc != 0:
module.fail_json(
msg="Failed to get parted version.", rc=rc, out=out, err=err
)
lines = [x for x in out.split('\n') if x.strip() != '']
if len(lines) == 0:
module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
matches = re.search(r'^parted.+(\d+)\.(\d+)(?:\.(\d+))?$', lines[0])
if matches is None:
module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
# Convert version to numbers
major = int(matches.group(1))
minor = int(matches.group(2))
rev = 0
if matches.group(3) is not None:
rev = int(matches.group(3))
return major, minor, rev
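# Illustrative example (the version string is hypothetical): a first output
# line of "parted (GNU parted) 3.2" is matched by the regular expression above
# and yields major=3, minor=2, rev=0.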
def parted(script, device, align):
"""
Runs a parted script.
"""
global module, parted_exec
if script and not module.check_mode:
command = "%s -s -m -a %s %s -- %s" % (parted_exec, align, device, script)
rc, out, err = module.run_command(command)
if rc != 0:
module.fail_json(
msg="Error while running parted script: %s" % command.strip(),
rc=rc, out=out, err=err
)
def read_record(file_path, default=None):
"""
Reads the first line of a file and returns it.
"""
try:
f = open(file_path, 'r')
try:
return f.readline().strip()
finally:
f.close()
except IOError:
return default
def part_exists(partitions, attribute, number):
"""
    Checks whether a partition with a specific value for a specific attribute
    actually exists.
"""
return any(
part[attribute] and
part[attribute] == number for part in partitions
)
def check_size_format(size_str):
"""
Checks if the input string is an allowed size
"""
size, unit = parse_unit(size_str)
return unit in parted_units
def main():
global module, units_si, units_iec, parted_exec
changed = False
output_script = ""
script = ""
module = AnsibleModule(
argument_spec=dict(
device=dict(type='str', required=True),
align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal']),
number=dict(type='int'),
# unit <unit> command
unit=dict(type='str', default='KiB', choices=parted_units),
# mklabel <label-type> command
label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
# mkpart <part-type> [<fs-type>] <start> <end> command
part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
part_start=dict(type='str', default='0%'),
part_end=dict(type='str', default='100%'),
# name <partition> <name> command
name=dict(type='str'),
# set <partition> <flag> <state> command
flags=dict(type='list'),
# rm/mkpart command
state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
),
required_if=[
['state', 'present', ['number']],
['state', 'absent', ['number']],
],
supports_check_mode=True,
)
module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
# Data extraction
device = module.params['device']
align = module.params['align']
number = module.params['number']
unit = module.params['unit']
label = module.params['label']
part_type = module.params['part_type']
part_start = module.params['part_start']
part_end = module.params['part_end']
name = module.params['name']
state = module.params['state']
flags = module.params['flags']
# Parted executable
parted_exec = module.get_bin_path('parted', True)
# Conditioning
if number is not None and number < 1:
module.fail_json(msg="The partition number must be greater then 0.")
if not check_size_format(part_start):
module.fail_json(
msg="The argument 'part_start' doesn't respect required format."
"The size unit is case sensitive.",
err=parse_unit(part_start)
)
if not check_size_format(part_end):
module.fail_json(
msg="The argument 'part_end' doesn't respect required format."
"The size unit is case sensitive.",
err=parse_unit(part_end)
)
# Read the current disk information
current_device = get_device_info(device, unit)
current_parts = current_device['partitions']
if state == 'present':
# Assign label if required
if current_device['generic'].get('table', None) != label:
script += "mklabel %s " % label
# Create partition if required
if part_type and not part_exists(current_parts, 'num', number):
script += "mkpart %s %s %s " % (
part_type,
part_start,
part_end
)
# Set the unit of the run
if unit and script:
script = "unit %s %s" % (unit, script)
# Execute the script and update the data structure.
# This will create the partition for the next steps
if script:
output_script += script
parted(script, device, align)
changed = True
script = ""
current_parts = get_device_info(device, unit)['partitions']
if part_exists(current_parts, 'num', number) or module.check_mode:
partition = {'flags': []} # Empty structure for the check-mode
if not module.check_mode:
partition = [p for p in current_parts if p['num'] == number][0]
# Assign name to the partition
if name is not None and partition.get('name', None) != name:
# Wrap double quotes in single quotes so the shell doesn't strip
# the double quotes as those need to be included in the arg
# passed to parted
script += 'name %s \'"%s"\' ' % (number, name)
# Manage flags
if flags:
# Parted infers boot with esp, if you assign esp, boot is set
# and if boot is unset, esp is also unset.
if 'esp' in flags and 'boot' not in flags:
flags.append('boot')
# Compute only the changes in flags status
flags_off = list(set(partition['flags']) - set(flags))
flags_on = list(set(flags) - set(partition['flags']))
for f in flags_on:
script += "set %s %s on " % (number, f)
for f in flags_off:
script += "set %s %s off " % (number, f)
# Set the unit of the run
if unit and script:
script = "unit %s %s" % (unit, script)
# Execute the script
if script:
output_script += script
changed = True
parted(script, device, align)
elif state == 'absent':
# Remove the partition
if part_exists(current_parts, 'num', number) or module.check_mode:
script = "rm %s " % number
output_script += script
changed = True
parted(script, device, align)
elif state == 'info':
output_script = "unit '%s' print " % unit
# Final status of the device
final_device_status = get_device_info(device, unit)
module.exit_json(
changed=changed,
disk=final_device_status['generic'],
partitions=final_device_status['partitions'],
script=output_script.strip()
)
if __name__ == '__main__':
main()
|
thedanotto/thedanotto | refs/heads/master | thedanotto/urls.py | 1 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import RedirectView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATIC_ROOT }),
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT }),
# (r'^static/(?P<path>/*)$', 'django.views.static.server', {
# 'document_root': '/static/'
# }),
# (r'^media/(?P<path>/*)$', 'django.views.static.server', {
# 'document_root': '/media/'
# }),
url(r'^$', 'home.views.home', name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^resume/$', 'resume.views.resume', name='resume'),
url(r'^projects/$', 'projects.views.all_projects', name='projects'),
url(r'^social/$', 'social.views.social', name='social'),
url(r'^hire-me/$', 'hire.views.hire_me', name='hire'),
url(r'^thank-you/$', 'hire.views.thank_you', name='thank_you'),
url(r'^resume2/$', 'resume.views.resume_dev'),
(r'^lyft/$', RedirectView.as_view(url='http://lft.to/1gPjqO0')),
)
if True:
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
zerc/django | refs/heads/master | tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/0002_conflicting_second.py | 425 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("unspecified_app_with_conflict", "0001_initial")]
operations = [
migrations.CreateModel(
"Something",
[
("id", models.AutoField(primary_key=True)),
],
)
]
|
Zhongqilong/kbengine | refs/heads/master | kbe/src/lib/python/Lib/xml/sax/saxutils.py | 76 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
import codecs
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
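# Illustrative behaviour of the helpers above (example strings only):
#
#   escape('<spam & eggs>')            -> '<spam & eggs>'
#   unescape('<spam & eggs>')   -> '<spam & eggs>'
#   quoteattr('say "hi"')              -> "'say \"hi\"'"   (single-quoted because
#                                                           the value contains ")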
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)):
# use a codecs stream writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
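# Illustrative use of XMLGenerator (the output buffer and element names are
# arbitrary choices for the example):
#
#   from io import StringIO
#   buf = StringIO()
#   gen = XMLGenerator(buf, encoding='utf-8', short_empty_elements=True)
#   gen.startDocument()
#   gen.startElement('greeting', {'lang': 'en'})
#   gen.characters('hello & goodbye')
#   gen.endElement('greeting')
#   gen.endDocument()
#   # buf.getvalue() == '<?xml version="1.0" encoding="utf-8"?>\n'
#   #                   '<greeting lang="en">hello & goodbye</greeting>'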
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
|
rahulrrixe/WRAN | refs/heads/master | db_repository/versions/__init__.py | 12133432 | |
ashishnitinpatil/django_appengine_project_template | refs/heads/master | django/core/handlers/__init__.py | 12133432 | |
rajexp/stepMuzic | refs/heads/master | allauth/socialaccount/providers/spotify/__init__.py | 12133432 | |
IndraVikas/scikit-learn | refs/heads/master | sklearn/linear_model/tests/test_bayes.py | 299 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
|
nikhil18/lightning-kernel | refs/heads/lightning-10.6.A.0.454 | scripts/gcc-wrapper.py | 234 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
"f_qdss.c:586",
"mipi_tc358764_dsi2lvds.c:746",
"dynamic_debug.h:75",
"hci_conn.c:407",
"f_qdss.c:740",
"mipi_novatek.c:569",
"swab.h:34",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
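# Illustrative note (not part of the original wrapper): for a compiler line
# such as
#     drivers/net/foo.c:123:45: warning: unused variable 'x'
# warning_re yields group(2) == "foo.c:123", which is the key that
# interpret_warning() below compares against the allowed_warnings whitelist.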
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
wooga/airflow | refs/heads/master | airflow/providers/amazon/aws/operators/sagemaker_transform.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.operators.sagemaker_base import SageMakerBaseOperator
from airflow.utils.decorators import apply_defaults
class SageMakerTransformOperator(SageMakerBaseOperator):
"""
Initiate a SageMaker transform job.
    This operator returns the ARN of the model created in Amazon SageMaker.
    :param config: The configuration necessary to start a transform job (templated).
        If you need to create a SageMaker transform job based on an existing SageMaker model::
config = transform_config
If you need to create both SageMaker model and SageMaker Transform job::
config = {
'Model': model_config,
'Transform': transform_config
}
For details of the configuration parameter of transform_config see
:py:meth:`SageMaker.Client.create_transform_job`
For details of the configuration parameter of model_config, See:
:py:meth:`SageMaker.Client.create_model`
:type config: dict
:param aws_conn_id: The AWS connection ID to use.
:type aws_conn_id: str
:param wait_for_completion: Set to True to wait until the transform job finishes.
:type wait_for_completion: bool
:param check_interval: If wait is set to True, the time interval, in seconds,
that this operation waits to check the status of the transform job.
:type check_interval: int
:param max_ingestion_time: If wait is set to True, the operation fails
if the transform job doesn't finish within max_ingestion_time seconds. If you
        set this parameter to None, the operation does not time out.
:type max_ingestion_time: int
"""
@apply_defaults
def __init__(self,
config,
wait_for_completion=True,
check_interval=30,
max_ingestion_time=None,
*args, **kwargs):
super().__init__(config=config,
*args, **kwargs)
self.config = config
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.max_ingestion_time = max_ingestion_time
self.create_integer_fields()
def create_integer_fields(self):
"""Set fields which should be casted to integers."""
self.integer_fields = [
['Transform', 'TransformResources', 'InstanceCount'],
['Transform', 'MaxConcurrentTransforms'],
['Transform', 'MaxPayloadInMB']
]
if 'Transform' not in self.config:
for field in self.integer_fields:
field.pop(0)
def expand_role(self):
if 'Model' not in self.config:
return
config = self.config['Model']
if 'ExecutionRoleArn' in config:
hook = AwsBaseHook(self.aws_conn_id, client_type='iam')
config['ExecutionRoleArn'] = hook.expand_role(config['ExecutionRoleArn'])
def execute(self, context):
self.preprocess_config()
model_config = self.config.get('Model')
transform_config = self.config.get('Transform', self.config)
if model_config:
self.log.info('Creating SageMaker Model %s for transform job', model_config['ModelName'])
self.hook.create_model(model_config)
self.log.info('Creating SageMaker transform Job %s.', transform_config['TransformJobName'])
response = self.hook.create_transform_job(
transform_config,
wait_for_completion=self.wait_for_completion,
check_interval=self.check_interval,
max_ingestion_time=self.max_ingestion_time)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise AirflowException('Sagemaker transform Job creation failed: %s' % response)
else:
return {
'Model': self.hook.describe_model(
transform_config['ModelName']
),
'Transform': self.hook.describe_transform_job(
transform_config['TransformJobName']
)
}
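# --- Illustrative sketch (not part of the provider module) ------------------
# The class docstring above describes the two accepted shapes of ``config``.
# The dict below sketches the combined Model + Transform form using field
# names from the boto3 SageMaker API; every value is a made-up placeholder,
# not a tested configuration.
_EXAMPLE_TRANSFORM_CONFIG = {
    'Model': {
        'ModelName': 'example-model',
        'ExecutionRoleArn': 'arn:aws:iam::123456789012:role/example-role',
        'PrimaryContainer': {
            'Image': '123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest',
            'ModelDataUrl': 's3://example-bucket/model.tar.gz',
        },
    },
    'Transform': {
        'TransformJobName': 'example-transform-job',
        'ModelName': 'example-model',
        'TransformInput': {
            'DataSource': {
                'S3DataSource': {'S3DataType': 'S3Prefix',
                                 'S3Uri': 's3://example-bucket/input/'},
            },
        },
        'TransformOutput': {'S3OutputPath': 's3://example-bucket/output/'},
        'TransformResources': {'InstanceType': 'ml.m5.large', 'InstanceCount': 1},
    },
}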
|
natefoo/pulsar | refs/heads/master | pulsar/client/destination.py | 2 |
from re import match
from .util import filter_destination_params
SUBMIT_PREFIX = "submit_"
def url_to_destination_params(url):
"""Convert a legacy runner URL to a job destination
>>> params_simple = url_to_destination_params("http://localhost:8913/")
>>> params_simple["url"]
'http://localhost:8913/'
>>> params_simple["private_token"] is None
True
>>> advanced_url = "https://[email protected]:8914/managers/longqueue"
>>> params_advanced = url_to_destination_params(advanced_url)
>>> params_advanced["url"]
'https://example.com:8914/managers/longqueue/'
>>> params_advanced["private_token"]
'1234x'
>>> runner_url = "pulsar://http://localhost:8913/"
>>> runner_params = url_to_destination_params(runner_url)
>>> runner_params['url']
'http://localhost:8913/'
"""
if url.startswith("pulsar://"):
url = url[len("pulsar://"):]
if not url.endswith("/"):
url += "/"
# Check for private token embedded in the URL. A URL of the form
# https://moo@cow:8913 will try to contact https://cow:8913
    # with a private token of moo
private_token_format = "https?://(.*)@.*/?"
private_token_match = match(private_token_format, url)
private_token = None
if private_token_match:
private_token = private_token_match.group(1)
url = url.replace("%s@" % private_token, '', 1)
destination_args = {"url": url,
"private_token": private_token}
return destination_args
def submit_params(destination_params):
"""
>>> destination_params = {"private_token": "12345", "submit_native_specification": "-q batch"}
>>> result = submit_params(destination_params)
>>> result
{'native_specification': '-q batch'}
"""
return filter_destination_params(destination_params, SUBMIT_PREFIX)
|
CedarLogic/readthedocs.org | refs/heads/master | readthedocs/donate/migrations/0001_initial.py | 36 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Supporter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Publication date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified date')),
('public', models.BooleanField(default=True, verbose_name='Public')),
('name', models.CharField(max_length=200, verbose_name='name', blank=True)),
('email', models.EmailField(max_length=200, verbose_name='Email', blank=True)),
('dollars', models.IntegerField(default=50, verbose_name='Amount', choices=[(5, b'$5'), (10, b'$10'), (25, b'$25'), (50, b'1 Hour ($50)'), (100, b'2 Hours ($100)'), (200, b'4 Hours ($200)'), (400, b'1 Day ($400)'), (800, b'2 Days ($800)'), (1200, b'3 Days ($1200)'), (1600, b'4 Days ($1600)'), (2000, b'5 Days ($2000)'), (4000, b'2 Weeks ($4000)'), (6000, b'3 Weeks ($6000)'), (8000, b'4 Weeks ($8000)')])),
('logo_url', models.URLField(max_length=255, null=True, verbose_name='Logo URL', blank=True)),
('site_url', models.URLField(max_length=255, null=True, verbose_name='Site URL', blank=True)),
('last_4_digits', models.CharField(max_length=4)),
('stripe_id', models.CharField(max_length=255)),
('subscribed', models.BooleanField(default=False)),
('user', models.ForeignKey(related_name='goldonce', verbose_name='User', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.CreateModel(
name='SupporterPromo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Publication date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified date')),
('name', models.CharField(max_length=200, verbose_name='Name')),
('analytics_id', models.CharField(max_length=200, verbose_name='Analytics ID')),
('text', models.TextField(verbose_name='Text', blank=True)),
('link', models.URLField(max_length=255, null=True, verbose_name='Link URL', blank=True)),
('image', models.URLField(max_length=255, null=True, verbose_name='Image URL', blank=True)),
('display_type', models.CharField(default=b'doc', max_length=200, verbose_name='Display Type', choices=[(b'doc', b'Documentation Pages'), (b'site-footer', b'Site Footer'), (b'search', b'Search Pages')])),
('live', models.BooleanField(default=False, verbose_name='Live')),
],
),
]
|
edum1978/eduengage | refs/heads/master | testrunner.py | 24 | #!/usr/bin/python
import optparse
import sys
import unittest
import os
USAGE = """%prog SDK_PATH TEST_PATH
Run unit test for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
def main(sdk_path, test_path):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'boilerplate/external'))
sys.path.insert(0, sdk_path)
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
result = unittest.TextTestRunner(verbosity=2).run(suite)
exit_code = 0 if result.wasSuccessful() else 1
sys.exit(exit_code)
if __name__ == '__main__':
parser = optparse.OptionParser(USAGE)
options, args = parser.parse_args()
if len(args) != 2:
print 'Error: Exactly 2 arguments required.'
parser.print_help()
sys.exit(1)
SDK_PATH = args[0]
TEST_PATH = args[1]
main(SDK_PATH, TEST_PATH)
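# Example invocation (hypothetical paths, for illustration only):
#   python testrunner.py /usr/local/google_appengine ./tests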
|
pdrobek/polcoin-core | refs/heads/1.6.2.1 | qa/rpc-tests/invalidateblock.py | 1 | #!/usr/bin/env python2
# Copyright (c) 2014 The Polcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework import PolcoinTestFramework
from polcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class InvalidateTest(PolcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
print "Mine 4 blocks on Node 0"
self.nodes[0].setgenerate(True, 4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print "Mine competing 6 blocks on Node 1"
self.nodes[1].setgenerate(True, 6)
assert(self.nodes[1].getblockcount() == 6)
print "Connect nodes to force a reorg"
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print "\nMake sure we won't reorg to a lower work chain:"
connect_nodes_bi(self.nodes,1,2)
print "Sync node 2 to node 1 so both have 6 blocks"
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print "Invalidate block 5 on node 1 so its tip is now at 4"
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print "Invalidate block 3 on node 2, so its tip is now 2"
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print "..and then mine a block"
self.nodes[2].setgenerate(True, 1)
print "Verify all nodes are at the right height"
time.sleep(5)
for i in xrange(3):
print i,self.nodes[i].getblockcount()
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
|
lfeldkam/AliPhysics | refs/heads/master | PWGJE/EMCALJetTasks/Tracks/analysis/write/RawSpectrumWriter.py | 41 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.FileHandler import LegoTrainFileReader
from PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectrumFitter import MinBiasFitter
from ROOT import TList, TFile, TObject
class RawSpectrumWriter(object):
def __init__(self):
self.__categories = {"Full":{}, "EMCAL":{}}
def AddTriggerToCategory(self, category, trigger, spectrum):
self.__categories[category][trigger] = spectrum
def Process(self, filename):
reader = LegoTrainFileReader(filename)
results = reader.ReadFile()
categories = {"Full":"tracksAll", "EMCAL":"tracksWithClusters"}
for trigger in ["MinBias", "EMCJHigh", "EMCJLow", "EMCGHigh", "EMCGLow"]:
data = results.GetData(trigger)
for stype,cont in categories.iteritems():
spectrum = self.MakeNormalisedSpectrum(data.FindTrackContainer(cont), trigger, stype)
self.AddTriggerToCategory(stype, trigger, spectrum)
if trigger == "MinBias":
self.AddTriggerToCategory(stype, "MinBiasFit", self.FitMinBias(spectrum, stype))
def FitMinBias(self, spectrum, category):
fitter = MinBiasFitter("mbfitter", spectrum)
param = fitter.MakeBinnedParameterisationDefault(True)
param.SetName("FitMinBias%s" %(category))
return param
def MakeNormalisedSpectrum(self, spectrum, trigger, category):
spectrum.SetVertexRange(-10., 10.)
spectrum.SetPileupRejection(True)
spectrum.SelectTrackCuts(1)
return spectrum.MakeProjection(0, "RawSpectrum%s%s" %(trigger, category))
def WriteToFile(self, outputname):
outputlists = []
for categ in self.__categories.keys():
mylist = TList()
mylist.SetName(categ)
for entry in self.__categories[categ].itervalues():
mylist.Add(entry)
outputlists.append(mylist)
outputfile = TFile(outputname, "RECREATE")
outputfile.cd()
for myobject in outputlists:
myobject.Write(myobject.GetName(), TObject.kSingleKey)
outputfile.Close()
def Create(filename):
writer = RawSpectrumWriter()
writer.Process(filename)
writer.WriteToFile("normspectra.root") |
gnu-sandhi/sandhi | refs/heads/master | modules/gr36/gr-blocks/python/qa_add_mult_v.py | 11 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import blocks_swig
class test_add_mult_v(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def help_ss(self, size, src_data, exp_data, op):
for s in zip(range (len (src_data)), src_data):
src = gr.vector_source_s(s[1])
srcv = gr.stream_to_vector(gr.sizeof_short, size)
self.tb.connect(src, srcv)
self.tb.connect(srcv, (op, s[0]))
rhs = gr.vector_to_stream(gr.sizeof_short, size)
dst = gr.vector_sink_s()
self.tb.connect(op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_ii(self, size, src_data, exp_data, op):
for s in zip(range (len (src_data)), src_data):
src = gr.vector_source_i(s[1])
srcv = gr.stream_to_vector(gr.sizeof_int, size)
self.tb.connect(src, srcv)
self.tb.connect(srcv, (op, s[0]))
rhs = gr.vector_to_stream(gr.sizeof_int, size)
dst = gr.vector_sink_i()
self.tb.connect(op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_ff(self, size, src_data, exp_data, op):
for s in zip(range (len (src_data)), src_data):
src = gr.vector_source_f(s[1])
srcv = gr.stream_to_vector(gr.sizeof_float, size)
self.tb.connect(src, srcv)
self.tb.connect(srcv, (op, s[0]))
rhs = gr.vector_to_stream(gr.sizeof_float, size)
dst = gr.vector_sink_f()
self.tb.connect(op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_cc(self, size, src_data, exp_data, op):
for s in zip(range (len (src_data)), src_data):
src = gr.vector_source_c(s[1])
srcv = gr.stream_to_vector(gr.sizeof_gr_complex, size)
self.tb.connect(src, srcv)
self.tb.connect(srcv, (op, s[0]))
rhs = gr.vector_to_stream(gr.sizeof_gr_complex, size)
dst = gr.vector_sink_c()
self.tb.connect(op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_const_ss(self, src_data, exp_data, op):
src = gr.vector_source_s(src_data)
srcv = gr.stream_to_vector(gr.sizeof_short, len(src_data))
rhs = gr.vector_to_stream(gr.sizeof_short, len(src_data))
dst = gr.vector_sink_s()
self.tb.connect(src, srcv, op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_const_ii(self, src_data, exp_data, op):
src = gr.vector_source_i(src_data)
srcv = gr.stream_to_vector(gr.sizeof_int, len(src_data))
rhs = gr.vector_to_stream(gr.sizeof_int, len(src_data))
dst = gr.vector_sink_i()
self.tb.connect(src, srcv, op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_const_ff(self, src_data, exp_data, op):
src = gr.vector_source_f(src_data)
srcv = gr.stream_to_vector(gr.sizeof_float, len(src_data))
rhs = gr.vector_to_stream(gr.sizeof_float, len(src_data))
dst = gr.vector_sink_f()
self.tb.connect(src, srcv, op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
def help_const_cc(self, src_data, exp_data, op):
src = gr.vector_source_c(src_data)
srcv = gr.stream_to_vector(gr.sizeof_gr_complex, len(src_data))
rhs = gr.vector_to_stream(gr.sizeof_gr_complex, len(src_data))
dst = gr.vector_sink_c()
self.tb.connect(src, srcv, op, rhs, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(exp_data, result_data)
# add_vXX
def test_add_vss_one(self):
src1_data = (1,)
src2_data = (2,)
src3_data = (3,)
expected_result = (6,)
op = blocks_swig.add_ss(1)
self.help_ss(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vss_five(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (6, 7, 8, 9, 10)
src3_data = (11, 12, 13, 14, 15)
expected_result = (18, 21, 24, 27, 30)
op = blocks_swig.add_ss(5)
self.help_ss(5, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vii_one(self):
src1_data = (1,)
src2_data = (2,)
src3_data = (3,)
expected_result = (6,)
op = blocks_swig.add_ii(1)
self.help_ii(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vii_five(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (6, 7, 8, 9, 10)
src3_data = (11, 12, 13, 14, 15)
expected_result = (18, 21, 24, 27, 30)
op = blocks_swig.add_ii(5)
self.help_ii(5, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vff_one(self):
src1_data = (1.0,)
src2_data = (2.0,)
src3_data = (3.0,)
expected_result = (6.0,)
op = blocks_swig.add_ff(1)
self.help_ff(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vff_five(self):
src1_data = (1.0, 2.0, 3.0, 4.0, 5.0)
src2_data = (6.0, 7.0, 8.0, 9.0, 10.0)
src3_data = (11.0, 12.0, 13.0, 14.0, 15.0)
expected_result = (18.0, 21.0, 24.0, 27.0, 30.0)
op = blocks_swig.add_ff(5)
self.help_ff(5, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vcc_one(self):
src1_data = (1.0+2.0j,)
src2_data = (3.0+4.0j,)
src3_data = (5.0+6.0j,)
expected_result = (9.0+12j,)
op = blocks_swig.add_cc(1)
self.help_cc(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_add_vcc_five(self):
src1_data = (1.0+2.0j, 3.0+4.0j, 5.0+6.0j, 7.0+8.0j, 9.0+10.0j)
src2_data = (11.0+12.0j, 13.0+14.0j, 15.0+16.0j, 17.0+18.0j, 19.0+20.0j)
src3_data = (21.0+22.0j, 23.0+24.0j, 25.0+26.0j, 27.0+28.0j, 29.0+30.0j)
expected_result = (33.0+36.0j, 39.0+42.0j, 45.0+48.0j, 51.0+54.0j, 57.0+60.0j)
op = blocks_swig.add_cc(5)
self.help_cc(5, (src1_data, src2_data, src3_data), expected_result, op)
# add_const_vXX
def test_add_const_vss_one(self):
src_data = (1,)
op = blocks_swig.add_const_vss((2,))
exp_data = (3,)
self.help_const_ss(src_data, exp_data, op)
def test_add_const_vss_five(self):
src_data = (1, 2, 3, 4, 5)
op = blocks_swig.add_const_vss((6, 7, 8, 9, 10))
exp_data = (7, 9, 11, 13, 15)
self.help_const_ss(src_data, exp_data, op)
def test_add_const_vii_one(self):
src_data = (1,)
op = blocks_swig.add_const_vii((2,))
exp_data = (3,)
self.help_const_ii(src_data, exp_data, op)
def test_add_const_vii_five(self):
src_data = (1, 2, 3, 4, 5)
op = blocks_swig.add_const_vii((6, 7, 8, 9, 10))
exp_data = (7, 9, 11, 13, 15)
self.help_const_ii(src_data, exp_data, op)
def test_add_const_vff_one(self):
src_data = (1.0,)
op = blocks_swig.add_const_vff((2.0,))
exp_data = (3.0,)
self.help_const_ff(src_data, exp_data, op)
def test_add_const_vff_five(self):
src_data = (1.0, 2.0, 3.0, 4.0, 5.0)
op = blocks_swig.add_const_vff((6.0, 7.0, 8.0, 9.0, 10.0))
exp_data = (7.0, 9.0, 11.0, 13.0, 15.0)
self.help_const_ff(src_data, exp_data, op)
def test_add_const_vcc_one(self):
src_data = (1.0+2.0j,)
op = blocks_swig.add_const_vcc((2.0+3.0j,))
exp_data = (3.0+5.0j,)
self.help_const_cc(src_data, exp_data, op)
def test_add_const_vcc_five(self):
src_data = (1.0+2.0j, 3.0+4.0j, 5.0+6.0j, 7.0+8.0j, 9.0+10.0j)
op = blocks_swig.add_const_vcc((11.0+12.0j, 13.0+14.0j, 15.0+16.0j, 17.0+18.0j, 19.0+20.0j))
exp_data = (12.0+14.0j, 16.0+18.0j, 20.0+22.0j, 24.0+26.0j, 28.0+30.0j)
self.help_const_cc(src_data, exp_data, op)
# multiply_vXX
def test_multiply_vss_one(self):
src1_data = (1,)
src2_data = (2,)
src3_data = (3,)
expected_result = (6,)
op = gr.multiply_vss(1)
self.help_ss(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vss_five(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (6, 7, 8, 9, 10)
src3_data = (11, 12, 13, 14, 15)
expected_result = (66, 168, 312, 504, 750)
op = gr.multiply_vss(5)
self.help_ss(5, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vii_one(self):
src1_data = (1,)
src2_data = (2,)
src3_data = (3,)
expected_result = (6,)
op = gr.multiply_vii(1)
self.help_ii(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vii_five(self):
src1_data = (1, 2, 3, 4, 5)
src2_data = (6, 7, 8, 9, 10)
src3_data = (11, 12, 13, 14, 15)
expected_result = (66, 168, 312, 504, 750)
op = gr.multiply_vii(5)
self.help_ii(5, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vff_one(self):
src1_data = (1.0,)
src2_data = (2.0,)
src3_data = (3.0,)
expected_result = (6.0,)
op = gr.multiply_vff(1)
self.help_ff(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vff_five(self):
src1_data = (1.0, 2.0, 3.0, 4.0, 5.0)
src2_data = (6.0, 7.0, 8.0, 9.0, 10.0)
src3_data = (11.0, 12.0, 13.0, 14.0, 15.0)
expected_result = (66.0, 168.0, 312.0, 504.0, 750.0)
op = gr.multiply_vff(5)
self.help_ff(5, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vcc_one(self):
src1_data = (1.0+2.0j,)
src2_data = (3.0+4.0j,)
src3_data = (5.0+6.0j,)
expected_result = (-85+20j,)
op = gr.multiply_vcc(1)
self.help_cc(1, (src1_data, src2_data, src3_data), expected_result, op)
def test_multiply_vcc_five(self):
src1_data = (1.0+2.0j, 3.0+4.0j, 5.0+6.0j, 7.0+8.0j, 9.0+10.0j)
src2_data = (11.0+12.0j, 13.0+14.0j, 15.0+16.0j, 17.0+18.0j, 19.0+20.0j)
src3_data = (21.0+22.0j, 23.0+24.0j, 25.0+26.0j, 27.0+28.0j, 29.0+30.0j)
expected_result = (-1021.0+428.0j, -2647.0+1754.0j, -4945.0+3704.0j, -8011.0+6374.0j, -11941.0+9860.0j)
op = gr.multiply_vcc(5)
self.help_cc(5, (src1_data, src2_data, src3_data), expected_result, op)
# multiply_const_vXX
def test_multiply_const_vss_one(self):
src_data = (2,)
op = gr.multiply_const_vss((3,))
exp_data = (6,)
self.help_const_ss(src_data, exp_data, op)
def test_multiply_const_vss_five(self):
src_data = (1, 2, 3, 4, 5)
op = gr.multiply_const_vss((6, 7, 8, 9, 10))
exp_data = (6, 14, 24, 36, 50)
self.help_const_ss(src_data, exp_data, op)
def test_multiply_const_vii_one(self):
src_data = (2,)
op = gr.multiply_const_vii((3,))
exp_data = (6,)
self.help_const_ii(src_data, exp_data, op)
def test_multiply_const_vii_five(self):
src_data = (1, 2, 3, 4, 5)
op = gr.multiply_const_vii((6, 7, 8, 9, 10))
exp_data = (6, 14, 24, 36, 50)
self.help_const_ii(src_data, exp_data, op)
def test_multiply_const_vff_one(self):
src_data = (2.0,)
op = gr.multiply_const_vff((3.0,))
exp_data = (6.0,)
self.help_const_ff(src_data, exp_data, op)
def test_multiply_const_vff_five(self):
src_data = (1.0, 2.0, 3.0, 4.0, 5.0)
op = gr.multiply_const_vff((6.0, 7.0, 8.0, 9.0, 10.0))
exp_data = (6.0, 14.0, 24.0, 36.0, 50.0)
self.help_const_ff(src_data, exp_data, op)
def test_multiply_const_vcc_one(self):
src_data = (1.0+2.0j,)
op = gr.multiply_const_vcc((2.0+3.0j,))
exp_data = (-4.0+7.0j,)
self.help_const_cc(src_data, exp_data, op)
def test_multiply_const_vcc_five(self):
src_data = (1.0+2.0j, 3.0+4.0j, 5.0+6.0j, 7.0+8.0j, 9.0+10.0j)
op = gr.multiply_const_vcc((11.0+12.0j, 13.0+14.0j, 15.0+16.0j, 17.0+18.0j, 19.0+20.0j))
exp_data = (-13.0+34.0j, -17.0+94.0j, -21.0+170.0j, -25.0+262.0j, -29.0+370.0j)
self.help_const_cc(src_data, exp_data, op)
if __name__ == '__main__':
gr_unittest.run(test_add_mult_v, "test_add_mult_v.xml")
|
Leits/robot_tests | refs/heads/master | op_robot_tests/__init__.py | 9480 | #
|
clonemeagain/cloudprint | refs/heads/master | ez_setup.py | 276 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
YihaoLu/statsmodels | refs/heads/master | statsmodels/genmod/families/family.py | 19 | '''
The one parameter exponential family distributions used by GLM.
'''
# TODO: quasi, quasibinomial, quasipoisson
# see http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import numpy as np
from scipy import special
from . import links as L
from . import varfuncs as V
FLOAT_EPS = np.finfo(float).eps
class Family(object):
"""
The parent class for one-parameter exponential families.
Parameters
----------
link : a link function instance
Link is the linear transformation function.
See the individual families for available links.
variance : a variance function
Measures the variance as a function of the mean probabilities.
See the individual families for the default variance function.
See Also
--------
:ref:`links`
"""
# TODO: change these class attributes, use valid somewhere...
valid = [-np.inf, np.inf]
links = []
def _setlink(self, link):
"""
Helper method to set the link for a family.
Raises a ValueError exception if the link is not available. Note that
the error message might not be that informative because it tells you
that the link should be in the base class for the link function.
See glm.GLM for a list of appropriate links for each family but note
that not all of these are currently available.
"""
# TODO: change the links class attribute in the families to hold
# meaningful information instead of a list of links instances such as
# [<statsmodels.family.links.Log object at 0x9a4240c>,
# <statsmodels.family.links.Power object at 0x9a423ec>,
# <statsmodels.family.links.Power object at 0x9a4236c>]
# for Poisson...
self._link = link
if not isinstance(link, L.Link):
raise TypeError("The input should be a valid Link object.")
if hasattr(self, "links"):
validlink = link in self.links
validlink = max([isinstance(link, _) for _ in self.links])
if not validlink:
errmsg = "Invalid link for family, should be in %s. (got %s)"
raise ValueError(errmsg % (repr(self.links), link))
def _getlink(self):
"""
Helper method to get the link for a family.
"""
return self._link
# link property for each family is a pointer to link instance
link = property(_getlink, _setlink, doc="Link function for family")
def __init__(self, link, variance):
self.link = link()
self.variance = variance
def starting_mu(self, y):
"""
Starting value for mu in the IRLS algorithm.
Parameters
----------
y : array
The untransformed response variable.
Returns
-------
mu_0 : array
The first guess on the transformed response variable.
Notes
-----
mu_0 = (endog + mean(endog))/2.
        Only the Binomial family takes a different initial value.
"""
return (y + y.mean())/2.
def weights(self, mu):
"""
Weights for IRLS steps
Parameters
----------
mu : array-like
The transformed mean response variable in the exponential family
Returns
-------
w : array
The weights for the IRLS steps
Notes
-----
`w` = 1 / (link'(`mu`)**2 * variance(`mu`))
"""
return 1. / (self.link.deriv(mu)**2 * self.variance(mu))
def deviance(self, endog, mu, scale=1.):
"""
Deviance of (endog,mu) pair.
Deviance is usually defined as twice the loglikelihood ratio.
Parameters
----------
endog : array-like
The endogenous response variable
mu : array-like
The inverse of the link function at the linear predicted values.
scale : float, optional
An optional scale argument
Returns
-------
Deviance : array
The value of deviance function defined below.
Notes
-----
Deviance is defined
.. math::
\sum_i(2 loglike(y_i, y_i) - 2 * loglike(y_i, mu_i)) / scale
where y is the endogenous variable. The deviance functions are
analytically defined for each family.
"""
raise NotImplementedError
def resid_dev(self, endog, mu, scale=1.):
"""
The deviance residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
Deviance residuals.
Notes
-----
The deviance residuals are defined for each family.
"""
raise NotImplementedError
def fitted(self, lin_pred):
"""
Fitted values based on linear predictors lin_pred.
Parameters
-----------
lin_pred : array
Values of the linear predictor of the model.
dot(X,beta) in a classical linear model.
Returns
--------
mu : array
The mean response variables given by the inverse of the link
function.
"""
fits = self.link.inverse(lin_pred)
return fits
def predict(self, mu):
"""
Linear predictors based on given mu values.
Parameters
----------
mu : array
The mean response variables
Returns
-------
lin_pred : array
Linear predictors based on the mean response variables. The value
of the link function at the given mu.
"""
return self.link(mu)
def loglike(self, endog, mu, scale=1.):
"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
`endog` : array
Usually the endogenous response variable.
`mu` : array
Usually but not always the fitted mean response variable.
scale : float
The scale parameter
Returns
-------
llf : float
The value of the loglikelihood evaluated at (endog,mu).
Notes
-----
This is defined for each family. endog and mu are not restricted to
`endog` and `mu` respectively. For instance, the deviance function
calls both loglike(endog,endog) and loglike(endog,mu) to get the
likelihood ratio.
"""
raise NotImplementedError
def resid_anscombe(self, endog, mu):
"""
        The Anscombe residuals.
See also
--------
statsmodels.families.family.Family docstring and the `resid_anscombe`
for the individual families for more information.
"""
raise NotImplementedError
class Poisson(Family):
"""
Poisson exponential family.
Parameters
----------
link : a link instance, optional
The default link for the Poisson family is the log link. Available
links are log, identity, and sqrt. See statsmodels.family.links for
more information.
Attributes
----------
Poisson.link : a link instance
The link function of the Poisson instance.
Poisson.variance : varfuncs instance
`variance` is an instance of
statsmodels.genmod.families.family.varfuncs.mu
See also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
"""
links = [L.log, L.identity, L.sqrt]
variance = V.mu
valid = [0, np.inf]
safe_links = [L.Log,]
def __init__(self, link=L.log):
self.variance = Poisson.variance
self.link = link()
def _clean(self, x):
"""
        Helper function to trim the data so that it is in (0,inf)
        Notes
        -----
        The need for this function was discovered through usage and it's
        possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, FLOAT_EPS, np.inf)
def resid_dev(self, endog, mu, scale=1.):
"""Poisson deviance residual
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
resid_dev = sign(endog-mu)*sqrt(2*endog*log(endog/mu)-2*(endog-mu))
"""
endog_mu = self._clean(endog/mu)
return np.sign(endog - mu) * np.sqrt(2 * endog *
np.log(endog_mu) -
2 * (endog - mu))/scale
def deviance(self, endog, mu, scale=1.):
'''
Poisson deviance function
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
The deviance function at (endog,mu) as defined below.
Notes
-----
If a constant term is included it is defined as
:math:`deviance = 2*\\sum_{i}(Y*\\log(Y/\\mu))`
'''
endog_mu = self._clean(endog/mu)
return 2*np.sum(endog*np.log(endog_mu))/scale
def loglike(self, endog, mu, scale=1.):
"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The scale parameter, defaults to 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,scale) as defined below.
Notes
-----
llf = scale * sum(-mu + endog*log(mu) - gammaln(endog+1))
where gammaln is the log gamma function
"""
return scale * np.sum(-mu + endog*np.log(mu)-special.gammaln(endog+1))
def resid_anscombe(self, endog, mu):
"""
Anscombe residuals for the Poisson exponential family distribution
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
            The Anscombe residuals for the Poisson family defined below
Notes
-----
resid_anscombe is defined
        .. math::
            (3/2.)*(endog^{2/3.} - \\mu^{2/3.})/\\mu^{1/6.}
"""
return (3/2.)*(endog**(2/3.)-mu**(2/3.))/mu**(1/6.)
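# --- Illustrative check (not part of the statsmodels source) ----------------
# A quick numerical illustration of the Poisson deviance/residual formulas
# documented above, on arbitrary toy data.  Guarded so that importing the
# module is unaffected.
if __name__ == "__main__":
    _endog = np.array([1., 2., 4.])
    _mu = np.array([1.5, 2.0, 3.0])
    _fam = Poisson()
    print(_fam.deviance(_endog, _mu))   # 2*sum(endog*log(endog/mu))
    print(_fam.resid_dev(_endog, _mu))  # signed square-root contributions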
class Gaussian(Family):
"""
Gaussian exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gaussian family is the identity link.
Available links are log, identity, and inverse.
See statsmodels.family.links for more information.
Attributes
----------
Gaussian.link : a link instance
The link function of the Gaussian instance
Gaussian.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.constant
See also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.constant
safe_links = links
def __init__(self, link=L.identity):
self.variance = Gaussian.variance
self.link = link()
def resid_dev(self, endog, mu, scale=1.):
"""
Gaussian deviance residuals
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
--------
`resid_dev` = (`endog` - `mu`)/sqrt(variance(`mu`))
"""
return (endog - mu) / np.sqrt(self.variance(mu))/scale
def deviance(self, endog, mu, scale=1.):
"""
Gaussian deviance function
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
The deviance function at (endog,mu) as defined below.
Notes
--------
`deviance` = sum((endog-mu)**2)
"""
return np.sum((endog-mu)**2)/scale
def loglike(self, endog, mu, scale=1.):
"""
The log-likelihood in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
Scales the loglikelihood function. The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,scale) as defined below.
Notes
-----
If the link is the identity link function then the
loglikelihood function is the same as the classical OLS model.
llf = -(nobs/2)*(log(SSR) + (1 + log(2*pi/nobs)))
where SSR = sum((endog-link^(-1)(mu))**2)
        If the link is not the identity link then the loglikelihood
function is defined as
llf = sum((`endog`*`mu`-`mu`**2/2)/`scale` - `endog`**2/(2*`scale`) - \
(1/2.)*log(2*pi*`scale`))
"""
if isinstance(self.link, L.Power) and self.link.power == 1:
# This is just the loglikelihood for classical OLS
nobs2 = endog.shape[0]/2.
SSR = np.sum((endog-self.fitted(mu))**2, axis=0)
llf = -np.log(SSR) * nobs2
llf -= (1+np.log(np.pi/nobs2))*nobs2
return llf
else:
# Return the loglikelihood for Gaussian GLM
return np.sum((endog * mu - mu**2/2)/scale - endog**2/(2 * scale)
- .5*np.log(2 * np.pi * scale))
def resid_anscombe(self, endog, mu):
"""
The Anscombe residuals for the Gaussian exponential family distribution
Parameters
----------
endog : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gaussian family defined below
Notes
--------
`resid_anscombe` = `endog` - `mu`
"""
return endog-mu
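# --- Illustrative check (not part of the statsmodels source) ----------------
# Demonstrates the claim in Gaussian.loglike above: with the (default)
# identity link the loglikelihood reduces to the concentrated OLS
# loglikelihood.  Toy data; guarded so that importing the module is
# unaffected.
if __name__ == "__main__":
    _y = np.array([1.0, 2.0, 3.0, 4.0])
    _fitted = np.array([1.1, 1.9, 3.2, 3.8])
    _ssr = np.sum((_y - _fitted) ** 2)
    _nobs2 = _y.shape[0] / 2.
    _ols_llf = -_nobs2 * (np.log(_ssr) + 1 + np.log(np.pi / _nobs2))
    print(Gaussian().loglike(_y, _fitted), _ols_llf)  # the two values agree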
class Gamma(Family):
"""
Gamma exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gamma family is the inverse link.
Available links are log, identity, and inverse.
See statsmodels.family.links for more information.
Attributes
----------
Gamma.link : a link instance
The link function of the Gamma instance
Gamma.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.mu_squared
See also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.mu_squared
safe_links = [L.Log,]
def __init__(self, link=L.inverse_power):
self.variance = Gamma.variance
self.link = link()
def _clean(self, x):
"""
        Helper function to trim the data so that it is in (0,inf)
        Notes
        -----
        The need for this function was discovered through usage and it's
        possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, FLOAT_EPS, np.inf)
def deviance(self, endog, mu, scale=1.):
"""
Gamma deviance function
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = 2*sum((endog - mu)/mu - log(endog/mu))
"""
endog_mu = self._clean(endog/mu)
return 2 * np.sum((endog - mu)/mu - np.log(endog_mu))
def resid_dev(self, endog, mu, scale=1.):
r"""
Gamma deviance residuals
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
`resid_dev` is defined
        .. math::
sign(endog - \mu) * \sqrt{-2*(-(endog-\mu)/\mu + \log(endog/\mu))}
"""
endog_mu = self._clean(endog/mu)
return np.sign(endog - mu) * np.sqrt(-2 * (-(endog - mu)/mu +
np.log(endog_mu)))
def loglike(self, endog, mu, scale=1.):
"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,scale) as defined below.
Notes
--------
llf = -1/scale * sum(endog/mu + log(mu) + (scale-1)*log(endog) +\
log(scale) + scale*gammaln(1/scale))
where gammaln is the log gamma function.
"""
return - 1./scale * np.sum(endog/mu + np.log(mu) + (scale - 1) *
np.log(endog) + np.log(scale) + scale *
special.gammaln(1./scale))
# in Stata scale is set to equal 1 for reporting llf
# in R it's the dispersion, though there is a loss of precision vs.
# our results due to an assumed difference in implementation
def resid_anscombe(self, endog, mu):
"""
The Anscombe residuals for Gamma exponential family distribution
Parameters
----------
endog : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gamma family defined below
Notes
-----
resid_anscombe = 3*(endog**(1/3.)-mu**(1/3.))/mu**(1/3.)
"""
return 3*(endog**(1/3.)-mu**(1/3.))/mu**(1/3.)
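# A minimal usage sketch of the Gamma family, assuming small illustrative
# arrays for the observed response `endog` and the fitted mean `mu`; the
# helper name and values below are purely for demonstration.
def _example_gamma_residuals():
    fam = Gamma()
    endog = np.array([1.0, 2.0, 4.0])
    mu = np.array([1.5, 2.5, 3.0])
    # deviance and Anscombe residuals computed from the formulas documented above
    return fam.resid_dev(endog, mu), fam.resid_anscombe(endog, mu)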
class Binomial(Family):
"""
Binomial exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Binomial family is the logit link.
Available links are logit, probit, cauchy, log, and cloglog.
See statsmodels.family.links for more information.
Attributes
----------
Binomial.link : a link instance
The link function of the Binomial instance
Binomial.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.binary
See also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
endog for Binomial can be specified in one of three ways.
"""
links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog, L.identity]
variance = V.binary # this is not used below in an effort to include n
# Other safe links, e.g. cloglog and probit are subclasses
safe_links = [L.Logit, L.CDFLink]
def __init__(self, link=L.logit): # , n=1.):
# TODO: it *should* work for a constant n>1 actually, if data_weights
# is equal to n
self.n = 1
# overwritten by initialize if needed but always used to initialize
# variance since endog is assumed/forced to be (0,1)
self.variance = V.Binomial(n=self.n)
self.link = link()
def starting_mu(self, y):
"""
The starting values for the IRLS algorithm for the Binomial family.
A good choice for the binomial family is
starting_mu = (y + .5)/2
"""
return (y + .5)/2
def initialize(self, endog):
'''
Initialize the response variable.
Parameters
----------
endog : array
Endogenous response variable
Returns
--------
If `endog` is binary, returns `endog`
If `endog` is a 2d array, then the input is assumed to be in the format
(successes, failures) and
        successes/(successes + failures) is returned, and n is set to
successes + failures.
'''
if (endog.ndim > 1 and endog.shape[1] > 1):
y = endog[:, 0]
self.n = endog.sum(1) # overwrite self.n for deviance below
return y*1./self.n
else:
return endog
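    # For example, a two-column endog such as [[3, 1], [0, 4]] (successes,
    # failures) is converted by `initialize` to the proportions [0.75, 0.0]
    # with self.n set to [4, 4], while a one-dimensional binary endog is
    # returned unchanged (values here are illustrative only).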
def deviance(self, endog, mu, scale=1.):
'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
endog : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
--------
deviance : float
The deviance function as defined below
Notes
-----
If the endogenous variable is binary:
`deviance` = -2*sum(I_one * log(mu) + (I_zero)*log(1-mu))
        where I_one is an indicator function that evaluates to 1 if
endog_i == 1. and I_zero is an indicator function that evaluates to
1 if endog_i == 0.
        If the model is binomial:
`deviance` = 2*sum(log(endog/mu) + (n-endog)*log((n-endog)/(n-mu)))
where endog and n are as defined in Binomial.initialize.
'''
if np.shape(self.n) == () and self.n == 1:
one = np.equal(endog, 1)
return -2 * np.sum(one * np.log(mu + 1e-200) + (1-one) *
np.log(1 - mu + 1e-200))
else:
return 2 * np.sum(self.n * (endog * np.log(endog/mu + 1e-200) +
(1 - endog) * np.log((1 - endog) /
(1 - mu) +
1e-200)))
def resid_dev(self, endog, mu, scale=1.):
"""
Binomial deviance residuals
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
If `endog` is binary:
resid_dev = sign(endog-mu)*sqrt(-2*log(I_one*mu + I_zero*(1-mu)))
where I_one is an indicator function that evaluates as 1 if endog == 1
and I_zero is an indicator function that evaluates as 1 if endog == 0.
If `endog` is binomial:
resid_dev = sign(endog - mu) * sqrt(2 * n * (endog * log(endog/mu) +
(1 - endog) * log((1 - endog)/(1 - mu))))
where endog and n are as defined in Binomial.initialize.
"""
mu = self.link._clean(mu)
if np.shape(self.n) == () and self.n == 1:
one = np.equal(endog, 1)
return np.sign(endog-mu)*np.sqrt(-2 * np.log(one * mu + (1 - one) *
(1 - mu)))/scale
else:
return (np.sign(endog - mu) *
np.sqrt(2 * self.n * (endog * np.log(endog/mu + 1e-200) +
(1 - endog) * np.log((1 - endog)/(1 - mu) +
1e-200)))/scale)
def loglike(self, endog, mu, scale=1.):
"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
Not used for the Binomial GLM.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,scale) as defined below.
Notes
--------
If `endog` is binary:
`llf` = scale*sum(endog*log(mu/(1-mu))+log(1-mu))
If `endog` is binomial:
`llf` = scale*sum(gammaln(n+1) - gammaln(y+1) - gammaln(n-y+1) +\
        y*log(mu/(1-mu)) + n*log(1-mu))
where gammaln is the log gamma function and y = endog*n with endog
and n as defined in Binomial initialize. This simply makes y the
original number of successes.
"""
if np.shape(self.n) == () and self.n == 1:
return scale * np.sum(endog * np.log(mu/(1 - mu) + 1e-200) +
np.log(1 - mu))
else:
y = endog * self.n # convert back to successes
return scale * np.sum(special.gammaln(self.n + 1) -
special.gammaln(y + 1) -
special.gammaln(self.n - y + 1) + y *
np.log(mu/(1 - mu)) + self.n *
np.log(1 - mu))
def resid_anscombe(self, endog, mu):
'''
The Anscombe residuals
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
sqrt(n)*(cox_snell(endog)-cox_snell(mu))/(mu**(1/6.)*(1-mu)**(1/6.))
where cox_snell is defined as
        cox_snell(x) = betainc(2/3., 2/3., x)*beta(2/3., 2/3.)
        where betainc is the regularized incomplete beta function and beta is
        the (complete) beta function
The name 'cox_snell' is idiosyncratic and is simply used for
convenience following the approach suggested in Cox and Snell (1968).
Further note that
cox_snell(x) = x**(2/3.)/(2/3.)*hyp2f1(2/3.,1/3.,5/3.,x)
where hyp2f1 is the hypergeometric 2f1 function. The Anscombe
residuals are sometimes defined in the literature using the
hyp2f1 formulation. Both betainc and hyp2f1 can be found in scipy.
References
----------
Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
paper." Journal of the Royal Statistical Society B. 15, 229-30.
Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
Journal of the Royal Statistical Society B. 30, 248-75.
'''
cox_snell = lambda x: (special.betainc(2/3., 2/3., x)
* special.beta(2/3., 2/3.))
return np.sqrt(self.n) * ((cox_snell(endog) - cox_snell(mu)) /
(mu**(1/6.) * (1 - mu)**(1/6.)))
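# A small numerical sketch of the `cox_snell` equivalence noted in
# `Binomial.resid_anscombe`, comparing the incomplete-beta form with the
# hypergeometric form at an illustrative point x; both should agree up to
# floating point error (the helper name is purely illustrative).
def _example_cox_snell_forms(x=0.3):
    betainc_form = special.betainc(2/3., 2/3., x) * special.beta(2/3., 2/3.)
    hyp2f1_form = x**(2/3.) / (2/3.) * special.hyp2f1(2/3., 1/3., 5/3., x)
    return betainc_form, hyp2f1_form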
class InverseGaussian(Family):
"""
InverseGaussian exponential family.
Parameters
----------
link : a link instance, optional
The default link for the inverse Gaussian family is the
inverse squared link.
Available links are inverse_squared, inverse, log, and identity.
See statsmodels.family.links for more information.
Attributes
----------
InverseGaussian.link : a link instance
The link function of the inverse Gaussian instance
InverseGaussian.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.mu_cubed
See also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
    The inverse Gaussian distribution is sometimes referred to in the
literature as the Wald distribution.
"""
links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
variance = V.mu_cubed
safe_links = [L.inverse_squared, L.Log,]
def __init__(self, link=L.inverse_squared):
self.variance = InverseGaussian.variance
self.link = link()
def resid_dev(self, endog, mu, scale=1.):
"""
Returns the deviance residuals for the inverse Gaussian family.
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional argument to divide the residuals by scale
Returns
-------
resid_dev : array
Deviance residuals as defined below
Notes
-----
        `resid_dev` = sign(endog - mu)*sqrt((endog - mu)**2/(endog*mu**2))/scale
"""
return np.sign(endog-mu) * np.sqrt((endog-mu)**2/(endog*mu**2))/scale
def deviance(self, endog, mu, scale=1.):
"""
Inverse Gaussian deviance function
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
        `deviance` = sum((endog - mu)**2/(endog*mu**2))/scale
"""
return np.sum((endog-mu)**2/(endog*mu**2))/scale
def loglike(self, endog, mu, scale=1.):
"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
The default is 1.
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,scale) as defined below.
Notes
-----
`llf` = -(1/2.)*sum((endog-mu)**2/(endog*mu**2*scale)
+ log(scale*endog**3) + log(2*pi))
"""
return -.5 * np.sum((endog - mu)**2/(endog * mu**2 * scale)
+ np.log(scale * endog**3) + np.log(2 * np.pi))
def resid_anscombe(self, endog, mu):
"""
The Anscombe residuals for the inverse Gaussian distribution
Parameters
----------
endog : array
Endogenous response variable
mu : array
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals for the inverse Gaussian distribution as
defined below
Notes
-----
`resid_anscombe` = log(endog/mu)/sqrt(mu)
"""
return np.log(endog/mu)/np.sqrt(mu)
class NegativeBinomial(Family):
"""
Negative Binomial exponential family.
Parameters
----------
link : a link instance, optional
The default link for the negative binomial family is the log link.
Available links are log, cloglog, identity, nbinom and power.
See statsmodels.family.links for more information.
alpha : float, optional
The ancillary parameter for the negative binomial distribution.
For now `alpha` is assumed to be nonstochastic. The default value
is 1. Permissible values are usually assumed to be between .01 and 2.
Attributes
----------
NegativeBinomial.link : a link instance
The link function of the negative binomial instance
NegativeBinomial.variance : varfunc instance
`variance` is an instance of statsmodels.family.varfuncs.nbinom
See also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
Power link functions are not yet supported.
"""
links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
# TODO: add the ability to use the power links with an if test
# similar to below
variance = V.nbinom
safe_links = [L.Log,]
def __init__(self, link=L.log, alpha=1.):
self.alpha = 1. * alpha # make it at least float
self.variance = V.NegativeBinomial(alpha=self.alpha)
if isinstance(link, L.NegativeBinomial):
self.link = link(alpha=self.alpha)
else:
self.link = link()
def _clean(self, x):
"""
        Helper function to trim the data so that it is in (0, inf)
Notes
-----
        The need for this function was discovered through usage, and it is
        possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, FLOAT_EPS, np.inf)
def deviance(self, endog, mu, scale=1.):
r"""
Returns the value of the deviance function.
Parameters
-----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
scale : float, optional
An optional scale argument
Returns
-------
deviance : float
Deviance function as defined below
Notes
-----
`deviance` = sum(piecewise)
where piecewise is defined as
If :math:`Y_{i} == 0`:
.. math::
piecewise_i = 2\log(1+\alpha*\mu)/\alpha
If :math:`Y_{i} > 0`:
.. math::
            piecewise_i = 2 Y \log(Y/\mu) - 2/\alpha (1 + \alpha Y) \log((1 + \alpha Y)/(1 + \alpha \mu))
"""
iszero = np.equal(endog, 0)
notzero = 1 - iszero
endog_mu = self._clean(endog/mu)
tmp = iszero * 2 * np.log(1 + self.alpha * mu)/self.alpha
tmp += notzero * (2 * endog * np.log(endog_mu) - 2/self.alpha *
(1 + self.alpha*endog) *
np.log((1 + self.alpha * endog) /
(1 + self.alpha * mu)))
return np.sum(tmp)/scale
def resid_dev(self, endog, mu, scale=1.):
r'''
Negative Binomial Deviance Residual
Parameters
----------
endog : array-like
`endog` is the response variable
mu : array-like
`mu` is the fitted value of the model
scale : float, optional
An optional argument to divide the residuals by scale
Returns
--------
resid_dev : array
The array of deviance residuals
Notes
-----
`resid_dev` = sign(endog-mu) * sqrt(piecewise)
where piecewise is defined as
If :math:`Y_i = 0`:
.. math::
            piecewise_i = 2 \log(1 + \alpha \mu)/\alpha
If :math:`Y_i > 0`:
.. math::
            piecewise_i = 2 Y \log(Y/\mu) - 2/\alpha (1 + \alpha Y) \log((1 + \alpha Y)/(1 + \alpha \mu))
'''
iszero = np.equal(endog, 0)
notzero = 1 - iszero
endog_mu = self._clean(endog/mu)
tmp = iszero * 2 * np.log(1 + self.alpha * mu)/self.alpha
tmp += notzero * (2 * endog * np.log(endog_mu) - 2/self.alpha *
(1 + self.alpha * endog) *
np.log((1 + self.alpha * endog) /
(1 + self.alpha * mu)))
return np.sign(endog - mu) * np.sqrt(tmp)/scale
def loglike(self, endog, mu, scale):
"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
The fitted mean response values
scale : float
The scale parameter
Returns
-------
llf : float
The value of the loglikelihood function evaluated at
(endog,mu,scale) as defined below.
Notes
-----
sum(endog*log(alpha*exp(lin_pred)/(1+alpha*exp(lin_pred))) -
log(1+alpha*exp(lin_pred))/alpha + constant)
where constant is defined as::
constant = gammaln(endog + 1/alpha) - gammaln(endog + 1) -
gammaln(1/alpha)
"""
lin_pred = self._link(mu)
constant = special.gammaln(endog + 1/self.alpha) - special.gammaln(endog+1)\
-special.gammaln(1/self.alpha)
exp_lin_pred = np.exp(lin_pred)
return (np.sum(endog * np.log(self.alpha * exp_lin_pred /
(1 + self.alpha * exp_lin_pred)) -
np.log(1 + self.alpha * exp_lin_pred)/self.alpha + constant))
def resid_anscombe(self, endog, mu):
"""
The Anscombe residuals for the negative binomial family
Parameters
----------
endog : array-like
Endogenous response variable
mu : array-like
Fitted mean response variable
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
`resid_anscombe` = (hyp2f1(-alpha*endog)-hyp2f1(-alpha*mu)+\
1.5*(endog**(2/3.)-mu**(2/3.)))/(mu+alpha*mu**2)**(1/6.)
where hyp2f1 is the hypergeometric 2f1 function parameterized as
hyp2f1(x) = hyp2f1(2/3.,1/3.,5/3.,x)
"""
hyp2f1 = lambda x : special.hyp2f1(2/3., 1/3., 5/3., x)
return ((hyp2f1(-self.alpha * endog) - hyp2f1(-self.alpha * mu) +
1.5 * (endog**(2/3.)-mu**(2/3.))) /
(mu + self.alpha*mu**2)**(1/6.))
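# A minimal sketch of the NegativeBinomial deviance, assuming an illustrative
# ancillary parameter alpha=0.5 and made-up count data; the zero count
# exercises the 2*log(1 + alpha*mu)/alpha branch of the piecewise form
# documented above (the helper name is purely illustrative).
def _example_negative_binomial_deviance():
    fam = NegativeBinomial(alpha=0.5)
    endog = np.array([0., 1., 3.])
    mu = np.array([0.5, 1.5, 2.0])
    return fam.deviance(endog, mu)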
|
hackolite/PRJ-medtec_sigproc | refs/heads/master | echopen-leaderboard/bootcamp/feeds/migrations/__init__.py | 12133432 | |
blmlove409/python-goose | refs/heads/master | tests/configuration.py | 22 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tempfile
import unittest
from goose import Goose
class TestTempDir(unittest.TestCase):
def test_tmp_defaut(self):
g = Goose()
default_local_storage_path = os.path.join(tempfile.gettempdir(), 'goose')
        self.assertEqual(g.config.local_storage_path, default_local_storage_path)
def test_tmp_overwritten(self):
path = '/tmp/bla'
g = Goose({'local_storage_path': path})
        self.assertEqual(g.config.local_storage_path, path)
|
charles-vdulac/xapian-haystack | refs/heads/master | xapian_backend.py | 1 | from __future__ import unicode_literals
import datetime
import pickle
import os
import re
import shutil
import sys
from django.utils import six
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import force_text
from haystack import connections
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, SearchNode, log_query
from haystack.constants import ID, DJANGO_ID, DJANGO_CT, DEFAULT_OPERATOR
from haystack.exceptions import HaystackError, MissingDependency
from haystack.inputs import AutoQuery
from haystack.models import SearchResult
from haystack.utils import get_identifier, get_model_ct
NGRAM_MIN_LENGTH = 2
NGRAM_MAX_LENGTH = 15
try:
import xapian
except ImportError:
raise MissingDependency("The 'xapian' backend requires the installation of 'Xapian'. "
"Please refer to the documentation.")
class NotSupportedError(Exception):
"""
When the installed version of Xapian doesn't support something and we have
the old implementation.
"""
pass
# this maps the different reserved fields to prefixes used to
# create the database:
# id str: unique document id.
# django_id int: id of the django model instance.
# django_ct str: of the content type of the django model.
# field str: name of the field of the index.
TERM_PREFIXES = {
ID: 'Q',
DJANGO_ID: 'QQ',
DJANGO_CT: 'CONTENTTYPE',
'field': 'X'
}
MEMORY_DB_NAME = ':memory:'
DEFAULT_XAPIAN_FLAGS = (
xapian.QueryParser.FLAG_PHRASE |
xapian.QueryParser.FLAG_BOOLEAN |
xapian.QueryParser.FLAG_LOVEHATE |
xapian.QueryParser.FLAG_WILDCARD |
xapian.QueryParser.FLAG_PURE_NOT
)
# Mapping from `HAYSTACK_DEFAULT_OPERATOR` to Xapian operators
XAPIAN_OPTS = {'AND': xapian.Query.OP_AND,
'OR': xapian.Query.OP_OR,
'PHRASE': xapian.Query.OP_PHRASE,
'NEAR': xapian.Query.OP_NEAR
}
# number of documents checked by default when building facets
# this must be improved to be relative to the total number of docs.
DEFAULT_CHECK_AT_LEAST = 1000
# field types accepted to be serialized as values in Xapian
FIELD_TYPES = {'text', 'integer', 'date', 'datetime', 'float', 'boolean',
'edge_ngram', 'ngram'}
# defines the format used to store types in Xapian
# this format ensures datetimes are sorted correctly
DATETIME_FORMAT = '%Y%m%d%H%M%S'
INTEGER_FORMAT = '%012d'
# defines the distance given between
# texts with positional information
TERMPOS_DISTANCE = 100
class InvalidIndexError(HaystackError):
"""Raised when an index can not be opened."""
pass
class XHValueRangeProcessor(xapian.ValueRangeProcessor):
"""
A Processor to construct ranges of values
"""
def __init__(self, backend):
self.backend = backend
xapian.ValueRangeProcessor.__init__(self)
def __call__(self, begin, end):
"""
Construct a tuple for value range processing.
`begin` -- a string in the format '<field_name>:[low_range]'
If 'low_range' is omitted, assume the smallest possible value.
        `end` -- a string in the format '[high_range|*]'. If '*', assume
the highest possible value.
Return a tuple of three strings: (column, low, high)
"""
colon = begin.find(':')
field_name = begin[:colon]
begin = begin[colon + 1:len(begin)]
for field_dict in self.backend.schema:
if field_dict['field_name'] == field_name:
field_type = field_dict['type']
if not begin:
if field_type == 'text':
begin = 'a' # TODO: A better way of getting a min text value?
elif field_type == 'integer':
begin = -sys.maxsize - 1
elif field_type == 'float':
begin = float('-inf')
elif field_type == 'date' or field_type == 'datetime':
begin = '00010101000000'
elif end == '*':
if field_type == 'text':
end = 'z' * 100 # TODO: A better way of getting a max text value?
elif field_type == 'integer':
end = sys.maxsize
elif field_type == 'float':
end = float('inf')
elif field_type == 'date' or field_type == 'datetime':
end = '99990101000000'
if field_type == 'float':
begin = _term_to_xapian_value(float(begin), field_type)
end = _term_to_xapian_value(float(end), field_type)
elif field_type == 'integer':
begin = _term_to_xapian_value(int(begin), field_type)
end = _term_to_xapian_value(int(end), field_type)
return field_dict['column'], str(begin), str(end)
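    # For example, XHValueRangeProcessor.__call__ above, given an integer
    # field "price" stored in column 5, would map the pair ("price:10", "200")
    # to something like (5, '000000000010', '000000000200'), i.e. the value
    # column plus both endpoints serialized with INTEGER_FORMAT (the field
    # name and column number here are illustrative only).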
class XHExpandDecider(xapian.ExpandDecider):
def __call__(self, term):
"""
Return True if the term should be used for expanding the search
query, False otherwise.
Ignore terms related with the content type of objects.
"""
if term.decode('utf-8').startswith(TERM_PREFIXES[DJANGO_CT]):
return False
return True
class XapianSearchBackend(BaseSearchBackend):
"""
`SearchBackend` defines the Xapian search backend for use with the Haystack
API for Django search.
It uses the Xapian Python bindings to interface with Xapian, and as
such is subject to this bug: <http://trac.xapian.org/ticket/364> when
Django is running with mod_python or mod_wsgi under Apache.
    Until this issue has been fixed by Xapian, it is necessary to set
`WSGIApplicationGroup to %{GLOBAL}` when using mod_wsgi, or
`PythonInterpreter main_interpreter` when using mod_python.
In order to use this backend, `PATH` must be included in the
    `connection_options`. This should point to a location where you would
    like your indexes to reside.
"""
inmemory_db = None
def __init__(self, connection_alias, **connection_options):
"""
Instantiates an instance of `SearchBackend`.
Optional arguments:
`connection_alias` -- The name of the connection
`language` -- The stemming language (default = 'english')
`**connection_options` -- The various options needed to setup
the backend.
Also sets the stemming language to be used to `language`.
"""
super(XapianSearchBackend, self).__init__(connection_alias, **connection_options)
        if 'PATH' not in connection_options:
raise ImproperlyConfigured("You must specify a 'PATH' in your settings for connection '%s'."
% connection_alias)
self.path = connection_options.get('PATH')
if self.path != MEMORY_DB_NAME and not os.path.exists(self.path):
os.makedirs(self.path)
self.flags = connection_options.get('FLAGS', DEFAULT_XAPIAN_FLAGS)
self.language = getattr(settings, 'HAYSTACK_XAPIAN_LANGUAGE', 'english')
stemming_strategy_string = getattr(settings, 'HAYSTACK_XAPIAN_STEMMING_STRATEGY', 'STEM_SOME')
self.stemming_strategy = getattr(xapian.QueryParser, stemming_strategy_string, xapian.QueryParser.STEM_SOME)
# these 4 attributes are caches populated in `build_schema`
# they are checked in `_update_cache`
# use property to retrieve them
self._fields = {}
self._schema = []
self._content_field_name = None
self._columns = {}
def _update_cache(self):
"""
To avoid build_schema every time, we cache
some values: they only change when a SearchIndex
changes, which typically restarts the Python.
"""
fields = connections[self.connection_alias].get_unified_index().all_searchfields()
if self._fields != fields:
self._fields = fields
self._content_field_name, self._schema = self.build_schema(self._fields)
@property
def schema(self):
self._update_cache()
return self._schema
@property
def content_field_name(self):
self._update_cache()
return self._content_field_name
@property
def column(self):
"""
Returns the column in the database of a given field name.
"""
self._update_cache()
return self._columns
def update(self, index, iterable):
"""
Updates the `index` with any objects in `iterable` by adding/updating
the database as needed.
Required arguments:
`index` -- The `SearchIndex` to process
`iterable` -- An iterable of model instances to index
For each object in `iterable`, a document is created containing all
of the terms extracted from `index.full_prepare(obj)` with field prefixes,
and 'as-is' as needed. Also, if the field type is 'text' it will be
stemmed and stored with the 'Z' prefix as well.
eg. `content:Testing` ==> `testing, Ztest, ZXCONTENTtest, XCONTENTtest`
Each document also contains an extra term in the format:
`XCONTENTTYPE<app_name>.<model_name>`
        As well as a unique identifier in the format:
`Q<app_name>.<model_name>.<pk>`
eg.: foo.bar (pk=1) ==> `Qfoo.bar.1`, `XCONTENTTYPEfoo.bar`
This is useful for querying for a specific document corresponding to
a model instance.
The document also contains a pickled version of the object itself and
the document ID in the document data field.
Finally, we also store field values to be used for sorting data. We
        store these in the document value slots (position zero is reserved
        for the document ID). All values are stored as unicode strings, with
        conversion of float, int, and double values done by Xapian itself
        through the use of the :method:xapian.sortable_serialise method.
"""
database = self._database(writable=True)
try:
term_generator = xapian.TermGenerator()
term_generator.set_database(database)
term_generator.set_stemmer(xapian.Stem(self.language))
try:
term_generator.set_stemming_strategy(self.stemming_strategy)
except AttributeError:
# Versions before Xapian 1.2.11 do not support stemming strategies for TermGenerator
pass
if self.include_spelling is True:
term_generator.set_flags(xapian.TermGenerator.FLAG_SPELLING)
def _add_text(termpos, text, weight, prefix=''):
"""
indexes text appending 2 extra terms
to identify beginning and ending of the text.
"""
term_generator.set_termpos(termpos)
start_term = '%s^' % prefix
end_term = '%s$' % prefix
# add begin
document.add_posting(start_term, termpos, weight)
# add text
term_generator.index_text(text, weight, prefix)
termpos = term_generator.get_termpos()
# add ending
termpos += 1
document.add_posting(end_term, termpos, weight)
# increase termpos
term_generator.set_termpos(termpos)
term_generator.increase_termpos(TERMPOS_DISTANCE)
return term_generator.get_termpos()
def _add_literal_text(termpos, text, weight, prefix=''):
"""
Adds sentence to the document with positional information
but without processing.
The sentence is bounded by "^" "$" to allow exact matches.
"""
text = '^ %s $' % text
for word in text.split():
term = '%s%s' % (prefix, word)
document.add_posting(term, termpos, weight)
termpos += 1
termpos += TERMPOS_DISTANCE
return termpos
def add_text(termpos, prefix, text, weight):
"""
Adds text to the document with positional information
and processing (e.g. stemming).
"""
termpos = _add_text(termpos, text, weight, prefix=prefix)
termpos = _add_text(termpos, text, weight, prefix='')
termpos = _add_literal_text(termpos, text, weight, prefix=prefix)
termpos = _add_literal_text(termpos, text, weight, prefix='')
return termpos
def _get_ngram_lengths(value):
values = value.split()
for item in values:
for ngram_length in six.moves.range(NGRAM_MIN_LENGTH, NGRAM_MAX_LENGTH + 1):
yield item, ngram_length
for obj in iterable:
document = xapian.Document()
term_generator.set_document(document)
def ngram_terms(value):
for item, length in _get_ngram_lengths(value):
item_length = len(item)
for start in six.moves.range(0, item_length - length + 1):
for size in six.moves.range(length, length + 1):
end = start + size
if end > item_length:
continue
yield _to_xapian_term(item[start:end])
def edge_ngram_terms(value):
for item, length in _get_ngram_lengths(value):
yield _to_xapian_term(item[0:length])
def add_edge_ngram_to_document(prefix, value, weight):
"""
Splits the term in ngrams and adds each ngram to the index.
The minimum and maximum size of the ngram is respectively
NGRAM_MIN_LENGTH and NGRAM_MAX_LENGTH.
"""
for term in edge_ngram_terms(value):
document.add_term(term, weight)
document.add_term(prefix + term, weight)
def add_ngram_to_document(prefix, value, weight):
"""
Splits the term in ngrams and adds each ngram to the index.
The minimum and maximum size of the ngram is respectively
NGRAM_MIN_LENGTH and NGRAM_MAX_LENGTH.
"""
for term in ngram_terms(value):
document.add_term(term, weight)
document.add_term(prefix + term, weight)
def add_non_text_to_document(prefix, term, weight):
"""
Adds term to the document without positional information
and without processing.
If the term is alone, also adds it as "^<term>$"
to allow exact matches on single terms.
"""
document.add_term(term, weight)
document.add_term(prefix + term, weight)
def add_datetime_to_document(termpos, prefix, term, weight):
"""
Adds a datetime to document with positional order
to allow exact matches on it.
"""
date, time = term.split()
document.add_posting(date, termpos, weight)
termpos += 1
document.add_posting(time, termpos, weight)
termpos += 1
document.add_posting(prefix + date, termpos, weight)
termpos += 1
document.add_posting(prefix + time, termpos, weight)
termpos += TERMPOS_DISTANCE + 1
return termpos
data = index.full_prepare(obj)
weights = index.get_field_weights()
termpos = term_generator.get_termpos() # identifies the current position in the document.
for field in self.schema:
if field['field_name'] not in list(data.keys()):
# not supported fields are ignored.
continue
if field['field_name'] in weights:
weight = int(weights[field['field_name']])
else:
weight = 1
value = data[field['field_name']]
if field['field_name'] in (ID, DJANGO_ID, DJANGO_CT):
# Private fields are indexed in a different way:
# `django_id` is an int and `django_ct` is text;
# besides, they are indexed by their (unstemmed) value.
if field['field_name'] == DJANGO_ID:
try:
value = int(value)
except ValueError:
# Django_id is a string
field['type'] = 'text'
value = _term_to_xapian_value(value, field['type'])
document.add_term(TERM_PREFIXES[field['field_name']] + value, weight)
document.add_value(field['column'], value)
continue
else:
prefix = TERM_PREFIXES['field'] + field['field_name'].upper()
# if not multi_valued, we add as a document value
# for sorting and facets
if field['multi_valued'] == 'false':
document.add_value(field['column'], _term_to_xapian_value(value, field['type']))
else:
for t in value:
# add the exact match of each value
term = _to_xapian_term(t)
termpos = add_text(termpos, prefix, term, weight)
continue
term = _to_xapian_term(value)
if term == '':
continue
# from here on the term is a string;
# we now decide how it is indexed
if field['type'] == 'text':
# text is indexed with positional information
termpos = add_text(termpos, prefix, term, weight)
elif field['type'] == 'datetime':
termpos = add_datetime_to_document(termpos, prefix, term, weight)
elif field['type'] == 'ngram':
add_ngram_to_document(prefix, value, weight)
elif field['type'] == 'edge_ngram':
add_edge_ngram_to_document(prefix, value, weight)
else:
# all other terms are added without positional information
add_non_text_to_document(prefix, term, weight)
# store data without indexing it
document.set_data(pickle.dumps(
(obj._meta.app_label, obj._meta.model_name, obj.pk, data),
pickle.HIGHEST_PROTOCOL
))
# add the id of the document
document_id = TERM_PREFIXES[ID] + get_identifier(obj)
document.add_term(document_id)
# finally, replace or add the document to the database
database.replace_document(document_id, document)
except UnicodeDecodeError:
sys.stderr.write('Chunk failed.\n')
pass
finally:
database.close()
def remove(self, obj):
"""
Remove indexes for `obj` from the database.
We delete all instances of `Q<app_name>.<model_name>.<pk>` which
should be unique to this object.
"""
database = self._database(writable=True)
database.delete_document(TERM_PREFIXES[ID] + get_identifier(obj))
database.close()
def clear(self, models=(), commit=True):
"""
Clear all instances of `models` from the database or all models, if
not specified.
Optional Arguments:
`models` -- Models to clear from the database (default = [])
If `models` is empty, an empty query is executed which matches all
documents in the database. Afterwards, each match is deleted.
Otherwise, for each model, a `delete_document` call is issued with
the term `XCONTENTTYPE<app_name>.<model_name>`. This will delete
all documents with the specified model type.
"""
if not models:
# Because there does not appear to be a "clear all" method,
# it's much quicker to remove the contents of the `self.path`
# folder than it is to remove each document one at a time.
if os.path.exists(self.path):
shutil.rmtree(self.path)
else:
database = self._database(writable=True)
for model in models:
database.delete_document(TERM_PREFIXES[DJANGO_CT] + get_model_ct(model))
database.close()
def document_count(self):
try:
return self._database().get_doccount()
except InvalidIndexError:
return 0
def _build_models_query(self, query):
"""
Builds a query from `query` that filters to documents only from registered models.
"""
registered_models_ct = self.build_models_list()
if registered_models_ct:
restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
for model_ct in registered_models_ct]
limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
return query
def _check_field_names(self, field_names):
"""
Raises InvalidIndexError if any of a field_name in field_names is
not indexed.
"""
if field_names:
for field_name in field_names:
try:
self.column[field_name]
except KeyError:
raise InvalidIndexError('Trying to use non indexed field "%s"' % field_name)
@log_query
def search(self, query, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None, date_facets=None,
query_facets=None, narrow_queries=None, spelling_query=None,
limit_to_registered_models=None, result_class=None, **kwargs):
"""
Executes the Xapian::query as defined in `query`.
Required arguments:
`query` -- Search query to execute
Optional arguments:
`sort_by` -- Sort results by specified field (default = None)
`start_offset` -- Slice results from `start_offset` (default = 0)
`end_offset` -- Slice results at `end_offset` (default = None), if None, then all documents
`fields` -- Filter results on `fields` (default = '')
`highlight` -- Highlight terms in results (default = False)
`facets` -- Facet results on fields (default = None)
`date_facets` -- Facet results on date ranges (default = None)
`query_facets` -- Facet results on queries (default = None)
`narrow_queries` -- Narrow queries (default = None)
`spelling_query` -- An optional query to execute spelling suggestion on
`limit_to_registered_models` -- Limit returned results to models registered in
the current `SearchSite` (default = True)
Returns:
A dictionary with the following keys:
`results` -- A list of `SearchResult`
`hits` -- The total available results
`facets` - A dictionary of facets with the following keys:
`fields` -- A list of field facets
`dates` -- A list of date facets
`queries` -- A list of query facets
If faceting was not used, the `facets` key will not be present
If `query` is None, returns no results.
If `INCLUDE_SPELLING` was enabled in the connection options, the
extra flag `FLAG_SPELLING_CORRECTION` will be passed to the query parser
and any suggestions for spell correction will be returned as well as
the results.
"""
if xapian.Query.empty(query):
return {
'results': [],
'hits': 0,
}
self._check_field_names(facets)
self._check_field_names(date_facets)
self._check_field_names(query_facets)
database = self._database()
if limit_to_registered_models is None:
limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)
if result_class is None:
result_class = SearchResult
if self.include_spelling is True:
spelling_suggestion = self._do_spelling_suggestion(database, query, spelling_query)
else:
spelling_suggestion = ''
if narrow_queries is not None:
query = xapian.Query(
xapian.Query.OP_AND, query, xapian.Query(
xapian.Query.OP_AND, [self.parse_query(narrow_query) for narrow_query in narrow_queries]
)
)
if limit_to_registered_models:
query = self._build_models_query(query)
enquire = xapian.Enquire(database)
if hasattr(settings, 'HAYSTACK_XAPIAN_WEIGHTING_SCHEME'):
enquire.set_weighting_scheme(xapian.BM25Weight(*settings.HAYSTACK_XAPIAN_WEIGHTING_SCHEME))
enquire.set_query(query)
if sort_by:
try:
_xapian_sort(enquire, sort_by, self.column)
except NotSupportedError:
_old_xapian_sort(enquire, sort_by, self.column)
results = []
facets_dict = {
'fields': {},
'dates': {},
'queries': {},
}
if not end_offset:
end_offset = database.get_doccount() - start_offset
## prepare spies in case of facets
if facets:
facets_spies = self._prepare_facet_field_spies(facets)
for spy in facets_spies:
enquire.add_matchspy(spy)
# print enquire.get_query()
matches = self._get_enquire_mset(database, enquire, start_offset, end_offset)
for match in matches:
app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document))
if highlight:
model_data['highlighted'] = {
self.content_field_name: self._do_highlight(
model_data.get(self.content_field_name), query
)
}
results.append(
result_class(app_label, model_name, pk, match.percent, **model_data)
)
if facets:
# pick single valued facets from spies
single_facets_dict = self._process_facet_field_spies(facets_spies)
# pick multivalued valued facets from results
multi_facets_dict = self._do_multivalued_field_facets(results, facets)
# merge both results (http://stackoverflow.com/a/38990/931303)
facets_dict['fields'] = dict(list(single_facets_dict.items()) + list(multi_facets_dict.items()))
if date_facets:
facets_dict['dates'] = self._do_date_facets(results, date_facets)
if query_facets:
facets_dict['queries'] = self._do_query_facets(results, query_facets)
return {
'results': results,
'hits': self._get_hit_count(database, enquire),
'facets': facets_dict,
'spelling_suggestion': spelling_suggestion,
}
def more_like_this(self, model_instance, additional_query=None,
start_offset=0, end_offset=None,
limit_to_registered_models=True, result_class=None, **kwargs):
"""
Given a model instance, returns a result set of similar documents.
Required arguments:
`model_instance` -- The model instance to use as a basis for
retrieving similar documents.
Optional arguments:
`additional_query` -- An additional query to narrow results
`start_offset` -- The starting offset (default=0)
`end_offset` -- The ending offset (default=None), if None, then all documents
`limit_to_registered_models` -- Limit returned results to models registered in the search (default = True)
Returns:
A dictionary with the following keys:
`results` -- A list of `SearchResult`
`hits` -- The total available results
Opens a database connection, then builds a simple query using the
`model_instance` to build the unique identifier.
For each document retrieved(should always be one), adds an entry into
an RSet (relevance set) with the document id, then, uses the RSet
to query for an ESet (A set of terms that can be used to suggest
expansions to the original query), omitting any document that was in
the original query.
Finally, processes the resulting matches and returns.
"""
database = self._database()
if result_class is None:
result_class = SearchResult
query = xapian.Query(TERM_PREFIXES[ID] + get_identifier(model_instance))
enquire = xapian.Enquire(database)
enquire.set_query(query)
rset = xapian.RSet()
if not end_offset:
end_offset = database.get_doccount()
match = None
for match in self._get_enquire_mset(database, enquire, 0, end_offset):
rset.add_document(match.docid)
if match is None:
if not self.silently_fail:
raise InvalidIndexError('Instance %s with id "%d" not indexed' %
(get_identifier(model_instance), model_instance.id))
else:
return {'results': [],
'hits': 0}
query = xapian.Query(
xapian.Query.OP_ELITE_SET,
[expand.term for expand in enquire.get_eset(match.document.termlist_count(), rset, XHExpandDecider())],
match.document.termlist_count()
)
query = xapian.Query(
xapian.Query.OP_AND_NOT, [query, TERM_PREFIXES[ID] + get_identifier(model_instance)]
)
if limit_to_registered_models:
query = self._build_models_query(query)
if additional_query:
query = xapian.Query(
xapian.Query.OP_AND, query, additional_query
)
enquire.set_query(query)
results = []
matches = self._get_enquire_mset(database, enquire, start_offset, end_offset)
for match in matches:
app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document))
results.append(
result_class(app_label, model_name, pk, match.percent, **model_data)
)
return {
'results': results,
'hits': self._get_hit_count(database, enquire),
'facets': {
'fields': {},
'dates': {},
'queries': {},
},
'spelling_suggestion': None,
}
def parse_query(self, query_string):
"""
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
"""
if query_string == '*':
return xapian.Query('') # Match everything
elif query_string == '':
return xapian.Query() # Match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
# since 'django_ct' has a boolean_prefix,
# we ignore it here.
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags)
def build_schema(self, fields):
"""
Build the schema from fields.
:param fields: A list of fields in the index
:returns: list of dictionaries
Each dictionary has the keys
field_name: The name of the field index
type: what type of value it is
'multi_valued': if it allows more than one value
'column': a number identifying it
'type': the type of the field
'multi_valued': 'false', 'column': 0}
"""
content_field_name = ''
schema_fields = [
{'field_name': ID,
'type': 'text',
'multi_valued': 'false',
'column': 0},
{'field_name': DJANGO_ID,
'type': 'integer',
'multi_valued': 'false',
'column': 1},
{'field_name': DJANGO_CT,
'type': 'text',
'multi_valued': 'false',
'column': 2},
]
self._columns[ID] = 0
self._columns[DJANGO_ID] = 1
self._columns[DJANGO_CT] = 2
column = len(schema_fields)
for field_name, field_class in sorted(list(fields.items()), key=lambda n: n[0]):
if field_class.document is True:
content_field_name = field_class.index_fieldname
if field_class.indexed is True:
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text',
'multi_valued': 'false',
'column': column,
}
if field_class.field_type == 'date':
field_data['type'] = 'date'
elif field_class.field_type == 'datetime':
field_data['type'] = 'datetime'
elif field_class.field_type == 'integer':
field_data['type'] = 'integer'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
schema_fields.append(field_data)
self._columns[field_data['field_name']] = column
column += 1
return content_field_name, schema_fields
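    # For example, build_schema above, given a plain indexed (non-multivalued)
    # text field named "title" assigned to column 3, would produce a schema
    # entry like
    # {'field_name': 'title', 'type': 'text', 'multi_valued': 'false', 'column': 3}
    # (field name and column number are illustrative only).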
@staticmethod
def _do_highlight(content, query, tag='em'):
"""
Highlight `query` terms in `content` with html `tag`.
This method assumes that the input text (`content`) does not contain
any special formatting. That is, it does not contain any html tags
or similar markup that could be screwed up by the highlighting.
Required arguments:
        `content` -- Content in which to highlight the query terms
        `query` -- The query whose terms are to be highlighted
        `tag` -- The html tag wrapped around each highlighted term (default 'em')
"""
for term in query:
term = term.decode('utf-8')
for match in re.findall('[^A-Z]+', term): # Ignore field identifiers
match_re = re.compile(match, re.I)
content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content)
return content
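    # For example, _do_highlight above, given a query containing the bare term
    # "test" and the content "A test string", would return
    # "A <em>test</em> string" with the default tag (input here is
    # illustrative only).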
def _prepare_facet_field_spies(self, facets):
"""
Returns a list of spies based on the facets
used to count frequencies.
"""
spies = []
for facet in facets:
slot = self.column[facet]
spy = xapian.ValueCountMatchSpy(slot)
# add attribute "slot" to know which column this spy is targeting.
spy.slot = slot
spies.append(spy)
return spies
def _process_facet_field_spies(self, spies):
"""
Returns a dict of facet names with lists of
tuples of the form (term, term_frequency)
from a list of spies that observed the enquire.
"""
facet_dict = {}
for spy in spies:
field = self.schema[spy.slot]
field_name, field_type = field['field_name'], field['type']
facet_dict[field_name] = []
for facet in list(spy.values()):
if field_type == 'float':
# the float term is a Xapian serialized object, which is
# in bytes.
term = facet.term
else:
term = facet.term.decode('utf-8')
facet_dict[field_name].append((_from_xapian_value(term, field_type),
facet.termfreq))
return facet_dict
def _do_multivalued_field_facets(self, results, field_facets):
"""
Implements a multivalued field facet on the results.
This is implemented using brute force - O(N^2) -
because Xapian does not have it implemented yet
(see http://trac.xapian.org/ticket/199)
"""
facet_dict = {}
for field in field_facets:
facet_list = {}
if not self._multi_value_field(field):
continue
for result in results:
field_value = getattr(result, field)
for item in field_value: # Facet each item in a MultiValueField
facet_list[item] = facet_list.get(item, 0) + 1
facet_dict[field] = list(facet_list.items())
return facet_dict
@staticmethod
def _do_date_facets(results, date_facets):
"""
Private method that facets a document by date ranges
Required arguments:
`results` -- A list SearchResults to facet
`date_facets` -- A dictionary containing facet parameters:
{'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}}
nb., gap must be one of the following:
year|month|day|hour|minute|second
For each date facet field in `date_facets`, generates a list
of date ranges (from `start_date` to `end_date` by `gap_by`) then
iterates through `results` and tallies the count for each date_facet.
Returns a dictionary of date facets (fields) containing a list with
entries for each range and a count of documents matching the range.
eg. {
'pub_date': [
(datetime.datetime(2009, 1, 1, 0, 0), 5),
(datetime.datetime(2009, 2, 1, 0, 0), 0),
(datetime.datetime(2009, 3, 1, 0, 0), 0),
(datetime.datetime(2008, 4, 1, 0, 0), 1),
(datetime.datetime(2008, 5, 1, 0, 0), 2),
],
}
"""
def next_datetime(previous, gap_value, gap_type):
year = previous.year
month = previous.month
if gap_type == 'year':
next = previous.replace(year=year + gap_value)
elif gap_type == 'month':
if month + gap_value <= 12:
next = previous.replace(month=month + gap_value)
else:
next = previous.replace(
month=((month + gap_value) % 12),
year=(year + (month + gap_value) // 12)
)
elif gap_type == 'day':
next = previous + datetime.timedelta(days=gap_value)
elif gap_type == 'hour':
                next = previous + datetime.timedelta(hours=gap_value)
elif gap_type == 'minute':
next = previous + datetime.timedelta(minutes=gap_value)
elif gap_type == 'second':
next = previous + datetime.timedelta(seconds=gap_value)
else:
                raise TypeError('\'gap_by\' must be '
                                '{second, minute, hour, day, month, year}')
return next
facet_dict = {}
for date_facet, facet_params in list(date_facets.items()):
gap_type = facet_params.get('gap_by')
gap_value = facet_params.get('gap_amount', 1)
date_range = facet_params['start_date']
# construct the bins of the histogram
facet_list = []
while date_range < facet_params['end_date']:
facet_list.append((date_range, 0))
date_range = next_datetime(date_range, gap_value, gap_type)
facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True)
for result in results:
result_date = getattr(result, date_facet)
# convert date to datetime
if not isinstance(result_date, datetime.datetime):
result_date = datetime.datetime(result_date.year,
result_date.month,
result_date.day)
# ignore results outside the boundaries.
if facet_list[0][0] < result_date < facet_list[-1][0]:
continue
# populate the histogram by putting the result on the right bin.
for n, facet_date in enumerate(facet_list):
if result_date > facet_date[0]:
# equal to facet_list[n][1] += 1, but for a tuple
facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1))
break # bin found; go to next result
facet_dict[date_facet] = facet_list
return facet_dict
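    # For example, _do_date_facets above, with start_date 2009-01-01,
    # end_date 2009-04-01 and gap_by 'month', builds the bins
    # [2009-03-01, 2009-02-01, 2009-01-01] (newest first) and tallies each
    # result into the first bin whose start date it exceeds (dates here are
    # illustrative only).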
def _do_query_facets(self, results, query_facets):
"""
Private method that facets a document by query
Required arguments:
`results` -- A list SearchResults to facet
`query_facets` -- A dictionary containing facet parameters:
{'field': 'query', [...]}
For each query in `query_facets`, generates a dictionary entry with
the field name as the key and a tuple with the query and result count
as the value.
eg. {'name': ('a*', 5)}
"""
facet_dict = {}
for field, query in list(dict(query_facets).items()):
facet_dict[field] = (query, self.search(self.parse_query(query))['hits'])
return facet_dict
@staticmethod
def _do_spelling_suggestion(database, query, spelling_query):
"""
Private method that returns a single spelling suggestion based on
`spelling_query` or `query`.
Required arguments:
`database` -- The database to check spelling against
`query` -- The query to check
`spelling_query` -- If not None, this will be checked instead of `query`
Returns a string with a suggested spelling
"""
if spelling_query:
if ' ' in spelling_query:
return ' '.join([database.get_spelling_suggestion(term).decode('utf-8') for term in spelling_query.split()])
else:
return database.get_spelling_suggestion(spelling_query).decode('utf-8')
term_set = set()
for term in query:
for match in re.findall('[^A-Z]+', term.decode('utf-8')): # Ignore field identifiers
term_set.add(database.get_spelling_suggestion(match).decode('utf-8'))
return ' '.join(term_set)
def _database(self, writable=False):
"""
Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase
"""
if self.path == MEMORY_DB_NAME:
if not self.inmemory_db:
self.inmemory_db = xapian.inmemory_open()
return self.inmemory_db
if writable:
database = xapian.WritableDatabase(self.path, xapian.DB_CREATE_OR_OPEN)
else:
try:
database = xapian.Database(self.path)
except xapian.DatabaseOpeningError:
raise InvalidIndexError('Unable to open index at %s' % self.path)
return database
@staticmethod
def _get_enquire_mset(database, enquire, start_offset, end_offset, checkatleast=DEFAULT_CHECK_AT_LEAST):
"""
A safer version of Xapian.enquire.get_mset
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`enquire` -- An instance of an Xapian.enquire object
`start_offset` -- The start offset to pass to `enquire.get_mset`
`end_offset` -- The end offset to pass to `enquire.get_mset`
"""
try:
return enquire.get_mset(start_offset, end_offset, checkatleast)
except xapian.DatabaseModifiedError:
database.reopen()
return enquire.get_mset(start_offset, end_offset, checkatleast)
@staticmethod
def _get_document_data(database, document):
"""
A safer version of Xapian.document.get_data
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`document` -- An instance of an Xapian.document object
"""
try:
return document.get_data()
except xapian.DatabaseModifiedError:
database.reopen()
return document.get_data()
def _get_hit_count(self, database, enquire):
"""
Given a database and enquire instance, returns the estimated number
of matches.
Required arguments:
`database` -- The database to be queried
`enquire` -- The enquire instance
"""
return self._get_enquire_mset(
database, enquire, 0, database.get_doccount()
).size()
def _multi_value_field(self, field):
"""
Private method that returns `True` if a field is multi-valued, else
`False`.
        Required arguments:
`field` -- The field to lookup
Returns a boolean value indicating whether the field is multi-valued.
"""
for field_dict in self.schema:
if field_dict['field_name'] == field:
return field_dict['multi_valued'] == 'true'
return False
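# A minimal configuration sketch for using XapianSearchBackend from a Django
# settings module, assuming the engine class is exposed as
# `xapian_backend.XapianEngine` and using an illustrative index path:
#
#     HAYSTACK_CONNECTIONS = {
#         'default': {
#             'ENGINE': 'xapian_backend.XapianEngine',
#             'PATH': os.path.join(os.path.dirname(__file__), 'xapian_index'),
#         },
#     }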
class XapianSearchQuery(BaseSearchQuery):
"""
This class is the Xapian specific version of the SearchQuery class.
It acts as an intermediary between the ``SearchQuerySet`` and the
``SearchBackend`` itself.
"""
def build_params(self, *args, **kwargs):
kwargs = super(XapianSearchQuery, self).build_params(*args, **kwargs)
if self.end_offset is not None:
kwargs['end_offset'] = self.end_offset - self.start_offset
return kwargs
def build_query(self):
if not self.query_filter:
query = xapian.Query('')
else:
query = self._query_from_search_node(self.query_filter)
if self.models:
subqueries = [
xapian.Query(
xapian.Query.OP_SCALE_WEIGHT,
xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], get_model_ct(model))),
0 # Pure boolean sub-query
) for model in self.models
]
query = xapian.Query(
xapian.Query.OP_AND, query,
xapian.Query(xapian.Query.OP_OR, subqueries)
)
if self.boost:
subqueries = [
xapian.Query(
xapian.Query.OP_SCALE_WEIGHT,
self._term_query(term, None, None), value
) for term, value in list(self.boost.items())
]
query = xapian.Query(
xapian.Query.OP_AND_MAYBE, query,
xapian.Query(xapian.Query.OP_OR, subqueries)
)
return query
def _query_from_search_node(self, search_node, is_not=False):
query_list = []
for child in search_node.children:
if isinstance(child, SearchNode):
query_list.append(
self._query_from_search_node(child, child.negated)
)
else:
expression, term = child
field_name, filter_type = search_node.split_expression(expression)
constructed_query_list = self._query_from_term(term, field_name, filter_type, is_not)
query_list.extend(constructed_query_list)
if search_node.connector == 'OR':
return xapian.Query(xapian.Query.OP_OR, query_list)
else:
return xapian.Query(xapian.Query.OP_AND, query_list)
def _query_from_term(self, term, field_name, filter_type, is_not):
"""
Uses arguments to construct a list of xapian.Query's.
"""
if field_name != 'content' and field_name not in self.backend.column:
raise InvalidIndexError('field "%s" not indexed' % field_name)
        # If it is an AutoQuery, it has no filters
# or others, thus we short-circuit the procedure.
if isinstance(term, AutoQuery):
if field_name != 'content':
query = '%s:%s' % (field_name, term.prepare(self))
else:
query = term.prepare(self)
return [self.backend.parse_query(query)]
query_list = []
# Handle `ValuesListQuerySet`.
if hasattr(term, 'values_list'):
term = list(term)
if field_name == 'content':
# content is the generic search:
# force no field_name search
# and the field_type to be 'text'.
field_name = None
field_type = 'text'
# we don't know what is the type(term), so we parse it.
# Ideally this would not be required, but
# some filters currently depend on the term to make decisions.
term = _to_xapian_term(term)
query_list.append(self._filter_contains(term, field_name, field_type, is_not))
# when filter has no filter_type, haystack uses
# filter_type = 'content'. Here we remove it
# since the above query is already doing this
if filter_type == 'content':
filter_type = None
else:
# get the field_type from the backend
field_type = self.backend.schema[self.backend.column[field_name]]['type']
# private fields don't accept 'contains' or 'startswith'
# since they have no meaning.
if filter_type in ('contains', 'startswith') and field_name in (ID, DJANGO_ID, DJANGO_CT):
filter_type = 'exact'
if field_type == 'text':
# we don't know what type "term" is, but we know we are searching as text
# so we parse it like that.
# Ideally this would not be required since _term_query does it, but
# some filters currently depend on the term to make decisions.
if isinstance(term, list):
term = [_to_xapian_term(term) for term in term]
else:
term = _to_xapian_term(term)
# todo: we should check that the filter is valid for this field_type or raise InvalidIndexError
if filter_type == 'contains':
query_list.append(self._filter_contains(term, field_name, field_type, is_not))
elif filter_type in ('content', 'exact'):
query_list.append(self._filter_exact(term, field_name, field_type, is_not))
elif filter_type == 'in':
query_list.append(self._filter_in(term, field_name, field_type, is_not))
elif filter_type == 'startswith':
query_list.append(self._filter_startswith(term, field_name, field_type, is_not))
elif filter_type == 'endswith':
raise NotImplementedError("The Xapian search backend doesn't support endswith queries.")
elif filter_type == 'gt':
query_list.append(self._filter_gt(term, field_name, field_type, is_not))
elif filter_type == 'gte':
query_list.append(self._filter_gte(term, field_name, field_type, is_not))
elif filter_type == 'lt':
query_list.append(self._filter_lt(term, field_name, field_type, is_not))
elif filter_type == 'lte':
query_list.append(self._filter_lte(term, field_name, field_type, is_not))
elif filter_type == 'range':
query_list.append(self._filter_range(term, field_name, field_type, is_not))
return query_list
def _all_query(self):
"""
Returns a match all query.
"""
return xapian.Query('')
def _filter_contains(self, term, field_name, field_type, is_not):
"""
        Splits the sentence into terms and joins them with OR,
        using both the stemmed and un-stemmed forms.
        Assumes `term` is not a list.
"""
if field_type == 'text':
term_list = term.split()
else:
term_list = [term]
query = self._or_query(term_list, field_name, field_type)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query
def _filter_in(self, term_list, field_name, field_type, is_not):
"""
Returns a query that matches exactly ANY term in term_list.
Notice that:
A in {B,C} <=> (A = B or A = C)
~(A in {B,C}) <=> ~(A = B or A = C)
Because OP_AND_NOT(C, D) <=> (C and ~D), then D=(A in {B,C}) requires `is_not=False`.
        Assumes `term_list` is a list.
"""
query_list = [self._filter_exact(term, field_name, field_type, is_not=False)
for term in term_list]
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(),
xapian.Query(xapian.Query.OP_OR, query_list))
else:
return xapian.Query(xapian.Query.OP_OR, query_list)
def _filter_exact(self, term, field_name, field_type, is_not):
"""
Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list.
"""
        if field_type == 'text' and field_name not in (DJANGO_CT, DJANGO_ID):
term = '^ %s $' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
query = self._term_query(term, field_name, field_type, stemmed=False)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query
def _filter_startswith(self, term, field_name, field_type, is_not):
"""
Returns a startswith query on the un-stemmed term.
Assumes term is not a list.
"""
if field_type == 'text':
if len(term.split()) == 1:
term = '^ %s*' % term
query = self.backend.parse_query(term)
else:
term = '^ %s' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
term = '^%s*' % term
query = self.backend.parse_query(term)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
return query
def _or_query(self, term_list, field, field_type):
"""
Joins each item of term_list decorated by _term_query with an OR.
"""
term_list = [self._term_query(term, field, field_type) for term in term_list]
return xapian.Query(xapian.Query.OP_OR, term_list)
def _phrase_query(self, term_list, field_name, field_type):
"""
Returns a query that matches exact terms with
positional order (i.e. ["this", "thing"] != ["thing", "this"])
and no stem.
If `field_name` is not `None`, restrict to the field.
"""
term_list = [self._term_query(term, field_name, field_type,
stemmed=False) for term in term_list]
query = xapian.Query(xapian.Query.OP_PHRASE, term_list)
return query
def _term_query(self, term, field_name, field_type, stemmed=True):
"""
Constructs a query of a single term.
        If `field_name` is not `None`, the term is searched on that field only.
        If `stemmed` is `True`, the stemmed form of the term is OR'd with the un-stemmed form.
"""
constructor = '{prefix}{term}'
# construct the prefix to be used.
prefix = ''
if field_name:
prefix = TERM_PREFIXES['field'] + field_name.upper()
term = _to_xapian_term(term)
if field_name in (ID, DJANGO_ID, DJANGO_CT):
# to ensure the value is serialized correctly.
if field_name == DJANGO_ID:
try:
term = int(term)
except ValueError:
# Django_id is a string
field_type = 'text'
term = _term_to_xapian_value(term, field_type)
return xapian.Query('%s%s' % (TERM_PREFIXES[field_name], term))
# we construct the query dates in a slightly different way
if field_type == 'datetime':
date, time = term.split()
return xapian.Query(xapian.Query.OP_AND_MAYBE,
constructor.format(prefix=prefix, term=date),
constructor.format(prefix=prefix, term=time)
)
# only use stem if field is text or "None"
if field_type not in ('text', None):
stemmed = False
unstemmed_term = constructor.format(prefix=prefix, term=term)
if stemmed:
stem = xapian.Stem(self.backend.language)
stemmed_term = 'Z' + constructor.format(prefix=prefix, term=stem(term).decode('utf-8'))
return xapian.Query(xapian.Query.OP_OR,
xapian.Query(stemmed_term),
xapian.Query(unstemmed_term)
)
else:
return xapian.Query(unstemmed_term)
def _filter_gt(self, term, field_name, field_type, is_not):
return self._filter_lte(term, field_name, field_type, is_not=not is_not)
def _filter_lt(self, term, field_name, field_type, is_not):
return self._filter_gte(term, field_name, field_type, is_not=not is_not)
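    # `gt` and `lt` are expressed through their complements: "x > t" matches
    # exactly the documents that "x <= t" does not, so `is_not` is negated and
    # the work is delegated to _filter_lte/_filter_gte.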
def _filter_gte(self, term, field_name, field_type, is_not):
"""
        Private method that returns a xapian.Query that searches for any term
        that is greater than or equal to `term` in a specified `field`.
"""
vrp = XHValueRangeProcessor(self.backend)
pos, begin, end = vrp('%s:%s' % (field_name, _term_to_xapian_value(term, field_type)), '*')
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT,
self._all_query(),
xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
)
return xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
def _filter_lte(self, term, field_name, field_type, is_not):
"""
        Private method that returns a xapian.Query that searches for any term
        that is less than or equal to `term` in a specified `field`.
"""
vrp = XHValueRangeProcessor(self.backend)
pos, begin, end = vrp('%s:' % field_name, '%s' % _term_to_xapian_value(term, field_type))
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT,
self._all_query(),
xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
)
return xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
def _filter_range(self, term, field_name, field_type, is_not):
"""
Private method that returns a xapian.Query that searches for any term
that is between the values from the `term` list.
"""
vrp = XHValueRangeProcessor(self.backend)
pos, begin, end = vrp('%s:%s' % (field_name, _term_to_xapian_value(term[0], field_type)),
'%s' % _term_to_xapian_value(term[1], field_type))
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT,
self._all_query(),
xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
)
return xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
def _term_to_xapian_value(term, field_type):
"""
Converts a term to a serialized
Xapian value based on the field_type.
"""
assert field_type in FIELD_TYPES
def strf(dt):
"""
        Equivalent to dt.strftime(DATETIME_FORMAT),
        but accepts years below 1900 (see http://stackoverflow.com/q/10263956/931303)
"""
return '%04d%02d%02d%02d%02d%02d' % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
if field_type == 'boolean':
assert isinstance(term, bool)
if term:
value = 't'
else:
value = 'f'
elif field_type == 'integer':
value = INTEGER_FORMAT % term
elif field_type == 'float':
value = xapian.sortable_serialise(term)
elif field_type == 'date' or field_type == 'datetime':
if field_type == 'date':
# http://stackoverflow.com/a/1937636/931303 and comments
term = datetime.datetime.combine(term, datetime.time())
value = strf(term)
else: # field_type == 'text'
value = _to_xapian_term(term)
return value
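# Illustrative examples of the serialization above:
#   _term_to_xapian_value(True, 'boolean')                    -> 't'
#   _term_to_xapian_value(datetime.date(2015, 1, 2), 'date')  -> '20150102000000'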
def _to_xapian_term(term):
"""
Converts a Python type to a
Xapian term that can be indexed.
"""
return force_text(term).lower()
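# For example, _to_xapian_term('Hello World') -> 'hello world' and
# _to_xapian_term(42) -> '42' (assuming force_text behaves like Django's force_text).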
def _from_xapian_value(value, field_type):
"""
Converts a serialized Xapian value
to Python equivalent based on the field_type.
Doesn't accept multivalued fields.
"""
assert field_type in FIELD_TYPES
if field_type == 'boolean':
if value == 't':
return True
elif value == 'f':
return False
else:
            raise InvalidIndexError('Field type "%s" does not accept value "%s"' % (field_type, value))
elif field_type == 'integer':
return int(value)
elif field_type == 'float':
return xapian.sortable_unserialise(value)
elif field_type == 'date' or field_type == 'datetime':
datetime_value = datetime.datetime.strptime(value, DATETIME_FORMAT)
if field_type == 'datetime':
return datetime_value
else:
return datetime_value.date()
else: # field_type == 'text'
return value
def _old_xapian_sort(enquire, sort_by, column):
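    # Legacy sort helper built on MultiValueSorter; presumably used as a fallback
    # when MultiValueKeyMaker (tried in _xapian_sort below) is unavailable.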
sorter = xapian.MultiValueSorter()
for sort_field in sort_by:
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = False # Reverse is inverted in Xapian -- http://trac.xapian.org/ticket/311
sorter.add(column[sort_field], reverse)
enquire.set_sort_by_key_then_relevance(sorter, True)
def _xapian_sort(enquire, sort_by, column):
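    # Note that the reverse flag here is the opposite of _old_xapian_sort: with
    # MultiValueKeyMaker a leading '-' maps to reverse=False and a plain field
    # maps to True (see http://trac.xapian.org/ticket/311 referenced above).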
try:
sorter = xapian.MultiValueKeyMaker()
except AttributeError:
raise NotSupportedError
for sort_field in sort_by:
if sort_field.startswith('-'):
reverse = False
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = True
sorter.add_value(column[sort_field], reverse)
enquire.set_sort_by_key_then_relevance(sorter, True)
class XapianEngine(BaseEngine):
backend = XapianSearchBackend
query = XapianSearchQuery
|
lokeshjindal15/pd-gem5 | refs/heads/master | ext/ply/test/yacc_error1.py | 174 | # -----------------------------------------------------------------------------
# yacc_error1.py
#
# Bad p_error() function
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t,s):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
|
fangeugene/the-blue-alliance | refs/heads/master | database/dict_converters/event_details_converter.py | 2 | from collections import defaultdict
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 3,
}
@classmethod
def _convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventsDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventsDetailsConverter_v3(cls, event_details):
event_details = map(cls.eventDetailsConverter_v3, event_details)
return event_details
@classmethod
def eventDetailsConverter_v3(cls, event_details):
normalized_oprs = defaultdict(dict)
if event_details and event_details.matchstats:
for stat_type, stats in event_details.matchstats.items():
if stat_type in {'oprs', 'dprs', 'ccwms'}:
for team, value in stats.items():
if 'frc' not in team: # Normalize output
team = 'frc{}'.format(team)
normalized_oprs[stat_type][team] = value
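        # Team keys in the stats above are normalized to the 'frcXXXX' form
        # (e.g. '254' -> 'frc254') before being exposed as 'oprs' below.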
event_details_dict = {
'alliances': event_details.alliance_selections if event_details else None,
'district_points': event_details.district_points if event_details else None,
'insights': event_details.insights if event_details else None,
'oprs': normalized_oprs if normalized_oprs else None, # OPRs, DPRs, CCWMs
'predictions': event_details.predictions if event_details else None,
'rankings': event_details.renderable_rankings if event_details else None,
}
return event_details_dict
|
jnovinger/django | refs/heads/master | django/views/csrf.py | 437 | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine
from django.utils.translation import ugettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline templates as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function passes a <code>request</code> to the template's <a
href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
method.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
c = Context({
'title': _("Forbidden"),
'main': _("CSRF verification failed. Request aborted."),
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_referer1': _(
"You are seeing this message because this HTTPS site requires a "
"'Referer header' to be sent by your Web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."),
'no_referer2': _(
"If you have configured your browser to disable 'Referer' headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for 'same-origin' requests."),
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'no_cookie1': _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."),
'no_cookie2': _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for 'same-origin' "
"requests."),
'DEBUG': settings.DEBUG,
'docs_version': get_docs_version(),
'more': _("More information is available with DEBUG=True."),
})
return HttpResponseForbidden(t.render(c), content_type='text/html')
|
marcuschia/ShaniXBMCWork | refs/heads/master | other/jsunpackMM.py | 12 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
Updated by Shani_08 for muchmovies, here they have done the double encrypt.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
def unpack(sJavascript,iteration=1, totaliterations=2 ):
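    # Unpacks the common "eval(function(p,a,c,k,e,d){...})" style of packed
    # JavaScript; totaliterations defaults to 2 because this site packs the
    # payload twice (see the module docstring).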
print 'iteration',iteration
if sJavascript.startswith('var _0xcb8a='):
aSplit=sJavascript.split('var _0xcb8a=')
ss="myarray="+aSplit[1].split("eval(")[0]
exec(ss)
a1=62
c1=int(aSplit[1].split(",62,")[1].split(',')[0])
p1=myarray[0]
k1=myarray[3]
with open('temp file'+str(iteration)+'.js', "wb") as filewriter:
filewriter.write(str(k1))
#aa=1/0
else:
aSplit = sJavascript.split("rn p}('")
p1,a1,c1,k1=('','0','0','')
ss="p1,a1,c1,k1=('"+aSplit[1].split(".spli")[0]+')'
exec(ss)
k1=k1.split('|')
aSplit = aSplit[1].split("))'")
# print ' p array is ',len(aSplit)
# print len(aSplit )
#p=str(aSplit[0]+'))')#.replace("\\","")#.replace('\\\\','\\')
#print aSplit[1]
#aSplit = aSplit[1].split(",")
#print aSplit[0]
#a = int(aSplit[1])
#c = int(aSplit[2])
#k = aSplit[3].split(".")[0].replace("'", '').split('|')
#a=int(a)
#c=int(c)
#p=p.replace('\\', '')
# print 'p val is ',p[0:100],'............',p[-100:],len(p)
# print 'p1 val is ',p1[0:100],'............',p1[-100:],len(p1)
#print a,a1
#print c,a1
#print 'k val is ',k[-10:],len(k)
# print 'k1 val is ',k1[-10:],len(k1)
e = ''
d = ''#32823
#sUnpacked = str(__unpack(p, a, c, k, e, d))
sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
#print sUnpacked[:200]+'....'+sUnpacked[-100:], len(sUnpacked)
# print sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
#exec('sUnpacked1="'+sUnpacked1+'"')
if iteration>=totaliterations:
# print 'final res',sUnpacked1[:200]+'....'+sUnpacked1[-100:], len(sUnpacked1)
return sUnpacked1#.replace('\\\\', '\\')
else:
# print 'final res for this iteration is',iteration
return unpack(sUnpacked1,iteration+1)#.replace('\\', ''),iteration)#.replace('\\', '');#unpack(sUnpacked.replace('\\', ''))
def __unpack(p, a, c, k, e, d, iteration):
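    # Walks the keyword list `k` backwards and substitutes every base-encoded
    # token in the payload `p` with its keyword, writing before/after copies to
    # disk for debugging.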
with open('before file'+str(iteration)+'.js', "wb") as filewriter:
filewriter.write(str(p))
while (c > 1):
c = c -1
if (k[c]):
aa=str(__itoaNew(c, a))
#re.sub('\\b' + aa +'\\b', k[c], p) THIS IS Bloody slow!
p=findAndReplaceWord(p,aa,k[c])
with open('after file'+str(iteration)+'.js', "wb") as filewriter:
filewriter.write(str(p))
return p
#
#function equivalent to re.sub('\\b' + aa +'\\b', k[c], p)
def findAndReplaceWord(source_str, word_to_find,replace_with):
splits=None
splits=source_str.split(word_to_find)
if len(splits)>1:
new_string=[]
current_index=0
for current_split in splits:
#print 'here',i
new_string.append(current_split)
val=word_to_find#by default assume it was wrong to split
            #if it's the first one and the item is blank, check whether the next item is valid
if current_index==len(splits)-1:
val='' # last one nothing to append normally
else:
if len(current_split)==0: #if blank check next one with current split value
if ( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_'):# first just just check next
val=replace_with
#not blank, then check current endvalue and next first value
else:
if (splits[current_index][-1].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') and (( len(splits[current_index+1])==0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') or (len(splits[current_index+1])>0 and splits[current_index+1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_')):# first just just check next
val=replace_with
new_string.append(val)
current_index+=1
#aaaa=1/0
source_str=''.join(new_string)
return source_str
def __itoa(num, radix):
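    # Converts `num` to a string in the given radix using the 0-9a-z digit set,
    # mirroring JavaScript's Number.prototype.toString(radix) for radix <= 36.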
# print 'num red',num, radix
result = ""
if num==0: return '0'
while num > 0:
result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
num /= radix
return result
def __itoaNew(cc, a):
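    # Recreates the packer's token encoder: recurse on cc // a for the leading
    # digits, then encode the remainder as 0-9a-z (or chr(value + 29) above 35).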
aa="" if cc < a else __itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(__itoa(cc,36))
return aa+bb
|
CrushAndRun/Cloudbot-Fluke | refs/heads/master | cloudbot/plugin.py | 5 | import asyncio
import glob
import importlib
import inspect
import logging
import os
import re
import sqlalchemy
from cloudbot.event import Event
from cloudbot.util import botvars
logger = logging.getLogger("cloudbot")
def find_hooks(parent, module):
"""
:type parent: Plugin
:type module: object
    :rtype: (list[CommandHook], list[RegexHook], list[RawHook], list[SieveHook], list[EventHook], list[PeriodicHook], list[OnStartHook])
"""
# set the loaded flag
module._cloudbot_loaded = True
command = []
regex = []
raw = []
sieve = []
event = []
periodic = []
on_start = []
type_lists = {"command": command, "regex": regex, "irc_raw": raw, "sieve": sieve, "event": event,
"periodic": periodic, "on_start": on_start}
for name, func in module.__dict__.items():
if hasattr(func, "_cloudbot_hook"):
# if it has cloudbot hook
func_hooks = func._cloudbot_hook
for hook_type, func_hook in func_hooks.items():
type_lists[hook_type].append(_hook_name_to_plugin[hook_type](parent, func_hook))
# delete the hook to free memory
del func._cloudbot_hook
return command, regex, raw, sieve, event, periodic, on_start
def find_tables(code):
"""
:type code: object
:rtype: list[sqlalchemy.Table]
"""
tables = []
for name, obj in code.__dict__.items():
if isinstance(obj, sqlalchemy.Table) and obj.metadata == botvars.metadata:
# if it's a Table, and it's using our metadata, append it to the list
tables.append(obj)
return tables
class PluginManager:
"""
PluginManager is the core of CloudBot plugin loading.
PluginManager loads Plugins, and adds their Hooks to easy-access dicts/lists.
Each Plugin represents a file, and loads hooks onto itself using find_hooks.
Plugins are the lowest level of abstraction in this class. There are four different plugin types:
- CommandPlugin is for bot commands
- RawPlugin hooks onto irc_raw irc lines
- RegexPlugin loads a regex parameter, and executes on irc lines which match the regex
- SievePlugin is a catch-all sieve, which all other plugins go through before being executed.
:type bot: cloudbot.bot.CloudBot
:type plugins: dict[str, Plugin]
:type commands: dict[str, CommandHook]
:type raw_triggers: dict[str, list[RawHook]]
:type catch_all_triggers: list[RawHook]
:type event_type_hooks: dict[cloudbot.event.EventType, list[EventHook]]
:type regex_hooks: list[(re.__Regex, RegexHook)]
:type sieves: list[SieveHook]
"""
def __init__(self, bot):
"""
Creates a new PluginManager. You generally only need to do this from inside cloudbot.bot.CloudBot
:type bot: cloudbot.bot.CloudBot
"""
self.bot = bot
self.plugins = {}
self.commands = {}
self.raw_triggers = {}
self.catch_all_triggers = []
self.event_type_hooks = {}
self.regex_hooks = []
self.sieves = []
self._hook_waiting_queues = {}
@asyncio.coroutine
def load_all(self, plugin_dir):
"""
Load a plugin from each *.py file in the given directory.
Won't load any plugins listed in "disabled_plugins".
:type plugin_dir: str
"""
path_list = glob.iglob(os.path.join(plugin_dir, '*.py'))
# Load plugins asynchronously :O
yield from asyncio.gather(*[self.load_plugin(path) for path in path_list], loop=self.bot.loop)
@asyncio.coroutine
def load_plugin(self, path):
"""
Loads a plugin from the given path and plugin object, then registers all hooks from that plugin.
Won't load any plugins listed in "disabled_plugins".
:type path: str
"""
file_path = os.path.abspath(path)
file_name = os.path.basename(path)
title = os.path.splitext(file_name)[0]
if "plugin_loading" in self.bot.config:
pl = self.bot.config.get("plugin_loading")
if pl.get("use_whitelist", False):
if title not in pl.get("whitelist", []):
logger.info('Not loading plugin module "{}": plugin not whitelisted'.format(file_name))
return
else:
if title in pl.get("blacklist", []):
logger.info('Not loading plugin module "{}": plugin blacklisted'.format(file_name))
return
# make sure to unload the previously loaded plugin from this path, if it was loaded.
if file_name in self.plugins:
yield from self.unload_plugin(file_path)
module_name = "plugins.{}".format(title)
try:
plugin_module = importlib.import_module(module_name)
# if this plugin was loaded before, reload it
if hasattr(plugin_module, "_cloudbot_loaded"):
importlib.reload(plugin_module)
except Exception:
logger.exception("Error loading {}:".format(file_name))
return
# create the plugin
plugin = Plugin(file_path, file_name, title, plugin_module)
# proceed to register hooks
# create database tables
yield from plugin.create_tables(self.bot)
# run on_start hooks
for on_start_hook in plugin.run_on_start:
success = yield from self.launch(on_start_hook, Event(bot=self.bot, hook=on_start_hook))
if not success:
logger.warning("Not registering hooks from plugin {}: on_start hook errored".format(plugin.title))
# unregister databases
plugin.unregister_tables(self.bot)
return
self.plugins[plugin.file_name] = plugin
for periodic_hook in plugin.periodic:
asyncio.async(self._start_periodic(periodic_hook))
self._log_hook(periodic_hook)
# register commands
for command_hook in plugin.commands:
for alias in command_hook.aliases:
if alias in self.commands:
logger.warning(
"Plugin {} attempted to register command {} which was already registered by {}. "
"Ignoring new assignment.".format(plugin.title, alias, self.commands[alias].plugin.title))
else:
self.commands[alias] = command_hook
self._log_hook(command_hook)
# register raw hooks
for raw_hook in plugin.raw_hooks:
if raw_hook.is_catch_all():
self.catch_all_triggers.append(raw_hook)
else:
for trigger in raw_hook.triggers:
if trigger in self.raw_triggers:
self.raw_triggers[trigger].append(raw_hook)
else:
self.raw_triggers[trigger] = [raw_hook]
self._log_hook(raw_hook)
# register events
for event_hook in plugin.events:
for event_type in event_hook.types:
if event_type in self.event_type_hooks:
self.event_type_hooks[event_type].append(event_hook)
else:
self.event_type_hooks[event_type] = [event_hook]
self._log_hook(event_hook)
# register regexps
for regex_hook in plugin.regexes:
for regex_match in regex_hook.regexes:
self.regex_hooks.append((regex_match, regex_hook))
self._log_hook(regex_hook)
# register sieves
for sieve_hook in plugin.sieves:
self.sieves.append(sieve_hook)
self._log_hook(sieve_hook)
# sort sieve hooks by priority
self.sieves.sort(key=lambda x: x.priority)
# we don't need this anymore
del plugin.run_on_start
@asyncio.coroutine
def unload_plugin(self, path):
"""
Unloads the plugin from the given path, unregistering all hooks from the plugin.
Returns True if the plugin was unloaded, False if the plugin wasn't loaded in the first place.
:type path: str
:rtype: bool
"""
file_name = os.path.basename(path)
title = os.path.splitext(file_name)[0]
if "disabled_plugins" in self.bot.config and title in self.bot.config['disabled_plugins']:
# this plugin hasn't been loaded, so no need to unload it
return False
# make sure this plugin is actually loaded
        if file_name not in self.plugins:
return False
# get the loaded plugin
plugin = self.plugins[file_name]
# unregister commands
for command_hook in plugin.commands:
for alias in command_hook.aliases:
if alias in self.commands and self.commands[alias] == command_hook:
# we need to make sure that there wasn't a conflict, so we don't delete another plugin's command
del self.commands[alias]
# unregister raw hooks
for raw_hook in plugin.raw_hooks:
if raw_hook.is_catch_all():
self.catch_all_triggers.remove(raw_hook)
else:
for trigger in raw_hook.triggers:
assert trigger in self.raw_triggers # this can't be not true
self.raw_triggers[trigger].remove(raw_hook)
if not self.raw_triggers[trigger]: # if that was the last hook for this trigger
del self.raw_triggers[trigger]
# unregister events
for event_hook in plugin.events:
for event_type in event_hook.types:
assert event_type in self.event_type_hooks # this can't be not true
self.event_type_hooks[event_type].remove(event_hook)
if not self.event_type_hooks[event_type]: # if that was the last hook for this event type
del self.event_type_hooks[event_type]
# unregister regexps
for regex_hook in plugin.regexes:
for regex_match in regex_hook.regexes:
self.regex_hooks.remove((regex_match, regex_hook))
# unregister sieves
for sieve_hook in plugin.sieves:
self.sieves.remove(sieve_hook)
# unregister databases
plugin.unregister_tables(self.bot)
# remove last reference to plugin
del self.plugins[plugin.file_name]
if self.bot.config.get("logging", {}).get("show_plugin_loading", True):
logger.info("Unloaded all plugins from {}.py".format(plugin.title))
return True
def _log_hook(self, hook):
"""
Logs registering a given hook
:type hook: Hook
"""
if self.bot.config.get("logging", {}).get("show_plugin_loading", True):
logger.info("Loaded {}".format(hook))
logger.debug("Loaded {}".format(repr(hook)))
def _prepare_parameters(self, hook, event):
"""
Prepares arguments for the given hook
:type hook: cloudbot.plugin.Hook
:type event: cloudbot.event.Event
:rtype: list
"""
parameters = []
for required_arg in hook.required_args:
if hasattr(event, required_arg):
value = getattr(event, required_arg)
parameters.append(value)
else:
logger.error("Plugin {} asked for invalid argument '{}', cancelling execution!"
.format(hook.description, required_arg))
logger.debug("Valid arguments are: {} ({})".format(dir(event), event))
return None
return parameters
def _execute_hook_threaded(self, hook, event):
"""
:type hook: Hook
:type event: cloudbot.event.Event
"""
event.prepare_threaded()
parameters = self._prepare_parameters(hook, event)
if parameters is None:
return None
try:
return hook.function(*parameters)
finally:
event.close_threaded()
@asyncio.coroutine
def _execute_hook_sync(self, hook, event):
"""
:type hook: Hook
:type event: cloudbot.event.Event
"""
yield from event.prepare()
parameters = self._prepare_parameters(hook, event)
if parameters is None:
return None
try:
return (yield from hook.function(*parameters))
finally:
yield from event.close()
@asyncio.coroutine
def _execute_hook(self, hook, event):
"""
Runs the specific hook with the given bot and event.
Returns False if the hook errored, True otherwise.
:type hook: cloudbot.plugin.Hook
:type event: cloudbot.event.Event
:rtype: bool
"""
try:
# _internal_run_threaded and _internal_run_coroutine prepare the database, and run the hook.
# _internal_run_* will prepare parameters and the database session, but won't do any error catching.
if hook.threaded:
out = yield from self.bot.loop.run_in_executor(None, self._execute_hook_threaded, hook, event)
else:
out = yield from self._execute_hook_sync(hook, event)
except Exception:
logger.exception("Error in hook {}".format(hook.description))
return False
if out is not None:
if isinstance(out, (list, tuple)):
# if there are multiple items in the response, return them on multiple lines
event.reply(*out)
else:
event.reply(*str(out).split('\n'))
return True
@asyncio.coroutine
def _sieve(self, sieve, event, hook):
"""
:type sieve: cloudbot.plugin.Hook
:type event: cloudbot.event.Event
:type hook: cloudbot.plugin.Hook
:rtype: cloudbot.event.Event
"""
try:
if sieve.threaded:
result = yield from self.bot.loop.run_in_executor(None, sieve.function, self.bot, event, hook)
else:
result = yield from sieve.function(self.bot, event, hook)
except Exception:
logger.exception("Error running sieve {} on {}:".format(sieve.description, hook.description))
return None
else:
return result
@asyncio.coroutine
def _start_periodic(self, hook):
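        # Sleeps once for the hook's initial_interval, then fires the hook every
        # `interval` seconds for as long as the bot's event loop keeps this task alive.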
interval = hook.interval
initial_interval = hook.initial_interval
yield from asyncio.sleep(initial_interval)
while True:
event = Event(bot=self.bot, hook=hook)
yield from self.launch(hook, event)
yield from asyncio.sleep(interval)
@asyncio.coroutine
def launch(self, hook, event):
"""
Dispatch a given event to a given hook using a given bot object.
Returns False if the hook didn't run successfully, and True if it ran successfully.
:type event: cloudbot.event.Event | cloudbot.event.CommandEvent
:type hook: cloudbot.plugin.Hook | cloudbot.plugin.CommandHook
:rtype: bool
"""
if hook.type not in ("on_start", "periodic"): # we don't need sieves on on_start hooks.
for sieve in self.bot.plugin_manager.sieves:
event = yield from self._sieve(sieve, event, hook)
if event is None:
return False
if hook.type == "command" and hook.auto_help and not event.text and hook.doc is not None:
event.notice_doc()
return False
if hook.single_thread:
# There should only be one running instance of this hook, so let's wait for the last event to be processed
# before starting this one.
key = (hook.plugin.title, hook.function_name)
if key in self._hook_waiting_queues:
queue = self._hook_waiting_queues[key]
if queue is None:
# there's a hook running, but the queue hasn't been created yet, since there's only one hook
queue = asyncio.Queue()
self._hook_waiting_queues[key] = queue
assert isinstance(queue, asyncio.Queue)
# create a future to represent this task
future = asyncio.Future()
queue.put_nowait(future)
# wait until the last task is completed
yield from future
else:
# set to None to signify that this hook is running, but there's no need to create a full queue
# in case there are no more hooks that will wait
self._hook_waiting_queues[key] = None
# Run the plugin with the message, and wait for it to finish
result = yield from self._execute_hook(hook, event)
queue = self._hook_waiting_queues[key]
if queue is None or queue.empty():
# We're the last task in the queue, we can delete it now.
del self._hook_waiting_queues[key]
else:
# set the result for the next task's future, so they can execute
next_future = yield from queue.get()
next_future.set_result(None)
else:
# Run the plugin with the message, and wait for it to finish
result = yield from self._execute_hook(hook, event)
# Return the result
return result
class Plugin:
"""
Each Plugin represents a plugin file, and contains loaded hooks.
:type file_path: str
:type file_name: str
:type title: str
:type commands: list[CommandHook]
:type regexes: list[RegexHook]
:type raw_hooks: list[RawHook]
:type sieves: list[SieveHook]
:type events: list[EventHook]
:type tables: list[sqlalchemy.Table]
"""
def __init__(self, filepath, filename, title, code):
"""
:type filepath: str
:type filename: str
:type code: object
"""
self.file_path = filepath
self.file_name = filename
self.title = title
self.commands, self.regexes, self.raw_hooks, self.sieves, self.events, self.periodic, self.run_on_start = find_hooks(self, code)
# we need to find tables for each plugin so that they can be unloaded from the global metadata when the
# plugin is reloaded
self.tables = find_tables(code)
@asyncio.coroutine
def create_tables(self, bot):
"""
Creates all sqlalchemy Tables that are registered in this plugin
:type bot: cloudbot.bot.CloudBot
"""
if self.tables:
# if there are any tables
logger.info("Registering tables for {}".format(self.title))
for table in self.tables:
if not (yield from bot.loop.run_in_executor(None, table.exists, bot.db_engine)):
yield from bot.loop.run_in_executor(None, table.create, bot.db_engine)
def unregister_tables(self, bot):
"""
Unregisters all sqlalchemy Tables registered to the global metadata by this plugin
:type bot: cloudbot.bot.CloudBot
"""
if self.tables:
# if there are any tables
logger.info("Unregistering tables for {}".format(self.title))
for table in self.tables:
bot.db_metadata.remove(table)
class Hook:
"""
Each hook is specific to one function. This class is never used by itself, rather extended.
    :type type: str
:type plugin: Plugin
:type function: callable
:type function_name: str
:type required_args: list[str]
:type threaded: bool
:type permissions: list[str]
:type single_thread: bool
"""
def __init__(self, _type, plugin, func_hook):
"""
:type _type: str
:type plugin: Plugin
:type func_hook: hook._Hook
"""
self.type = _type
self.plugin = plugin
self.function = func_hook.function
self.function_name = self.function.__name__
self.required_args = inspect.getargspec(self.function)[0]
if self.required_args is None:
self.required_args = []
# don't process args starting with "_"
self.required_args = [arg for arg in self.required_args if not arg.startswith("_")]
if asyncio.iscoroutine(self.function) or asyncio.iscoroutinefunction(self.function):
self.threaded = False
else:
self.threaded = True
self.permissions = func_hook.kwargs.pop("permissions", [])
self.single_thread = func_hook.kwargs.pop("singlethread", False)
if func_hook.kwargs:
# we should have popped all the args, so warn if there are any left
logger.warning("Ignoring extra args {} from {}".format(func_hook.kwargs, self.description))
@property
def description(self):
return "{}:{}".format(self.plugin.title, self.function_name)
def __repr__(self):
return "type: {}, plugin: {}, permissions: {}, single_thread: {}, threaded: {}".format(
self.type, self.plugin.title, self.permissions, self.single_thread, self.threaded
)
class CommandHook(Hook):
"""
:type name: str
:type aliases: list[str]
:type doc: str
:type auto_help: bool
"""
def __init__(self, plugin, cmd_hook):
"""
:type plugin: Plugin
:type cmd_hook: cloudbot.util.hook._CommandHook
"""
self.auto_help = cmd_hook.kwargs.pop("autohelp", True)
self.name = cmd_hook.main_alias.lower()
self.aliases = [alias.lower() for alias in cmd_hook.aliases] # turn the set into a list
self.aliases.remove(self.name)
self.aliases.insert(0, self.name) # make sure the name, or 'main alias' is in position 0
self.doc = cmd_hook.doc
super().__init__("command", plugin, cmd_hook)
def __repr__(self):
return "Command[name: {}, aliases: {}, {}]".format(self.name, self.aliases[1:], Hook.__repr__(self))
def __str__(self):
return "command {} from {}".format("/".join(self.aliases), self.plugin.file_name)
class RegexHook(Hook):
"""
:type regexes: set[re.__Regex]
"""
def __init__(self, plugin, regex_hook):
"""
:type plugin: Plugin
:type regex_hook: cloudbot.util.hook._RegexHook
"""
self.run_on_cmd = regex_hook.kwargs.pop("run_on_cmd", False)
self.regexes = regex_hook.regexes
super().__init__("regex", plugin, regex_hook)
def __repr__(self):
return "Regex[regexes: [{}], {}]".format(", ".join(regex.pattern for regex in self.regexes),
Hook.__repr__(self))
def __str__(self):
return "regex {} from {}".format(self.function_name, self.plugin.file_name)
class PeriodicHook(Hook):
"""
:type interval: int
"""
def __init__(self, plugin, periodic_hook):
"""
:type plugin: Plugin
:type periodic_hook: cloudbot.util.hook._PeriodicHook
"""
self.interval = periodic_hook.interval
self.initial_interval = periodic_hook.kwargs.pop("initial_interval", self.interval)
super().__init__("periodic", plugin, periodic_hook)
def __repr__(self):
return "Periodic[interval: [{}], {}]".format(self.interval, Hook.__repr__(self))
def __str__(self):
return "periodic hook ({} seconds) {} from {}".format(self.interval, self.function_name, self.plugin.file_name)
class RawHook(Hook):
"""
:type triggers: set[str]
"""
def __init__(self, plugin, irc_raw_hook):
"""
:type plugin: Plugin
:type irc_raw_hook: cloudbot.util.hook._RawHook
"""
super().__init__("irc_raw", plugin, irc_raw_hook)
self.triggers = irc_raw_hook.triggers
def is_catch_all(self):
return "*" in self.triggers
def __repr__(self):
return "Raw[triggers: {}, {}]".format(list(self.triggers), Hook.__repr__(self))
def __str__(self):
return "irc raw {} ({}) from {}".format(self.function_name, ",".join(self.triggers), self.plugin.file_name)
class SieveHook(Hook):
def __init__(self, plugin, sieve_hook):
"""
:type plugin: Plugin
:type sieve_hook: cloudbot.util.hook._SieveHook
"""
self.priority = sieve_hook.kwargs.pop("priority", 100)
# We don't want to thread sieves by default - this is retaining old behavior for compatibility
super().__init__("sieve", plugin, sieve_hook)
def __repr__(self):
return "Sieve[{}]".format(Hook.__repr__(self))
def __str__(self):
return "sieve {} from {}".format(self.function_name, self.plugin.file_name)
class EventHook(Hook):
"""
:type types: set[cloudbot.event.EventType]
"""
def __init__(self, plugin, event_hook):
"""
:type plugin: Plugin
:type event_hook: cloudbot.util.hook._EventHook
"""
super().__init__("event", plugin, event_hook)
self.types = event_hook.types
def __repr__(self):
return "Event[types: {}, {}]".format(list(self.types), Hook.__repr__(self))
def __str__(self):
return "event {} ({}) from {}".format(self.function_name, ",".join(str(t) for t in self.types),
self.plugin.file_name)
class OnStartHook(Hook):
def __init__(self, plugin, on_start_hook):
"""
:type plugin: Plugin
:type on_start_hook: cloudbot.util.hook._On_startHook
"""
super().__init__("on_start", plugin, on_start_hook)
def __repr__(self):
return "On_start[{}]".format(Hook.__repr__(self))
def __str__(self):
return "on_start {} from {}".format(self.function_name, self.plugin.file_name)
_hook_name_to_plugin = {
"command": CommandHook,
"regex": RegexHook,
"irc_raw": RawHook,
"sieve": SieveHook,
"event": EventHook,
"periodic": PeriodicHook,
"on_start": OnStartHook
}
|
jpaton/xen-4.1-LJX1 | refs/heads/master | tools/xm-test/tests/pause/01_pause_basic_pos.py | 42 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Paul Larson <[email protected]>
# Description:
# Positive Tests:
# Tests for xm pause
# 1) Create domain, verify it's up with console
# 2) pause the domain
# 3) verify it's paused by failure to connect console
import time
import commands
from XmTestLib import *
# Create a domain (default XmTestDomain, with our ramdisk)
domain = XmTestDomain()
# Start it
try:
console = domain.start()
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
try:
# Make sure a command succeeds
run = console.runCmd("ls")
except ConsoleError, e:
FAIL(str(e))
# Close the console
domain.closeConsole()
# Pause the domain
status, output = traceCommand("xm pause %s" % domain.getName())
if status != 0:
    FAIL("xm pause returned invalid %i != 0" % status)
# Try to attach a console to it
try:
console = domain.getConsole()
console.setHistorySaveCmds(value=True)
run = console.runCmd("ls")
#If we get here, console attached to paused domain (unexpected)
FAIL("console attached to supposedly paused domain")
except ConsoleError, e:
pass
# Close the console
domain.closeConsole()
status, output = traceCommand("xm unpause %s" % domain.getName())
if status != 0:
    FAIL("xm unpause returned invalid %i != 0" % status)
# Stop the domain (nice shutdown)
domain.stop()
|
rezometz/django-paiji2-infoconcert | refs/heads/master | paiji2_infoconcert/models.py | 84 | # from django.db import models
# Create your models here.
|
cloudbase/nova-virtualbox | refs/heads/virtualbox_driver | nova/tests/unit/keymgr/test_conf_key_mgr.py | 71 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the conf key manager.
"""
import array
from oslo_config import cfg
from nova.keymgr import conf_key_mgr
from nova.keymgr import key
from nova.tests.unit.keymgr import test_single_key_mgr
CONF = cfg.CONF
CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
class ConfKeyManagerTestCase(test_single_key_mgr.SingleKeyManagerTestCase):
def __init__(self, *args, **kwargs):
super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs)
self._hex_key = '0' * 64
def _create_key_manager(self):
CONF.set_default('fixed_key', default=self._hex_key, group='keymgr')
return conf_key_mgr.ConfKeyManager()
def setUp(self):
super(ConfKeyManagerTestCase, self).setUp()
encoded_key = array.array('B', self._hex_key.decode('hex')).tolist()
self.key = key.SymmetricKey('AES', encoded_key)
def test_init(self):
key_manager = self._create_key_manager()
self.assertEqual(self._hex_key, key_manager._hex_key)
def test_init_value_error(self):
CONF.set_default('fixed_key', default=None, group='keymgr')
self.assertRaises(ValueError, conf_key_mgr.ConfKeyManager)
def test_generate_hex_key(self):
key_manager = self._create_key_manager()
self.assertEqual(self._hex_key, key_manager._generate_hex_key())
|
junhuac/MQUIC | refs/heads/master | src/tools/gyp/test/ios/gyptest-framework.py | 3 | #!/usr/bin/env python
# Copyright 2016 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that ios app frameworks are built correctly.
"""
import TestGyp
import TestMac
import subprocess
import sys
if sys.platform == 'darwin' and TestMac.Xcode.Version()>="0700":
test = TestGyp.TestGyp(formats=['ninja'])
if test.format == 'xcode-ninja':
test.skip_test()
test.run_gyp('framework.gyp', chdir='framework')
test.build('framework.gyp', 'iOSFramework', chdir='framework')
test.built_file_must_exist(
'iOSFramework.framework/Headers/iOSFramework.h',
chdir='framework')
test.built_file_must_exist(
'iOSFramework.framework/iOSFramework',
chdir='framework')
test.pass_test()
|
gm2211/vpnAlfredWorkflow | refs/heads/develop | src/alp/request/requests/sessions.py | 3 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from datetime import datetime
from .compat import cookielib
from .cookies import cookiejar_from_dict, extract_cookies_to_jar
from .models import Request, PreparedRequest
from .hooks import default_hooks, dispatch_hook
from .utils import from_key_val_list, default_headers
from .exceptions import TooManyRedirects, InvalidSchema
from .compat import urlparse, urljoin
from .adapters import HTTPAdapter
from .utils import requote_uri, get_environ_proxies, get_netrc_auth
from .status_codes import codes
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_moved, # 307
)
DEFAULT_REDIRECT_LIMIT = 30
def merge_kwargs(local_kwarg, default_kwarg):
"""Merges kwarg dictionaries.
If a local key in the dictionary is set to None, it will be removed.
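    Example (illustrative):
        merge_kwargs({'Accept': 'application/json', 'X-Foo': None},
                     {'accept': '*/*', 'User-Agent': 'ua'})
        -> {'accept': 'application/json', 'User-Agent': 'ua'}
    Session-level defaults are overridden case-insensitively, and keys explicitly
    set to None are removed from the result.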
"""
if default_kwarg is None:
return local_kwarg
if isinstance(local_kwarg, str):
return local_kwarg
if local_kwarg is None:
return default_kwarg
# Bypass if not a dictionary (e.g. timeout)
if not hasattr(default_kwarg, 'items'):
return local_kwarg
default_kwarg = from_key_val_list(default_kwarg)
local_kwarg = from_key_val_list(local_kwarg)
# Update new values in a case-insensitive way
def get_original_key(original_keys, new_key):
"""
        Finds the key from original_keys that case-insensitively matches new_key.
"""
for original_key in original_keys:
            if new_key.lower() == original_key.lower():
return original_key
return new_key
kwargs = default_kwarg.copy()
original_keys = kwargs.keys()
for key, value in local_kwarg.items():
kwargs[get_original_key(original_keys, key)] = value
# Remove keys that are set to None.
for (k, v) in local_kwarg.items():
if v is None:
del kwargs[k]
return kwargs
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
"""Receives a Response. Returns a generator of Responses."""
i = 0
prepared_request = PreparedRequest()
prepared_request.body = req.body
prepared_request.headers = req.headers.copy()
prepared_request.hooks = req.hooks
prepared_request.method = req.method
prepared_request.url = req.url
# ((resp.status_code is codes.see_other))
while (('location' in resp.headers and resp.status_code in REDIRECT_STATI)):
resp.content # Consume socket so it can be released
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
method = prepared_request.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# Facilitate non-RFC2616-compliant 'location' headers
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
if not urlparse(url).netloc:
# Compliant with RFC3986, we percent encode the url.
url = urljoin(resp.url, requote_uri(url))
prepared_request.url = url
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
if (resp.status_code == codes.see_other and
prepared_request.method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
if (resp.status_code in (codes.moved, codes.found) and
prepared_request.method not in ('GET', 'HEAD')):
method = 'GET'
prepared_request.method = method
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary, codes.resume):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
prepared_request.prepare_cookies(self.cookies)
resp = self.send(
prepared_request,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
class Session(SessionRedirectMixin):
"""A Requests session.
    Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
200
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream',
'trust_env', 'max_redirects']
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol to the URL of the proxy (e.g.
#: {'http': 'foo.bar:3128'}) to be used on each
#: :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Should we trust the environment?
self.trust_env = True
# Set up a CookieJar to be used by default
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = {}
self.mount('http://', HTTPAdapter())
self.mount('https://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary or bytes to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of 'filename': file-like-objects
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the
request.
:param allow_redirects: (optional) Boolean. Set to True by default.
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
"""
cookies = cookies or {}
proxies = proxies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = self.cookies.copy()
merged_cookies.update(cookies)
cookies = merged_cookies
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Set environment's basic authentication.
if not auth:
auth = get_netrc_auth(url)
# Look for configuration.
if not verify and verify is not False:
verify = os.environ.get('REQUESTS_CA_BUNDLE')
# Curl compatibility.
if not verify and verify is not False:
verify = os.environ.get('CURL_CA_BUNDLE')
# Merge all the kwargs.
params = merge_kwargs(params, self.params)
headers = merge_kwargs(headers, self.headers)
auth = merge_kwargs(auth, self.auth)
proxies = merge_kwargs(proxies, self.proxies)
hooks = merge_kwargs(hooks, self.hooks)
stream = merge_kwargs(stream, self.stream)
verify = merge_kwargs(verify, self.verify)
cert = merge_kwargs(cert, self.cert)
# Create the Request.
req = Request()
req.method = method.upper()
req.url = url
req.headers = headers
req.files = files
req.data = data
req.params = params
req.auth = auth
req.cookies = cookies
req.hooks = hooks
# Prepare the Request.
prep = req.prepare()
# Send the request.
send_kwargs = {
'stream': stream,
'timeout': timeout,
'verify': verify,
'cert': cert,
'proxies': proxies,
'allow_redirects': allow_redirects,
}
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if getattr(request, 'prepare', None):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of
# hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
timeout = kwargs.get('timeout')
verify = kwargs.get('verify')
cert = kwargs.get('cert')
proxies = kwargs.get('proxies')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, stream=stream,
timeout=timeout, verify=verify, cert=cert,
proxies=proxies)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) response at the start
history.insert(0, r)
# Get the last response in the redirect chain
r = history.pop()
r.history = tuple(history)
return r
def get_adapter(self, url):
"""Returns the appropriate connnection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for _, v in self.adapters.items():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix."""
self.adapters[prefix] = adapter
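# Editor's note -- an illustrative sketch of registering an adapter (not part
# of requests). HTTPAdapter is assumed to come from requests.adapters and is
# not imported in this module.
#
#   s = Session()
#   s.mount('https://internal.example/', HTTPAdapter(max_retries=3))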
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
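# Editor's note -- an illustrative usage sketch of the factory and verb
# helpers defined above (not part of requests). The URLs are placeholder
# assumptions.
#
#   s = session()
#   resp = s.get('http://example.com', timeout=5.0)
#   resp = s.post('http://example.com/upload', data={'key': 'value'})
#   s.close()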
|
trungdtbk/faucet | refs/heads/master | faucet/watcher_conf.py | 2 | """Gauge watcher configuration."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from faucet.conf import Conf, test_config_condition
class WatcherConf(Conf):
"""Stores the state and configuration to monitor a single stat.
Watcher Config
Watchers are configured in the watchers config block in the config for gauge.
The following elements can be configured for each watcher, at the level of
/watchers/<watcher name>/:
* type (string): The type of watcher (i.e. which stat this watcher monitors). \
The types are 'port_state', 'port_stats', 'meter_stats' or 'flow_table'.
* dps (list): A list of dps that should be monitored with this watcher.
* db (string): The db that will be used to store the data once it is retrieved.
* interval (int): if this watcher requires polling the switch, it will \
monitor at this interval.
The config for a db should be created in the gauge config file under the dbs
config block.
The following elements can be configured for each db, at the level of
/dbs/<db name>/:
* type (string): the type of db. The available types are 'text' and 'influx' \
for port_state, 'text', 'influx' and 'prometheus' for port_stats, and \
'text' for flow_table.
The following config elements then depend on the type.
For text:
* file (string): the filename of the file to write output to.
* path (string): path where files should be written when writing to \
multiple files
* compress (bool): compress (with gzip) flow_table output while writing it
For influx:
* influx_db (str): The name of the influxdb database. Defaults to 'faucet'.
* influx_host (str): The host where the influxdb is reachable. Defaults to \
'localhost'.
* influx_port (int): The port that the influxdb host will listen on. Defaults \
to 8086.
* influx_user (str): The username for accessing influxdb. Defaults to ''.
* influx_pwd (str): The password for accessing influxdb. Defaults to ''.
* influx_timeout (int): The timeout in seconds for connecting to influxdb. \
Defaults to 10.
* influx_retries (int): The number of times to retry connecting to influxdb \
after failure. Defaults to 3.
For Prometheus:
* prometheus_port (int): The port used to export prometheus data. Defaults to \
9303.
* prometheus_addr (ip addr str): The address used to export prometheus data. \
Defaults to '0.0.0.0'.
"""
db_defaults = {
'type': None,
'file': None,
'path': None,
'compress': False,
# compress flow table file
'influx_db': 'faucet',
# influx database name
'influx_host': 'localhost',
# influx database location
'influx_port': 8086,
'influx_user': '',
# influx username
'influx_pwd': '',
# influx password
'influx_timeout': 10,
# timeout on influx requests
'influx_retries': 3,
# attempts to retry influx request
# prometheus config
'prometheus_port': 9303,
'prometheus_addr': '0.0.0.0',
'prometheus_test_thread': False,
}
db_defaults_types = {
'type': str,
'file': str,
'path': str,
'compress': bool,
'influx_db': str,
'influx_host': str,
'influx_port': int,
'influx_user': str,
'influx_pwd': str,
'influx_timeout': int,
'influx_retries': int,
'prometheus_port': int,
'prometheus_addr': str,
'prometheus_test_thread': bool,
}
defaults = {
'name': None,
'type': None,
'dps': None,
'all_dps': False,
'interval': 30,
'db': None,
'dbs': None,
'db_type': 'text',
}
defaults_types = {
'name': str,
'type': str,
'dps': list,
'all_dps': bool,
'interval': int,
'db': str,
'dbs': list,
'db_type': str,
}
def __init__(self, _id, dp_id, conf, prom_client):
self.db = None # pylint: disable=invalid-name
self.dbs = None
self.dp = None # pylint: disable=invalid-name
self.all_dps = None
self.type = None
self.interval = None
self.db_type = None
self.dps = None
self.compress = None
self.file = None
self.path = None
self.influx_db = None
self.influx_host = None
self.influx_port = None
self.influx_user = None
self.influx_pwd = None
self.influx_timeout = None
self.influx_retries = None
self.name = None
self.prometheus_port = None
self.prometheus_addr = None
self.prometheus_test_thread = None
self.defaults.update(self.db_defaults)
self.defaults_types.update(self.db_defaults_types)
super(WatcherConf, self).__init__(_id, dp_id, conf)
self.name = str(self._id)
self.prom_client = prom_client
def add_db(self, db_conf):
"""Add database config to this watcher."""
self._check_conf_types(db_conf, self.db_defaults_types)
db_conf = deepcopy(db_conf)
db_type = db_conf.pop('type')
db_conf['db_type'] = db_type
self.update(db_conf)
test_config_condition(
self.file is not None and not
(os.path.dirname(self.file) and os.access(os.path.dirname(self.file), os.W_OK)),
'%s is not writable' % self.file)
test_config_condition(
self.path is not None and not os.access(self.path, os.W_OK),
'%s is not writable' % self.path)
def add_dp(self, dp): # pylint: disable=invalid-name
"""Add a datapath to this watcher."""
self.dp = dp # pylint: disable=invalid-name
def check_config(self):
super(WatcherConf, self).check_config()
test_config_condition(
self.all_dps and self.dps is not None,
'all_dps and dps cannot be set together')
test_config_condition(
not self.type, 'type must be set')
valid_types = {'flow_table', 'port_stats', 'port_state', 'meter_stats'}
test_config_condition(
self.type not in valid_types,
'type %s not one of %s' % (self.type, valid_types))
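# Editor's note -- an illustrative sketch (assumed usage, not from this
# module) of how a watcher might be built from such a config block. `dp` and
# the prom_client value are placeholders.
#
#   conf = WatcherConf('port_stats_poller', 0x1,
#                      {'type': 'port_stats', 'dps': ['sw1'], 'interval': 10},
#                      prom_client=None)
#   conf.add_db({'type': 'prometheus', 'prometheus_port': 9303})
#   conf.add_dp(dp)
#   conf.check_config()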
|
normtown/SickRage | refs/heads/master | lib/unidecode/x08c.py | 251 | data = (
'Yu ', # 0x00
'Shui ', # 0x01
'Shen ', # 0x02
'Diao ', # 0x03
'Chan ', # 0x04
'Liang ', # 0x05
'Zhun ', # 0x06
'Sui ', # 0x07
'Tan ', # 0x08
'Shen ', # 0x09
'Yi ', # 0x0a
'Mou ', # 0x0b
'Chen ', # 0x0c
'Die ', # 0x0d
'Huang ', # 0x0e
'Jian ', # 0x0f
'Xie ', # 0x10
'Nue ', # 0x11
'Ye ', # 0x12
'Wei ', # 0x13
'E ', # 0x14
'Yu ', # 0x15
'Xuan ', # 0x16
'Chan ', # 0x17
'Zi ', # 0x18
'An ', # 0x19
'Yan ', # 0x1a
'Di ', # 0x1b
'Mi ', # 0x1c
'Pian ', # 0x1d
'Xu ', # 0x1e
'Mo ', # 0x1f
'Dang ', # 0x20
'Su ', # 0x21
'Xie ', # 0x22
'Yao ', # 0x23
'Bang ', # 0x24
'Shi ', # 0x25
'Qian ', # 0x26
'Mi ', # 0x27
'Jin ', # 0x28
'Man ', # 0x29
'Zhe ', # 0x2a
'Jian ', # 0x2b
'Miu ', # 0x2c
'Tan ', # 0x2d
'Zen ', # 0x2e
'Qiao ', # 0x2f
'Lan ', # 0x30
'Pu ', # 0x31
'Jue ', # 0x32
'Yan ', # 0x33
'Qian ', # 0x34
'Zhan ', # 0x35
'Chen ', # 0x36
'Gu ', # 0x37
'Qian ', # 0x38
'Hong ', # 0x39
'Xia ', # 0x3a
'Jue ', # 0x3b
'Hong ', # 0x3c
'Han ', # 0x3d
'Hong ', # 0x3e
'Xi ', # 0x3f
'Xi ', # 0x40
'Huo ', # 0x41
'Liao ', # 0x42
'Han ', # 0x43
'Du ', # 0x44
'Long ', # 0x45
'Dou ', # 0x46
'Jiang ', # 0x47
'Qi ', # 0x48
'Shi ', # 0x49
'Li ', # 0x4a
'Deng ', # 0x4b
'Wan ', # 0x4c
'Bi ', # 0x4d
'Shu ', # 0x4e
'Xian ', # 0x4f
'Feng ', # 0x50
'Zhi ', # 0x51
'Zhi ', # 0x52
'Yan ', # 0x53
'Yan ', # 0x54
'Shi ', # 0x55
'Chu ', # 0x56
'Hui ', # 0x57
'Tun ', # 0x58
'Yi ', # 0x59
'Tun ', # 0x5a
'Yi ', # 0x5b
'Jian ', # 0x5c
'Ba ', # 0x5d
'Hou ', # 0x5e
'E ', # 0x5f
'Cu ', # 0x60
'Xiang ', # 0x61
'Huan ', # 0x62
'Jian ', # 0x63
'Ken ', # 0x64
'Gai ', # 0x65
'Qu ', # 0x66
'Fu ', # 0x67
'Xi ', # 0x68
'Bin ', # 0x69
'Hao ', # 0x6a
'Yu ', # 0x6b
'Zhu ', # 0x6c
'Jia ', # 0x6d
'[?] ', # 0x6e
'Xi ', # 0x6f
'Bo ', # 0x70
'Wen ', # 0x71
'Huan ', # 0x72
'Bin ', # 0x73
'Di ', # 0x74
'Zong ', # 0x75
'Fen ', # 0x76
'Yi ', # 0x77
'Zhi ', # 0x78
'Bao ', # 0x79
'Chai ', # 0x7a
'Han ', # 0x7b
'Pi ', # 0x7c
'Na ', # 0x7d
'Pi ', # 0x7e
'Gou ', # 0x7f
'Na ', # 0x80
'You ', # 0x81
'Diao ', # 0x82
'Mo ', # 0x83
'Si ', # 0x84
'Xiu ', # 0x85
'Huan ', # 0x86
'Kun ', # 0x87
'He ', # 0x88
'He ', # 0x89
'Mo ', # 0x8a
'Han ', # 0x8b
'Mao ', # 0x8c
'Li ', # 0x8d
'Ni ', # 0x8e
'Bi ', # 0x8f
'Yu ', # 0x90
'Jia ', # 0x91
'Tuan ', # 0x92
'Mao ', # 0x93
'Pi ', # 0x94
'Xi ', # 0x95
'E ', # 0x96
'Ju ', # 0x97
'Mo ', # 0x98
'Chu ', # 0x99
'Tan ', # 0x9a
'Huan ', # 0x9b
'Jue ', # 0x9c
'Bei ', # 0x9d
'Zhen ', # 0x9e
'Yuan ', # 0x9f
'Fu ', # 0xa0
'Cai ', # 0xa1
'Gong ', # 0xa2
'Te ', # 0xa3
'Yi ', # 0xa4
'Hang ', # 0xa5
'Wan ', # 0xa6
'Pin ', # 0xa7
'Huo ', # 0xa8
'Fan ', # 0xa9
'Tan ', # 0xaa
'Guan ', # 0xab
'Ze ', # 0xac
'Zhi ', # 0xad
'Er ', # 0xae
'Zhu ', # 0xaf
'Shi ', # 0xb0
'Bi ', # 0xb1
'Zi ', # 0xb2
'Er ', # 0xb3
'Gui ', # 0xb4
'Pian ', # 0xb5
'Bian ', # 0xb6
'Mai ', # 0xb7
'Dai ', # 0xb8
'Sheng ', # 0xb9
'Kuang ', # 0xba
'Fei ', # 0xbb
'Tie ', # 0xbc
'Yi ', # 0xbd
'Chi ', # 0xbe
'Mao ', # 0xbf
'He ', # 0xc0
'Bi ', # 0xc1
'Lu ', # 0xc2
'Ren ', # 0xc3
'Hui ', # 0xc4
'Gai ', # 0xc5
'Pian ', # 0xc6
'Zi ', # 0xc7
'Jia ', # 0xc8
'Xu ', # 0xc9
'Zei ', # 0xca
'Jiao ', # 0xcb
'Gai ', # 0xcc
'Zang ', # 0xcd
'Jian ', # 0xce
'Ying ', # 0xcf
'Xun ', # 0xd0
'Zhen ', # 0xd1
'She ', # 0xd2
'Bin ', # 0xd3
'Bin ', # 0xd4
'Qiu ', # 0xd5
'She ', # 0xd6
'Chuan ', # 0xd7
'Zang ', # 0xd8
'Zhou ', # 0xd9
'Lai ', # 0xda
'Zan ', # 0xdb
'Si ', # 0xdc
'Chen ', # 0xdd
'Shang ', # 0xde
'Tian ', # 0xdf
'Pei ', # 0xe0
'Geng ', # 0xe1
'Xian ', # 0xe2
'Mai ', # 0xe3
'Jian ', # 0xe4
'Sui ', # 0xe5
'Fu ', # 0xe6
'Tan ', # 0xe7
'Cong ', # 0xe8
'Cong ', # 0xe9
'Zhi ', # 0xea
'Ji ', # 0xeb
'Zhang ', # 0xec
'Du ', # 0xed
'Jin ', # 0xee
'Xiong ', # 0xef
'Shun ', # 0xf0
'Yun ', # 0xf1
'Bao ', # 0xf2
'Zai ', # 0xf3
'Lai ', # 0xf4
'Feng ', # 0xf5
'Cang ', # 0xf6
'Ji ', # 0xf7
'Sheng ', # 0xf8
'Ai ', # 0xf9
'Zhuan ', # 0xfa
'Fu ', # 0xfb
'Gou ', # 0xfc
'Sai ', # 0xfd
'Ze ', # 0xfe
'Liao ', # 0xff
)
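# Editor's note: this table appears to map the Unicode block U+8C00-U+8CFF
# (see the file name x08c.py) to ASCII transliterations, indexed by the low
# byte given in the trailing comments.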
|
endlessm/chromium-browser | refs/heads/master | third_party/grpc/src/src/python/grpcio_status/grpc_status/__init__.py | 90 | # Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
kk9599/django-cms | refs/heads/develop | cms/test_utils/project/urls_2.py | 47 | from cms.utils.conf import get_cms_setting
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = [
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
url(r'^media/cms/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': get_cms_setting('MEDIA_ROOT'), 'show_indexes': True}),
url(r'^jsi18n/(?P<packages>\S+?)/$', 'django.views.i18n.javascript_catalog'),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += i18n_patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^example/$', 'cms.test_utils.project.placeholderapp.views.example_view'),
url(r'^', include('cms.urls')),
)
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
RachitKansal/scikit-learn | refs/heads/master | sklearn/datasets/olivetti_faces.py | 198 | """Modified Olivetti faces dataset.
The original database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
There are ten different images of each of 40 distinct subjects. For some
subjects, the images were taken at different times, varying the lighting,
facial expressions (open / closed eyes, smiling / not smiling) and facial
details (glasses / no glasses). All the images were taken against a dark
homogeneous background with the subjects in an upright, frontal position (with
tolerance for some side movement).
The original dataset consisted of 92 x 112 pixel images, while the Roweis
version consists of 64 x 64 pixel images.
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
try:
# Python 2
import urllib2
urlopen = urllib2.urlopen
except ImportError:
# Python 3
import urllib.request
urlopen = urllib.request.urlopen
import numpy as np
from scipy.io.matlab import loadmat
from .base import get_data_home, Bunch
from ..utils import check_random_state
from ..externals import joblib
DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat"
TARGET_FILENAME = "olivetti.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
download_if_missing=True):
"""Loader for the Olivetti faces data-set from AT&T.
Read more in the :ref:`User Guide <olivetti_faces>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : boolean, optional
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : optional, integer or RandomState object
The seed or the random number generator used to shuffle the
data.
Returns
-------
An object with the following attributes:
data : numpy array of shape (400, 4096)
Each row corresponds to a ravelled face image of original size 64 x 64 pixels.
images : numpy array of shape (400, 64, 64)
Each row is a face image corresponding to one of the 40 subjects of the dataset.
target : numpy array of shape (400, )
Labels associated to each face image. Those labels are ranging from
0-39 and correspond to the Subject IDs.
DESCR : string
Description of the modified Olivetti Faces Dataset.
Notes
------
This dataset consists of 10 pictures each of 40 individuals. The original
database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Olivetti faces from %s to %s'
% (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
mfile = loadmat(buf)
faces = mfile['faces'].T.copy()
joblib.dump(faces, join(data_home, TARGET_FILENAME), compress=6)
del mfile
else:
faces = joblib.load(join(data_home, TARGET_FILENAME))
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
return Bunch(data=faces.reshape(len(faces), -1),
images=faces,
target=target,
DESCR=MODULE_DOCS)
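# Editor's note -- an illustrative usage sketch (not part of scikit-learn),
# based on the shapes documented in the docstring above:
#
#   bunch = fetch_olivetti_faces(shuffle=True, random_state=0)
#   assert bunch.images.shape == (400, 64, 64)
#   assert bunch.data.shape == (400, 4096)
#   assert bunch.target.shape == (400,)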
|
m-ober/byceps | refs/heads/master | byceps/services/news/models/__init__.py | 12133432 | |
tugluck/galah | refs/heads/v0.2dev | galah/sheep/utility/__init__.py | 12133432 | |
gerrive/horizon | refs/heads/master | openstack_dashboard/dashboards/project/networks/ports/__init__.py | 12133432 | |
dhorelik/django-cms | refs/heads/develop | cms/tests/test_signals.py | 23 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.utils import override_settings
from cms.api import create_page
from cms.models import UrlconfRevision
from cms.signals import urls_need_reloading
from cms.test_utils.testcases import CMSTestCase
APP_NAME = 'SampleApp'
class SignalTester(object):
def __init__(self):
self.call_count = 0
self.calls = []
def __call__(self, *args, **kwargs):
self.call_count += 1
self.calls.append((args, kwargs))
@contextmanager
def signal_tester(signal):
env = SignalTester()
signal.connect(env, weak=True)
try:
yield env
finally:
signal.disconnect(env, weak=True)
class SignalTests(TestCase):
def test_urls_need_reloading_signal_create(self):
with signal_tester(urls_need_reloading) as env:
self.client.get('/')
self.assertEqual(env.call_count, 0)
create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
self.client.get('/')
self.assertEqual(env.call_count, 1)
def test_urls_need_reloading_signal_delete(self):
with signal_tester(urls_need_reloading) as env:
self.client.get('/')
self.assertEqual(env.call_count, 0)
page = create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
page.delete()
self.client.get('/')
self.assertEqual(env.call_count, 1)
def test_urls_need_reloading_signal_change_slug(self):
with signal_tester(urls_need_reloading) as env:
self.assertEqual(env.call_count, 0)
page = create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
self.client.get('/')
self.assertEqual(env.call_count, 1)
title = page.title_set.get(language="en")
title.slug += 'test'
title.save()
page.publish('en')
self.client.get('/')
self.assertEqual(env.call_count, 2)
@override_settings(
MIDDLEWARE_CLASSES=[
'cms.middleware.utils.ApphookReloadMiddleware'
] + settings.MIDDLEWARE_CLASSES,
)
class ApphooksReloadTests(CMSTestCase):
def test_urls_reloaded(self):
"""
Tests that URLs are automatically reloaded when the ApphookReload
middleware is installed.
"""
#
# Sets up an apphook'ed page, but does not yet publish it.
#
superuser = get_user_model().objects.create_superuser(
'admin', '[email protected]', 'admin')
page = create_page("home", "nav_playground.html", "en",
created_by=superuser)
page.publish('en')
app_page = create_page("app_page", "nav_playground.html", "en",
created_by=superuser, parent=page,
published=False, apphook="SampleApp")
self.client.get('/') # Required to invoke the middleware
#
# Gets the current urls revision for testing against later.
#
current_revision, _ = UrlconfRevision.get_or_create_revision()
#
# Publishes the apphook. This is one of many ways to trigger the
# firing of the signal. The tests above test some of the other ways
# already.
#
app_page.publish('en')
self.client.get('/') # Required to invoke the middleware
# And this should result in the updating of the UrlconfRevision.
new_revision, _ = UrlconfRevision.get_or_create_revision()
self.assertNotEquals(current_revision, new_revision)
|
adityapb/pyspace | refs/heads/master | pyspace/tests/test_vtk.py | 1 | from pyspace.utils import dump_vtk
import numpy
from pyspace.planet import PlanetArray
from pyspace.simulator import BruteForceSimulator
import unittest
class TestVTK(unittest.TestCase):
def setUp(self):
x, y, z = numpy.mgrid[0:1:0.04, 0:1:0.04, 0:1:0.04]
self.x = x.ravel(); self.y = y.ravel(); self.z = z.ravel()
self.pa = PlanetArray(x=self.x, y=self.y, z=self.z)
def test_vtk_dump(self):
dump_vtk(self.pa, "points")
def test_simulator_custom_vtk_dump(self):
sim = BruteForceSimulator(self.pa, G = 1, dt = 1, sim_name = "test_vtk")
sim.set_data(a_x = 'a_x')
sim.simulate(1, dump_output = True)
|
cenkbircanoglu/similarityPy | refs/heads/master | tests/algorihtm_tests/standart_deviation_test.py | 2 | from unittest import TestCase
from similarityPy.algorithms.standart_deviation import StandartDeviation
from tests import test_logger
__author__ = 'cenk'
class StandartDeviationTest(TestCase):
def setUp(self):
pass
def test_algorithm_with_list(self):
test_logger.debug("StandartDeviationTest - test_algorithm_with_list Starts")
standart_deviation = StandartDeviation()
data_list = [1, 2, 3, 4, 5]
self.assertEquals(1.5811, standart_deviation.calculate(data_list))
data_list = [1, 2, 3, 4]
self.assertEquals(1.291, standart_deviation.calculate(data_list))
test_logger.debug("StandartDeviationTest - test_algorithm_with_list Ends")
def test_algorithm_with_tuple(self):
test_logger.debug("StandartDeviationTest - test_algorithm_with_tuple Starts")
standart_deviation = StandartDeviation()
data_list = [("a", 1), ("b", 2), ("c", 3), ( "d", 4), ("e", 5)]
self.assertEquals(1.5811, standart_deviation.calculate(data_list, is_tuple=True, index=1))
data_list = [("a", "a", 1), ("b", "b", 2), ("c", "c", 3), ("d", "d", 4), ("e", "e", 5)]
self.assertEquals(1.5811, standart_deviation.calculate(data_list, is_tuple=True, index=2))
data_list = [("a", "a", 1), ("b", "b", 2), ("c", "c", 3), ("d", "d", 4)]
self.assertEquals(1.291, standart_deviation.calculate(data_list, is_tuple=True, index=2))
test_logger.debug("StandartDeviationTest - test_algorithm_with_tuple Ends") |
pinterb/st2 | refs/heads/master | st2debug/st2debug/cmd/submit_debug_info.py | 2 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script submits information which helps StackStorm employees debug different
user problems and issues to StackStorm.
By default the following information is included:
- Logs from /var/log/st2
- StackStorm and mistral config file (/etc/st2/st2.conf, /etc/mistral/mistral.conf)
- All the content (integration packs).
- Information about your system and StackStorm installation (Operating system,
Python version, StackStorm version, Mistral version)
Note: This script currently assumes it's running on Linux.
"""
import os
import sys
import shutil
import socket
import logging
import tarfile
import argparse
import platform
import tempfile
import datetime
import httplib
import six
import yaml
import gnupg
import requests
from distutils.spawn import find_executable
import st2common
from st2common.content.utils import get_packs_base_paths
from st2common import __version__ as st2_version
from st2common import config
from st2debug.constants import GPG_KEY
from st2debug.constants import GPG_KEY_FINGERPRINT
from st2debug.constants import S3_BUCKET_URL
from st2debug.utils.fs import copy_files
from st2debug.utils.fs import get_full_file_list
from st2debug.utils.fs import get_dirs_in_path
from st2debug.utils.fs import remove_file
from st2debug.utils.system_info import get_cpu_info
from st2debug.utils.system_info import get_memory_info
from st2debug.utils.system_info import get_package_list
from st2debug.utils.git_utils import get_repo_latest_revision_hash
from st2debug.processors import process_st2_config
from st2debug.processors import process_mistral_config
from st2debug.processors import process_content_pack_dir
LOG = logging.getLogger(__name__)
# Constants
GPG_INSTALLED = find_executable('gpg') is not None
ST2_LOG_FILES_PATH = '/var/log/st2/*.log'
MISTRAL_LOG_FILES_PATH = '/var/log/mistral*.log'
LOG_FILE_PATHS = [
ST2_LOG_FILES_PATH,
MISTRAL_LOG_FILES_PATH
]
ST2_CONFIG_FILE_PATH = '/etc/st2/st2.conf'
MISTRAL_CONFIG_FILE_PATH = '/etc/mistral/mistral.conf'
ST2_CONFIG_FILE_NAME = os.path.split(ST2_CONFIG_FILE_PATH)[1]
MISTRAL_CONFIG_FILE_NAME = os.path.split(MISTRAL_CONFIG_FILE_PATH)[1]
CONFIG_FILE_PATHS = [
ST2_CONFIG_FILE_PATH,
MISTRAL_CONFIG_FILE_PATH
]
# Directory structure inside tarball
DIRECTORY_STRUCTURE = [
'configs/',
'logs/',
'content/'
]
# Options which should be removed from the st2 config
ST2_CONF_OPTIONS_TO_REMOVE = {
'database': ['username', 'password'],
'messaging': ['url']
}
REMOVE_VALUE_NAME = '**removed**'
OUTPUT_FILENAME_TEMPLATE = 'st2-debug-output-%(hostname)s-%(date)s.tar.gz'
try:
config.parse_args(args=[])
except Exception:
pass
def setup_logging():
root = LOG
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
def get_system_information():
"""
Retrieve system information which is included in the report.
:rtype: ``dict``
"""
system_information = {
'hostname': socket.gethostname(),
'operating_system': {},
'hardware': {
'cpu': {},
'memory': {}
},
'python': {},
'stackstorm': {},
'mistral': {}
}
# Operating system information
system_information['operating_system']['system'] = platform.system()
system_information['operating_system']['release'] = platform.release()
system_information['operating_system']['operating_system'] = platform.platform()
system_information['operating_system']['platform'] = platform.system()
system_information['operating_system']['architecture'] = ' '.join(platform.architecture())
if platform.system().lower() == 'linux':
distribution = ' '.join(platform.linux_distribution())
system_information['operating_system']['distribution'] = distribution
system_information['python']['version'] = sys.version.split('\n')[0]
# Hardware information
cpu_info = get_cpu_info()
if cpu_info:
core_count = len(cpu_info)
model = cpu_info[0]['model_name']
system_information['hardware']['cpu'] = {
'core_count': core_count,
'model_name': model
}
else:
# Unsupported platform
system_information['hardware']['cpu'] = 'unsupported platform'
memory_info = get_memory_info()
if memory_info:
total = memory_info['MemTotal'] / 1024
free = memory_info['MemFree'] / 1024
used = (total - free)
system_information['hardware']['memory'] = {
'total': total,
'used': used,
'free': free
}
else:
# Unsupported platform
system_information['hardware']['memory'] = 'unsupported platform'
# StackStorm information
system_information['stackstorm']['version'] = st2_version
st2common_path = st2common.__file__
st2common_path = os.path.dirname(st2common_path)
if 'st2common/st2common' in st2common_path:
# Assume we are running source install
base_install_path = st2common_path.replace('/st2common/st2common', '')
revision_hash = get_repo_latest_revision_hash(repo_path=base_install_path)
system_information['stackstorm']['installation_method'] = 'source'
system_information['stackstorm']['revision_hash'] = revision_hash
else:
package_list = get_package_list(name_startswith='st2')
system_information['stackstorm']['installation_method'] = 'package'
system_information['stackstorm']['packages'] = package_list
# Mistral information
repo_path = '/opt/openstack/mistral'
revision_hash = get_repo_latest_revision_hash(repo_path=repo_path)
system_information['mistral']['installation_method'] = 'source'
system_information['mistral']['revision_hash'] = revision_hash
return system_information
def create_archive(include_logs, include_configs, include_content, include_system_info,
debug=False):
"""
Create an archive with debugging information.
:return: Path to the generated archive.
:rtype: ``str``
"""
date = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
values = {'hostname': socket.gethostname(), 'date': date}
output_file_name = OUTPUT_FILENAME_TEMPLATE % values
output_file_path = os.path.join('/tmp', output_file_name)
# 1. Create temporary directory with the final directory structure where we will move files
# which will be processed and included in the tarball
temp_dir_path = tempfile.mkdtemp()
output_paths = {
'logs': os.path.join(temp_dir_path, 'logs/'),
'configs': os.path.join(temp_dir_path, 'configs/'),
'content': os.path.join(temp_dir_path, 'content/'),
'system_info': os.path.join(temp_dir_path, 'system_info.yaml')
}
for directory_name in DIRECTORY_STRUCTURE:
full_path = os.path.join(temp_dir_path, directory_name)
os.mkdir(full_path)
# 2. Move all the files to the temporary directory
LOG.info('Collecting files...')
# Logs
if include_logs:
LOG.debug('Including log files')
for file_path_glob in LOG_FILE_PATHS:
log_file_list = get_full_file_list(file_path_glob=file_path_glob)
copy_files(file_paths=log_file_list, destination=output_paths['logs'])
# Config files
if include_configs:
LOG.debug('Including config files')
copy_files(file_paths=CONFIG_FILE_PATHS, destination=output_paths['configs'])
# Content
if include_content:
LOG.debug('Including content')
packs_base_paths = get_packs_base_paths()
for index, packs_base_path in enumerate(packs_base_paths, 1):
dst = os.path.join(output_paths['content'], 'dir-%s' % (index))
try:
shutil.copytree(src=packs_base_path, dst=dst)
except IOError:
continue
# System information
if include_system_info:
LOG.debug('Including system info')
system_information = get_system_information()
system_information = yaml.dump(system_information, default_flow_style=False)
with open(output_paths['system_info'], 'w') as fp:
fp.write(system_information)
# 3. Process the copied files and scrub sensitive values
# Configs
st2_config_path = os.path.join(output_paths['configs'], ST2_CONFIG_FILE_NAME)
process_st2_config(config_path=st2_config_path)
mistral_config_path = os.path.join(output_paths['configs'], MISTRAL_CONFIG_FILE_NAME)
process_mistral_config(config_path=mistral_config_path)
# Content
base_pack_dirs = get_dirs_in_path(file_path=output_paths['content'])
for base_pack_dir in base_pack_dirs:
pack_dirs = get_dirs_in_path(file_path=base_pack_dir)
for pack_dir in pack_dirs:
process_content_pack_dir(pack_dir=pack_dir)
# 4. Create a tarball
LOG.info('Creating tarball...')
with tarfile.open(output_file_path, 'w:gz') as tar:
for file_path in output_paths.values():
file_path = os.path.normpath(file_path)
source_dir = file_path
if '.' in file_path:
arcname = os.path.basename(file_path)
else:
arcname = os.path.split(file_path)[-1]
tar.add(source_dir, arcname=arcname)
return output_file_path
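# Editor's note -- for reference, the tarball built above unpacks to the
# layout assembled in `output_paths` (file names are examples only):
#
#   logs/            copied st2 and mistral *.log files
#   configs/         st2.conf, mistral.conf (scrubbed)
#   content/         dir-1/, dir-2/, ... (one per packs base path)
#   system_info.yaml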
def encrypt_archive(archive_file_path, debug=False):
"""
Encrypt archive with debugging information using our public key.
:param archive_file_path: Path to the non-encrypted tarball file.
:type archive_file_path: ``str``
:return: Path to the encrypted archive.
:rtype: ``str``
"""
assert(archive_file_path.endswith('.tar.gz'))
LOG.info('Encrypting tarball...')
gpg = gnupg.GPG(verbose=debug)
# Import our public key
import_result = gpg.import_keys(GPG_KEY)
# pylint: disable=no-member
assert(import_result.count == 1)
encrypted_archive_output_file_path = archive_file_path + '.asc'
with open(archive_file_path, 'rb') as fp:
gpg.encrypt_file(fp,
recipients=GPG_KEY_FINGERPRINT,
always_trust=True,
output=encrypted_archive_output_file_path)
return encrypted_archive_output_file_path
def upload_archive(archive_file_path):
assert(archive_file_path.endswith('.asc'))
LOG.debug('Uploading tarball...')
files = {'file': open(archive_file_path, 'rb')}
file_name = os.path.split(archive_file_path)[1]
url = S3_BUCKET_URL + file_name
assert url.startswith('https://')
response = requests.put(url=url, files=files)
assert response.status_code == httplib.OK
def create_and_review_archive(include_logs, include_configs, include_content, include_system_info,
debug=False):
try:
plain_text_output_path = create_archive(include_logs=include_logs,
include_configs=include_configs,
include_content=include_content,
include_system_info=include_system_info)
except Exception:
LOG.exception('Failed to generate tarball', exc_info=True)
else:
LOG.info('Debug tarball successfully generated and can be reviewed at: %s' %
(plain_text_output_path))
def create_and_upload_archive(include_logs, include_configs, include_content, include_system_info,
debug=False):
try:
plain_text_output_path = create_archive(include_logs=include_logs,
include_configs=include_configs,
include_content=include_content,
include_system_info=include_system_info)
encrypted_output_path = encrypt_archive(archive_file_path=plain_text_output_path)
upload_archive(archive_file_path=encrypted_output_path)
except Exception:
LOG.exception('Failed to upload tarball to StackStorm', exc_info=True)
plain_text_output_path = None
encrypted_output_path = None
else:
tarball_name = os.path.basename(encrypted_output_path)
LOG.info('Debug tarball successfully uploaded to StackStorm (name=%s)' % (tarball_name))
LOG.info('When communicating with support, please let them know the tarball name - %s' %
(tarball_name))
finally:
# Remove tarballs
if plain_text_output_path:
assert(plain_text_output_path.startswith('/tmp'))
remove_file(file_path=plain_text_output_path)
if encrypted_output_path:
assert(encrypted_output_path.startswith('/tmp'))
remove_file(file_path=encrypted_output_path)
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--exclude-logs', action='store_true', default=False,
help='Don\'t include logs in the generated tarball')
parser.add_argument('--exclude-configs', action='store_true', default=False,
help='Don\'t include configs in the generated tarball')
parser.add_argument('--exclude-content', action='store_true', default=False,
help='Don\'t include content packs in the generated tarball')
parser.add_argument('--exclude-system-info', action='store_true', default=False,
help='Don\'t include system information in the generated tarball')
parser.add_argument('--yes', action='store_true', default=False,
help='Run in non-interactive mode and answer "yes" to all the questions')
parser.add_argument('--review', action='store_true', default=False,
help='Generate the tarball, but don\'t encrypt and upload it')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug mode')
args = parser.parse_args()
arg_names = ['exclude_logs', 'exclude_configs', 'exclude_content',
'exclude_system_info']
abort = True
for arg_name in arg_names:
value = getattr(args, arg_name, False)
abort &= value
if abort:
print('Generated tarball would be empty. Aborting.')
sys.exit(2)
submited_content = [name.replace('exclude_', '') for name in arg_names if
not getattr(args, name, False)]
submited_content = ', '.join(submited_content)
if not args.yes and not args.review:
# When not running in review mode, GPG needs to be installed and
# available
if not GPG_INSTALLED:
msg = ('"gpg" binary not found, can\'t proceed. Make sure "gpg" is installed '
'and available in PATH.')
raise ValueError(msg)
print('This will submit the following information to StackStorm: %s' % (submited_content))
value = six.moves.input('Are you sure you want to proceed? [y/n] ')
if value.strip().lower() not in ['y', 'yes']:
print('Aborting')
sys.exit(1)
setup_logging()
if args.review:
create_and_review_archive(include_logs=not args.exclude_logs,
include_configs=not args.exclude_configs,
include_content=not args.exclude_content,
include_system_info=not args.exclude_system_info,
debug=args.debug)
else:
create_and_upload_archive(include_logs=not args.exclude_logs,
include_configs=not args.exclude_configs,
include_content=not args.exclude_content,
include_system_info=not args.exclude_system_info,
debug=args.debug)
|
SimenB/thefuck | refs/heads/master | tests/rules/test_brew_update_formula.py | 4 | import pytest
from thefuck.types import Command
from thefuck.rules.brew_update_formula import get_new_command, match
output = ("Error: This command updates brew itself, and does not take formula"
" names.\nUse 'brew upgrade thefuck'.")
def test_match():
command = Command('brew update thefuck', output)
assert match(command)
@pytest.mark.parametrize('script', [
'brew upgrade foo',
'brew update'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, formula, ', [
('brew update foo', 'foo'),
('brew update bar zap', 'bar zap')])
def test_get_new_command(script, formula):
command = Command(script, output)
new_command = 'brew upgrade {}'.format(formula)
assert get_new_command(command) == new_command
|
michaelBenin/autopep8 | refs/heads/master | test/acid_github.py | 1 | #!/usr/bin/env python
"""Run acid test against latest repositories on Github."""
import os
import re
import subprocess
import sys
import acid
TMP_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'github_tmp')
def latest_repositories():
"""Return names of latest released repositories on Github."""
import requests
try:
for result in requests.get('https://github.com/timeline.json').json():
try:
repository = result['repository']
size = repository['size']
if 0 < size < 1000 and repository['language'] == 'Python':
yield repository['url']
except KeyError:
continue
except (requests.exceptions.RequestException, ValueError):
# Ignore GitHub server flakiness.
pass
def download_repository(name, output_directory):
"""Download repository to output_directory.
Raise CalledProcessError on failure.
"""
subprocess.check_call(['git', 'clone', name],
cwd=output_directory)
def interesting(repository_path):
"""Return True if interesting."""
print(repository_path)
process = subprocess.Popen(['git', 'log'],
cwd=repository_path,
stdout=subprocess.PIPE)
try:
return len(re.findall(
'pep8',
process.communicate()[0].decode('utf-8'))) > 2
except UnicodeDecodeError:
return False
def complete(repository):
"""Fill in missing paths of URL."""
if ':' in repository:
return repository
else:
assert '/' in repository
return 'https://github.com/' + repository.strip()
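# Editor's note -- illustrative behaviour of complete() with assumed inputs:
#   complete('user/repo')            -> 'https://github.com/user/repo'
#   complete('https://host/u/repo')  -> 'https://host/u/repo' (unchanged)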
def main():
"""Run main."""
try:
os.mkdir(TMP_DIR)
except OSError:
pass
args = acid.process_args()
if args.paths:
names = [complete(a) for a in args.paths]
else:
names = None
checked_repositories = []
skipped_repositories = []
interesting_repositories = []
while True:
if args.paths:
if not names:
break
else:
while not names:
# Continually populate if user did not specify a repository
# explicitly.
names = [p for p in latest_repositories()
if p not in checked_repositories and
p not in skipped_repositories]
if not names:
import time
time.sleep(1)
repository_name = names.pop(0)
print(repository_name)
user_tmp_dir = os.path.join(
TMP_DIR,
os.path.basename(os.path.split(repository_name)[0]))
try:
os.mkdir(user_tmp_dir)
except OSError:
pass
repository_tmp_dir = os.path.join(
user_tmp_dir,
os.path.basename(repository_name))
try:
os.mkdir(repository_tmp_dir)
except OSError:
print('Skipping already checked repository')
skipped_repositories.append(repository_name)
continue
try:
download_repository(repository_name,
output_directory=repository_tmp_dir)
except subprocess.CalledProcessError:
print('ERROR: git clone failed')
continue
if acid.check([repository_tmp_dir], args):
checked_repositories.append(repository_name)
if interesting(
os.path.join(repository_tmp_dir,
os.path.basename(repository_name))):
interesting_repositories.append(repository_name)
else:
return 1
if checked_repositories:
print('\nTested repositories:')
for name in checked_repositories:
print(' ' + name +
(' *' if name in interesting_repositories else ''))
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|