repo_name (string, length 5-92) | path (string, length 4-232) | copies (string, 19 distinct values) | size (string, length 4-7) | content (string, length 721-1.04M) | license (string, 15 distinct values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
twitter/pants | src/python/pants/base/workunit.py | 1 | 9444 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import time
import uuid
from builtins import object, range
from collections import namedtuple
from pants.util.dirutil import safe_mkdir_for
from pants.util.memo import memoized_method
from pants.util.rwbuf import FileBackedRWBuf
class WorkUnitLabel(object):
"""
:API: public
"""
# Labels describing a workunit. Reporting code can use this to decide how to display
# information about this workunit.
#
# Note that a workunit can have multiple labels where this makes sense, e.g., TOOL, COMPILER
# and NAILGUN.
SETUP = 'SETUP' # Parsing build files etc.
GOAL = 'GOAL' # Executing a goal.
TASK = 'TASK' # Executing a task within a goal.
GROUP = 'GROUP' # Executing a group.
BOOTSTRAP = 'BOOTSTRAP' # Invocation of code to fetch a tool.
TOOL = 'TOOL' # Single invocations of a tool.
MULTITOOL = 'MULTITOOL' # Multiple consecutive invocations of the same tool.
COMPILER = 'COMPILER' # Invocation of a compiler.
LINKER = 'LINKER' # Invocation of a linker.
TEST = 'TEST' # Running a test.
JVM = 'JVM' # Running a tool via the JVM.
NAILGUN = 'NAILGUN' # Running a tool via nailgun.
RUN = 'RUN' # Running a binary.
REPL = 'REPL' # Running a repl.
PREP = 'PREP' # Running a prep command
LINT = 'LINT' # Running a lint or static analysis tool.
# Do not attempt to print workunit's label upon invocation
# This has nothing to do with a process's own stderr/stdout.
SUPPRESS_LABEL = 'SUPPRESS_LABEL'
@classmethod
@memoized_method
def keys(cls):
"""
:API: public
"""
return [key for key in dir(cls) if not key.startswith('_') and key.isupper()]
class WorkUnit(object):
"""A hierarchical unit of work, for the purpose of timing and reporting.
A WorkUnit can be subdivided into further WorkUnits. The WorkUnit concept is deliberately
decoupled from the goal/task hierarchy. This allows some flexibility in having, say,
sub-units inside a task. E.g., there might be one WorkUnit representing an entire pants run,
and that can be subdivided into WorkUnits for each goal. Each of those can be subdivided into
WorkUnits for each task, and a task can subdivide that into further work units, if finer-grained
timing and reporting is needed.
:API: public
"""
# The outcome of a workunit.
# It can only be set to a new value <= the old one.
ABORTED = 0
FAILURE = 1
WARNING = 2
SUCCESS = 3
UNKNOWN = 4
# Generic workunit log config.
# log_level: Display log messages up to this level.
# color: log color settings.
LogConfig = namedtuple('LogConfig', ['level', 'colors'])
@staticmethod
def outcome_string(outcome):
"""Returns a human-readable string describing the outcome.
:API: public
"""
return ['ABORTED', 'FAILURE', 'WARNING', 'SUCCESS', 'UNKNOWN'][outcome]
def __init__(self, run_info_dir, parent, name, labels=None, cmd='', log_config=None):
"""
- run_info_dir: The path of the run_info_dir from the RunTracker that tracks this WorkUnit.
- parent: The containing workunit, if any. E.g., 'compile' might contain 'java', 'scala' etc.,
'scala' might contain 'compile', 'split' etc.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
- log_config: An optional tuple of registered options affecting reporting output.
"""
self._outcome = WorkUnit.UNKNOWN
self.run_info_dir = run_info_dir
self.parent = parent
self.children = []
self.name = name
self.labels = set(labels or ())
self.cmd = cmd
self.id = uuid.uuid4()
self.log_config = log_config
# In seconds since the epoch. Doubles, to account for fractional seconds.
self.start_time = 0
self.end_time = 0
# A workunit may have multiple outputs, which we identify by a name.
# E.g., a tool invocation may have 'stdout', 'stderr', 'debug_log' etc.
self._outputs = {} # name -> output buffer.
self._output_paths = {}
# Do this last, as the parent's _self_time() might get called before we're
# done initializing ourselves.
# TODO: Ensure that a parent can't be ended before all its children are.
if self.parent:
if not log_config:
self.log_config = self.parent.log_config
self.parent.children.append(self)
def has_label(self, label):
"""
:API: public
"""
return label in self.labels
def start(self, start_time=None):
"""Mark the time at which this workunit started."""
self.start_time = start_time or time.time()
def end(self):
"""Mark the time at which this workunit ended."""
self.end_time = time.time()
return self.path(), self.duration(), self._self_time(), self.has_label(WorkUnitLabel.TOOL)
def cleanup(self):
"""Cleanup by closing all output streams."""
for output in self._outputs.values():
output.close()
def outcome(self):
"""Returns the outcome of this workunit.
:API: public
"""
return self._outcome
def set_outcome(self, outcome):
"""Set the outcome of this work unit.
We can set the outcome on a work unit directly, but that outcome will also be affected by
those of its subunits. The right thing happens: The outcome of a work unit is the
worst outcome of any of its subunits and any outcome set on it directly."""
if outcome not in range(0, 5):
raise Exception('Invalid outcome: {}'.format(outcome))
if outcome < self._outcome:
self._outcome = outcome
if self.parent: self.parent.set_outcome(self._outcome)
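# Worked example (added; illustrative only): outcomes only move toward the
# worst value (ABORTED=0) and propagate upward, so a failing child drags its
# ancestors down while a later, better value is ignored.
#
#   child.set_outcome(WorkUnit.FAILURE)   # parent outcome becomes FAILURE too
#   parent.set_outcome(WorkUnit.SUCCESS)  # no effect: SUCCESS (3) > FAILURE (1)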
_valid_name_re = re.compile(r'\w+')
def output(self, name):
"""Returns the output buffer for the specified output name (e.g., 'stdout'), creating it if
necessary.
:API: public
"""
m = WorkUnit._valid_name_re.match(name)
if not m or m.group(0) != name:
raise Exception('Invalid output name: {}'.format(name))
if name not in self._outputs:
workunit_name = re.sub(r'\W', '_', self.name)
path = os.path.join(self.run_info_dir,
'tool_outputs', '{workunit_name}-{id}.{output_name}'
.format(workunit_name=workunit_name,
id=self.id,
output_name=name))
safe_mkdir_for(path)
self._outputs[name] = FileBackedRWBuf(path)
self._output_paths[name] = path
return self._outputs[name]
def outputs(self):
"""Returns the map of output name -> output buffer.
:API: public
"""
return self._outputs
def output_paths(self):
"""Returns the map of output name -> path of the output file.
:API: public
"""
return self._output_paths
def duration(self):
"""Returns the time (in fractional seconds) spent in this workunit and its children.
:API: public
"""
return (self.end_time or time.time()) - self.start_time
@property
def start_time_string(self):
"""A convenient string representation of start_time.
:API: public
"""
return time.strftime('%H:%M:%S', time.localtime(self.start_time))
@property
def start_delta_string(self):
"""A convenient string representation of how long after the run started we started.
:API: public
"""
delta = int(self.start_time) - int(self.root().start_time)
return '{:02}:{:02}'.format(int(delta / 60), delta % 60)
def root(self):
"""
:API: public
"""
ret = self
while ret.parent is not None:
ret = ret.parent
return ret
def ancestors(self):
"""Returns a list consisting of this workunit and those enclosing it, up to the root.
:API: public
"""
ret = []
workunit = self
while workunit is not None:
ret.append(workunit)
workunit = workunit.parent
return ret
def path(self):
"""Returns a path string for this workunit, E.g., 'all:compile:jvm:scalac'.
:API: public
"""
return ':'.join(reversed([w.name for w in self.ancestors()]))
def unaccounted_time(self):
"""Returns non-leaf time spent in this workunit.
This assumes that all major work should be done in leaves.
TODO: Is this assumption valid?
:API: public
"""
return 0 if len(self.children) == 0 else self._self_time()
def to_dict(self):
"""Useful for providing arguments to templates.
:API: public
"""
ret = {}
for key in ['name', 'cmd', 'id', 'start_time', 'end_time',
'outcome', 'start_time_string', 'start_delta_string']:
val = getattr(self, key)
ret[key] = val() if hasattr(val, '__call__') else val
ret['parent'] = self.parent.to_dict() if self.parent else None
return ret
def _self_time(self):
"""Returns the time spent in this workunit outside of any children."""
return self.duration() - sum([child.duration() for child in self.children])
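# Worked example (added; illustrative numbers): a workunit whose duration() is
# 10s and whose two children ran for 4s and 3s has _self_time() == 10 - (4 + 3)
# == 3 seconds; unaccounted_time() reports that value only for non-leaf units.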
| apache-2.0 | 8,871,256,780,476,791,000 | 31.453608 | 98 | 0.64443 | false |
Aravinthu/odoo | addons/website_sale/models/product.py | 1 | 11241 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.addons import decimal_precision as dp
from odoo.tools import pycompat
from odoo.tools.translate import html_translate
class ProductStyle(models.Model):
_name = "product.style"
name = fields.Char(string='Style Name', required=True)
html_class = fields.Char(string='HTML Classes')
class ProductPricelist(models.Model):
_inherit = "product.pricelist"
def _default_website(self):
return self.env['website'].search([], limit=1)
website_id = fields.Many2one('website', string="website", default=_default_website)
code = fields.Char(string='E-commerce Promotional Code', groups="base.group_user")
selectable = fields.Boolean(help="Allow the end user to choose this price list")
def clear_cache(self):
# website._get_pl() is cached to avoid recomputing the list of available
# pricelists on every request. So we need to invalidate the cache whenever
# the website pricelist configuration changes, to force a recomputation.
website = self.env['website']
website._get_pl_partner_order.clear_cache(website)
@api.model
def create(self, data):
res = super(ProductPricelist, self).create(data)
self.clear_cache()
return res
@api.multi
def write(self, data):
res = super(ProductPricelist, self).write(data)
self.clear_cache()
return res
@api.multi
def unlink(self):
res = super(ProductPricelist, self).unlink()
self.clear_cache()
return res
class ProductPublicCategory(models.Model):
_name = "product.public.category"
_inherit = ["website.seo.metadata"]
_description = "Website Product Category"
_order = "sequence, name"
name = fields.Char(required=True, translate=True)
parent_id = fields.Many2one('product.public.category', string='Parent Category', index=True)
child_id = fields.One2many('product.public.category', 'parent_id', string='Children Categories')
sequence = fields.Integer(help="Gives the sequence order when displaying a list of product categories.")
# NOTE: there is no 'default image', because by default we don't show
# thumbnails for categories. However if we have a thumbnail for at least one
# category, then we display a default image on the other, so that the
# buttons have consistent styling.
# In this case, the default image is set by the js code.
image = fields.Binary(attachment=True, help="This field holds the image used as image for the category, limited to 1024x1024px.")
image_medium = fields.Binary(string='Medium-sized image', attachment=True,
help="Medium-sized image of the category. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary(string='Small-sized image', attachment=True,
help="Small-sized image of the category. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ProductPublicCategory, self).write(vals)
@api.constrains('parent_id')
def check_parent_id(self):
if not self._check_recursion():
raise ValueError(_('Error ! You cannot create recursive categories.'))
@api.multi
def name_get(self):
res = []
for category in self:
names = [category.name]
parent_category = category.parent_id
while parent_category:
names.append(parent_category.name)
parent_category = parent_category.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
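# Example (added; category names are hypothetical): a category "Shoes" whose
# parent is "Apparel" is displayed as "Apparel / Shoes".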
class ProductTemplate(models.Model):
_inherit = ["product.template", "website.seo.metadata", 'website.published.mixin', 'rating.mixin']
_order = 'website_published desc, website_sequence desc, name'
_name = 'product.template'
_mail_post_access = 'read'
website_description = fields.Html('Description for the website', sanitize_attributes=False, translate=html_translate)
alternative_product_ids = fields.Many2many('product.template', 'product_alternative_rel', 'src_id', 'dest_id',
string='Alternative Products', help='Suggest more expensive alternatives to '
'your customers (upsell strategy). Those products show up on the product page.')
accessory_product_ids = fields.Many2many('product.product', 'product_accessory_rel', 'src_id', 'dest_id',
string='Accessory Products', help='Accessories show up when the customer reviews the '
'cart before paying (cross-sell strategy, e.g. for computers: mouse, keyboard, etc.). '
'An algorithm figures out a list of accessories based on all the products added to cart.')
website_size_x = fields.Integer('Size X', default=1)
website_size_y = fields.Integer('Size Y', default=1)
website_style_ids = fields.Many2many('product.style', string='Styles')
website_sequence = fields.Integer('Website Sequence', help="Determine the display order in the Website E-commerce",
default=lambda self: self._default_website_sequence())
public_categ_ids = fields.Many2many('product.public.category', string='Website Product Category',
help="Categories can be published on the Shop page (online catalog grid) to help "
"customers find all the items within a category. To publish them, go to the Shop page, "
"hit Customize and turn *Product Categories* on. A product can belong to several categories.")
product_image_ids = fields.One2many('product.image', 'product_tmpl_id', string='Images')
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
def _website_price(self):
# First filter out the ones that have no variant:
# This makes sure that every template below has a corresponding product in the zipped result.
self = self.filtered('product_variant_id')
# use mapped(), which returns a recordset limited to the first variants, so we prefetch only those (and not every product_variant_ids)
for template, product in pycompat.izip(self, self.mapped('product_variant_id')):
template.website_price = product.website_price
template.website_public_price = product.website_public_price
def _default_website_sequence(self):
self._cr.execute("SELECT MIN(website_sequence) FROM %s" % self._table)
min_sequence = self._cr.fetchone()[0]
return min_sequence and min_sequence - 1 or 10
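# Worked example (added; illustrative values): if the lowest existing
# website_sequence is 5, a newly created template defaults to 4 so it sorts
# first; if there are no templates yet (MIN returns NULL), it defaults to 10.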
def set_sequence_top(self):
self.website_sequence = self.sudo().search([], order='website_sequence desc', limit=1).website_sequence + 1
def set_sequence_bottom(self):
self.website_sequence = self.sudo().search([], order='website_sequence', limit=1).website_sequence - 1
def set_sequence_up(self):
previous_product_tmpl = self.sudo().search(
[('website_sequence', '>', self.website_sequence), ('website_published', '=', self.website_published)],
order='website_sequence', limit=1)
if previous_product_tmpl:
previous_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, previous_product_tmpl.website_sequence
else:
self.set_sequence_top()
def set_sequence_down(self):
next_product_tmpl = self.search([('website_sequence', '<', self.website_sequence), ('website_published', '=', self.website_published)], order='website_sequence desc', limit=1)
if next_product_tmpl:
next_product_tmpl.website_sequence, self.website_sequence = self.website_sequence, next_product_tmpl.website_sequence
else:
return self.set_sequence_bottom()
@api.multi
def _compute_website_url(self):
super(ProductTemplate, self)._compute_website_url()
for product in self:
product.website_url = "/shop/product/%s" % (product.id,)
class Product(models.Model):
_inherit = "product.product"
website_price = fields.Float('Website price', compute='_website_price', digits=dp.get_precision('Product Price'))
website_public_price = fields.Float('Website public price', compute='_website_price', digits=dp.get_precision('Product Price'))
def _website_price(self):
qty = self._context.get('quantity', 1.0)
partner = self.env.user.partner_id
current_website = self.env['website'].get_current_website()
pricelist = current_website.get_current_pricelist()
company_id = current_website.company_id
context = dict(self._context, pricelist=pricelist.id, partner=partner)
self2 = self.with_context(context) if self._context != context else self
ret = self.env.user.has_group('sale.group_show_price_subtotal') and 'total_excluded' or 'total_included'
for p, p2 in pycompat.izip(self, self2):
taxes = partner.property_account_position_id.map_tax(p.taxes_id.filtered(lambda x: x.company_id == company_id))
p.website_price = taxes.compute_all(p2.price, pricelist.currency_id, quantity=qty, product=p2, partner=partner)[ret]
p.website_public_price = taxes.compute_all(p2.lst_price, quantity=qty, product=p2, partner=partner)[ret]
@api.multi
def website_publish_button(self):
self.ensure_one()
return self.product_tmpl_id.website_publish_button()
class ProductAttribute(models.Model):
_inherit = "product.attribute"
type = fields.Selection([('radio', 'Radio'), ('select', 'Select'), ('color', 'Color')], default='radio')
class ProductAttributeValue(models.Model):
_inherit = "product.attribute.value"
html_color = fields.Char(string='HTML Color Index', oldname='color', help="Here you can set a "
"specific HTML color index (e.g. #ff0000) to display the color on the website if the "
"attibute type is 'Color'.")
class ProductImage(models.Model):
_name = 'product.image'
name = fields.Char('Name')
image = fields.Binary('Image', attachment=True)
product_tmpl_id = fields.Many2one('product.template', 'Related Product', copy=True)
| agpl-3.0 | -4,400,069,424,017,847,000 | 48.96 | 183 | 0.649675 | false |
jumpstarter-io/horizon | openstack_dashboard/dashboards/project/access_and_security/tests.py | 1 | 7431 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy # noqa
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security \
import api_access
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
class AccessAndSecurityTests(test.TestCase):
def setUp(self):
super(AccessAndSecurityTests, self).setUp()
def test_index(self):
keypairs = self.keypairs.list()
sec_groups = self.security_groups.list()
floating_ips = self.floating_ips.list()
quota_data = self.quota_usages.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'security_group_list')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api.nova, 'server_list')
self.mox.StubOutWithMock(quotas, 'tenant_quota_usages')
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.nova.keypair_list(IsA(http.HttpRequest)).AndReturn(keypairs)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(sec_groups)
quotas.tenant_quota_usages(IsA(http.HttpRequest)).MultipleTimes()\
.AndReturn(quota_data)
api.base.is_service_enabled(IsA(http.HttpRequest),
'network').MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest),
'ec2').MultipleTimes().AndReturn(True)
self.mox.ReplayAll()
url = reverse('horizon:project:access_and_security:index')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/access_and_security/index.html')
self.assertItemsEqual(res.context['keypairs_table'].data, keypairs)
self.assertItemsEqual(res.context['security_groups_table'].data,
sec_groups)
self.assertItemsEqual(res.context['floating_ips_table'].data,
floating_ips)
self.assertTrue(any(map(
lambda x: isinstance(x, api_access.tables.DownloadEC2),
res.context['endpoints_table'].get_table_actions()
)))
def test_index_with_ec2_disabled(self):
keypairs = self.keypairs.list()
sec_groups = self.security_groups.list()
floating_ips = self.floating_ips.list()
quota_data = self.quota_usages.first()
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'security_group_list')
self.mox.StubOutWithMock(api.nova, 'keypair_list')
self.mox.StubOutWithMock(api.nova, 'server_list')
self.mox.StubOutWithMock(quotas, 'tenant_quota_usages')
self.mox.StubOutWithMock(api.base, 'is_service_enabled')
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([self.servers.list(), False])
api.nova.keypair_list(IsA(http.HttpRequest)).AndReturn(keypairs)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(sec_groups)
quotas.tenant_quota_usages(IsA(http.HttpRequest)).MultipleTimes()\
.AndReturn(quota_data)
api.base.is_service_enabled(IsA(http.HttpRequest),
'network').MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest),
'ec2').MultipleTimes().AndReturn(False)
self.mox.ReplayAll()
url = reverse('horizon:project:access_and_security:index')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/access_and_security/index.html')
self.assertItemsEqual(res.context['keypairs_table'].data, keypairs)
self.assertItemsEqual(res.context['security_groups_table'].data,
sec_groups)
self.assertItemsEqual(res.context['floating_ips_table'].data,
floating_ips)
self.assertFalse(any(map(
lambda x: isinstance(x, api_access.tables.DownloadEC2),
res.context['endpoints_table'].get_table_actions()
)))
def test_association(self):
servers = [api.nova.Server(s, self.request)
for s in self.servers.list()]
# Add duplicate instance name to test instance name with [ID]
# Change id and private IP
server3 = api.nova.Server(self.servers.first(), self.request)
server3.id = 101
server3.addresses = deepcopy(server3.addresses)
server3.addresses['private'][0]['addr'] = "10.0.0.5"
servers.append(server3)
targets = [api.nova.FloatingIpTarget(s) for s in servers]
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
self.mox.StubOutWithMock(api.network, 'floating_ip_target_list')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(targets)
self.mox.ReplayAll()
res = self.client.get(reverse("horizon:project:access_and_security:"
"floating_ips:associate"))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res,
'<option value="1">server_1 (1)</option>')
self.assertContains(res,
'<option value="101">server_1 (101)</option>')
self.assertContains(res, '<option value="2">server_2 (2)</option>')
class AccessAndSecurityNeutronProxyTests(AccessAndSecurityTests):
def setUp(self):
super(AccessAndSecurityNeutronProxyTests, self).setUp()
self.floating_ips = self.floating_ips_uuid
| apache-2.0 | 4,643,339,110,446,456,000 | 44.310976 | 78 | 0.647288 | false |
rzabini/gradle-sphinx | src/main/jython/os.py | 1 | 25197 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
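# Illustrative usage sketch (added for clarity; not part of the module): the
# portable idiom the docstring above describes: build paths with os.path and
# read the environment through os.environ / os.getenv. Paths are examples.
#
#   import os
#   cfg = os.path.join(os.curdir, 'etc', 'app.conf')   # portable path join
#   home = os.getenv('HOME', os.curdir)                 # env var with default
#   for entry in os.listdir(os.curdir):
#       print entry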
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
name = 'java'
if 'posix' in _names:
_name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
_name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
_name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
_name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
_name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
elif 'ibmi' in _names:
_name = 'ibmi'
linesep = '\n'
from ibmi import *
try:
from ibmi import _exit
except ImportError:
pass
import posixpath as path
import ibmi
__all__.extend(_get_exports_list(ibmi))
del ibmi
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
name=name.replace("\r","\\r")
if path.exists(name):
return
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
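# Example (added; paths are hypothetical): makedirs creates every missing
# intermediate segment, so
#   makedirs('/tmp/a/b/c')    # creates /tmp/a, /tmp/a/b and /tmp/a/b/c
# whereas a plain mkdir('/tmp/a/b/c') fails unless /tmp/a/b already exists.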
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned way until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def _exists(name):
# CPython eval's the name, whereas looking in __all__ works for
# Jython and is much faster
return name in __all__
if _exists('execv'):
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if _name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif _name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
import UserDict
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
def __delitem__(self, key):
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
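# Illustrative sketch (added; program names and arguments are examples):
# the mode flag decides whether the caller waits for the child.
#
#   pid = spawnvp(P_NOWAIT, 'sleep', ['sleep', '10'])   # returns child pid
#   status = spawnvp(P_WAIT, 'ls', ['ls', '-l'])        # waits, exit code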
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if sys.platform.startswith('java') or _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
close(_urandomfd)
return bytes
# Supply os.popen()
def popen(cmd, mode='r', bufsize=-1):
"""popen(command [, mode='r' [, bufsize]]) -> pipe
Open a pipe to/from a command returning a file object.
"""
if not isinstance(cmd, (str, unicode)):
raise TypeError('invalid cmd type (%s, expected string)' % type(cmd))
if mode not in ('r', 'w'):
raise ValueError("invalid mode %r" % mode)
import subprocess
if mode == 'r':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdout=subprocess.PIPE)
fp = proc.stdout
elif mode == 'w':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdin=subprocess.PIPE)
fp = proc.stdin
# files from subprocess are in binary mode but popen needs text mode
fp = fdopen(fp.fileno(), mode, bufsize)
return _wrap_close(fp, proc)
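# Illustrative usage (added; the command is an example): popen wraps subprocess
# and returns a file-like object whose close() reports the exit status.
#
#   fp = popen('echo hello')    # read mode by default
#   output = fp.read()
#   status = fp.close()         # None on success, the return code otherwise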
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close(object):
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if _name == 'nt':
return returncode
else:
return returncode
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
| apache-2.0 | 1,019,734,515,747,745,500 | 33.706612 | 83 | 0.604794 | false |
barneygale/cedar | cedar/cedar.py | 1 | 3829 | import os
#temp directory
import tempfile
import shutil
import contextlib
#jar downloader
import urllib2
import json
#cedar code
from blocks import BlockColours
from spiral import Spiral
from world import World
from wrapper import Wrapper
from lib.png import Writer
@contextlib.contextmanager
def tempdir(*a, **k):
tmp = tempfile.mkdtemp(*a, **k)
try:
yield tmp
finally:
shutil.rmtree(tmp)
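# Illustrative usage of the helper above (added; the prefix and file name are
# examples): the temporary directory is removed when the with-block exits,
# even if an exception is raised.
#
#   with tempdir(prefix='example-') as tmp:
#       open(os.path.join(tmp, 'scratch.txt'), 'w').close()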
class Cedar:
def __init__(self, center=None, jar=None, output='{seed}.png', radius=1000, resolution=2, seed=None, verbosity=1):
self.verbosity = verbosity
# Resolve paths before we change directory
output = os.path.abspath(output)
if jar:
jar = os.path.abspath(jar)
#Make a temporary directory and switch to it
self.log(1, 'creating temporary directory')
with tempdir(prefix='cedar-tmp-') as tmp:
os.chdir(tmp)
#Copy in the server jar, or download it
if jar:
self.log(1, 'copying jar')
source = open(jar, 'rb')
else:
self.log(1, 'downloading jar')
source = self.download_server()
dest = open('minecraft_server.jar', 'wb')
dest.write(source.read())
source.close()
dest.close()
#Write server.properties
self.log(1, 'writing server.properties')
with open('server.properties', 'w') as props:
props.write('level-seed={seed}\nlisten-port=65349\n'.format(seed=seed if seed else ''))
#Do a first-run of the server
self.log(1, 'initialising world')
wrapper = Wrapper(self.log)
wrapper.run()
#Grab spawn point and seed, if they haven't been specified
world = World(self.log)
if not center:
center = world.spawn
if not seed:
seed = world.seed
center = tuple((c//16)*16 for c in center)
output = output.format(seed=seed)
#Open output image
img = open(output, 'wb')
#Generate the world!
path = list(Spiral.spiral(radius, center))
for i, spawn in enumerate(path):
self.log(1, "generating world ({0} of {1})".format(i+1, len(path)))
world.set_spawn(spawn)
wrapper.run()
#Generate the carto!
colours = BlockColours()
size = 2 * radius // resolution
writer = Writer(size, size)
pixels = [0] * (size * size * 3)
for b_x, b_z, b_data, b_meta, b_height, b_biome in world.carto(radius, center, resolution):
try:
colour = colours.get_colour(b_data, b_meta)
except KeyError:
self.log(1, "unknown block at {0}, {1}! id: {2} meta: {3}".format(b_x, b_z, b_data, b_meta))
continue
b_x = (b_x + radius - center[0]) // resolution
b_z = (b_z + radius - center[1]) // resolution
for i, c in enumerate(colour):
pixels[i + 3 * (b_x + size*b_z)] = c
writer.write_array(img, pixels)
img.close()
self.log(1, "saved as {0}".format(output))
def download_server(self):
base = 'http://s3.amazonaws.com/Minecraft.Download/versions/'
#get version
data = urllib2.urlopen(base + 'versions.json').read()
data = json.loads(data)
version = data['latest']['release']
#get server
return urllib2.urlopen(base + '{0}/minecraft_server.{0}.jar'.format(version))
def log(self, level, msg):
if self.verbosity >= level:
print "... {0}".format(msg) | mit | -7,296,568,467,212,249,000 | 30.916667 | 118 | 0.538783 | false |
michal-ruzicka/archivematica | src/MCPClient/lib/clientScripts/archivematicaCreateProcessedStructmap.py | 1 | 3081 | #!/usr/bin/python -OO
from __future__ import print_function
import os
import sys
from custom_handlers import get_script_logger
from archivematicaCreateMETS import createFileSec, each_child
from archivematicaCreateMETS2 import createDigiprovMD
import archivematicaXMLNamesSpace as ns
from lxml import etree
def create_amdSecs(path, file_group_identifier, base_path, base_path_name, sip_uuid):
amdSecs = []
for child in each_child(path, file_group_identifier, base_path, base_path_name, sip_uuid):
if isinstance(child, basestring): # directory
amdSecs.extend(create_amdSecs(child, file_group_identifier, base_path, base_path_name, sip_uuid))
else: # file
admid = "digiprov-" + child.uuid
amdSec = etree.Element(ns.metsBNS + 'amdSec',
ID=admid)
amdSec.extend(createDigiprovMD(child.uuid))
amdSecs.append(amdSec)
return amdSecs
if __name__ == '__main__':
logger = get_script_logger("archivematica.mcp.client.createProcessedStructmap")
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-s", "--basePath", action="store", dest="basePath", default="")
parser.add_argument("-b", "--basePathString", action="store", dest="basePathString", default="SIPDirectory") # transferDirectory
parser.add_argument("-f", "--fileGroupIdentifier", action="store", dest="fileGroupIdentifier", default="sipUUID") # transferUUID
parser.add_argument("-S", "--sipUUID", action="store", dest="sipUUID", default="")
parser.add_argument("-x", "--xmlFile", action="store", dest="xmlFile", default="")
opts = parser.parse_args()
if not os.path.exists(opts.xmlFile):
print("Unable to find specified METS file:", opts.xmlFile, file=sys.stderr)
sys.exit(1)
try:
parser = etree.XMLParser(remove_blank_text=True)
doc = etree.parse(opts.xmlFile, parser)
except (etree.ParseError, etree.XMLSyntaxError):
print("Unable to parse XML file at path:", opts.xmlFile, file=sys.stderr)
sys.exit(1)
fileGrp = doc.find(".//mets:fileSec/mets:fileGrp", namespaces=ns.NSMAP)
root = doc.getroot()
structMap = etree.SubElement(root, ns.metsBNS + "structMap",
TYPE="physical",
LABEL="processed")
structMapDiv = etree.SubElement(structMap, ns.metsBNS + "div")
basePathString = "%%%s%%" % (opts.basePathString)
createFileSec(opts.basePath, opts.fileGroupIdentifier, opts.basePath, basePathString, fileGrp, structMapDiv, opts.sipUUID)
# insert <amdSec>s after the <metsHdr>, which must be the first element
# within the <mets> element if present.
for el in create_amdSecs(opts.basePath, opts.fileGroupIdentifier, opts.basePath, basePathString, opts.sipUUID):
root.insert(1, el)
with open(opts.xmlFile, "w") as f:
f.write(etree.tostring(doc,
pretty_print=True,
xml_declaration=True))
| agpl-3.0 | -3,433,260,524,036,228,600 | 41.205479 | 133 | 0.654982 | false |
jimsize/PySolFC | pysollib/util.py | 1 | 5339 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
import sys
import os
# PySol imports
from pysollib.settings import DATA_DIRS, TOOLKIT
from pysollib.mfxutil import Image
from pysollib.mygettext import _
# ************************************************************************
# * constants
# ************************************************************************
# Suits values are 0-3. This maps to colors 0-1.
SUITS = (_("Club"), _("Spade"), _("Heart"), _("Diamond"))
COLORS = (_("black"), _("red"))
# Card ranks are 0-12. We also define symbolic names for the picture cards.
RANKS = (_("Ace"), "2", "3", "4", "5", "6", "7", "8", "9", "10",
_("Jack"), _("Queen"), _("King"))
ACE = 0
JACK = 10
QUEEN = 11
KING = 12
# Special values for Stack.cap:
ANY_SUIT = -1
ANY_COLOR = -1
ANY_RANK = -1
NO_SUIT = 999999 # no card can ever match this suit
NO_COLOR = 999999 # no card can ever match this color
NO_RANK = 999999 # no card can ever match this rank
UNLIMITED_MOVES = 999999 # for max_move
UNLIMITED_ACCEPTS = 999999 # for max_accept
UNLIMITED_CARDS = 999999 # for max_cards
#
NO_REDEAL = 0
UNLIMITED_REDEALS = -1
VARIABLE_REDEALS = -2
CARDSET = _("cardset")
IMAGE_EXTENSIONS = (".gif", ".ppm", ".png")
if 1 and os.name == "nt":
IMAGE_EXTENSIONS = (".png", ".gif", ".ppm", ".jpg",)
pass
if Image:
IMAGE_EXTENSIONS = (".png", ".gif", ".jpg", ".ppm", ".bmp")
if TOOLKIT == 'kivy':
IMAGE_EXTENSIONS = (".png", ".bmp", ".ppm", ".jpg", ".tiff")
# ************************************************************************
# * DataLoader
# ************************************************************************
class DataLoader:
def __init__(self, argv0, filenames, path=[]):
self.dir = None
if isinstance(filenames, str):
filenames = (filenames,)
assert isinstance(filenames, (tuple, list))
# init path
path = path[:]
head, tail = os.path.split(argv0)
if not head:
head = os.curdir
# dir where placed startup script
path.append(head)
path.append(os.path.join(head, "data"))
path.append(os.path.join(head, os.pardir, "data"))
# dir where placed pysol package
path.append(os.path.join(sys.path[0], "data"))
path.append(os.path.join(sys.path[0], "pysollib", "data"))
# from settings.py
path.extend(DATA_DIRS)
# check path for valid directories
self.path = []
for p in path:
if not p:
continue
np = os.path.abspath(p)
if np and (np not in self.path) and os.path.isdir(np):
self.path.append(np)
# now try to find all filenames along path
for p in self.path:
if all(os.path.isfile(os.path.join(p, fn)) for fn in filenames):
self.dir = p
break
else:
raise OSError(str(argv0)+": DataLoader could not find " +
str(filenames))
def __findFile(self, func, filename, subdirs=None, do_raise=1):
if subdirs is None:
subdirs = ("",)
elif isinstance(subdirs, str):
subdirs = (subdirs,)
for dir in subdirs:
f = os.path.join(self.dir, dir, filename)
f = os.path.normpath(f)
if func(f):
return f
if do_raise:
raise OSError("DataLoader could not find "+filename+" in " +
self.dir+" "+str(subdirs))
return None
def findFile(self, filename, subdirs=None):
return self.__findFile(os.path.isfile, filename, subdirs)
def findImage(self, filename, subdirs=None):
for ext in IMAGE_EXTENSIONS:
f = self.__findFile(os.path.isfile, filename+ext, subdirs, 0)
if f:
return f
raise OSError("DataLoader could not find image "+filename +
" in "+self.dir+" "+str(subdirs))
def findIcon(self, filename='pysol', subdirs=None):
root, ext = os.path.splitext(filename)
if not ext:
filename += ('.ico' if os.name == 'nt' else '.xbm')
return self.findFile(filename, subdirs)
def findDir(self, filename, subdirs=None):
return self.__findFile(os.path.isdir, filename, subdirs)
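# Hypothetical usage sketch (added; the file names are made-up examples):
#
#   loader = DataLoader(sys.argv[0], "glyphs.ttf")
#   icon = loader.findIcon()                         # pysol.ico or pysol.xbm
#   image = loader.findImage("back01", subdirs="images")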
| gpl-3.0 | 6,557,655,274,380,356,000 | 33.895425 | 79 | 0.540925 | false |
tkwon/dj-stripe | djstripe/migrations/0009_auto_20160501_1838.py | 1 | 42461 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import djstripe.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('djstripe', '0008_auto_20150806_1641'),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(verbose_name='created',
default=django.utils.timezone.now, editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(verbose_name='modified',
default=django.utils.timezone.now,
editable=False)),
('stripe_id', djstripe.fields.StripeIdField(max_length=50, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False)),
('stripe_timestamp', djstripe.fields.StripeDateTimeField(
help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(
blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful\
for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.',
null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='StripeSource',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(verbose_name='created',
default=django.utils.timezone.now, editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(verbose_name='modified',
default=django.utils.timezone.now,
editable=False)),
('stripe_id', djstripe.fields.StripeIdField(max_length=50, unique=True)),
('livemode', djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False)),
('stripe_timestamp', djstripe.fields.StripeDateTimeField(
help_text='The datetime this object was created in stripe.', null=True)),
('metadata', djstripe.fields.StripeJSONField(
blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful\
for storing additional information about an object in a structured format.', null=True)),
('description', djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.',
null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Card',
fields=[
('stripesource_ptr', models.OneToOneField(serialize=False, parent_link=True, primary_key=True, on_delete=django.db.models.deletion.CASCADE,
to='djstripe.StripeSource', auto_created=True)),
('address_city', djstripe.fields.StripeTextField(help_text='Billing address city.', null=True)),
('address_country', djstripe.fields.StripeTextField(help_text='Billing address country.', null=True)),
('address_line1', djstripe.fields.StripeTextField(help_text='Billing address (Line 1).', null=True)),
('address_line1_check', djstripe.fields.StripeCharField(
choices=[('pass', 'Pass'), ('fail', 'Fail'), ('unavailable', 'Unavailable'),
('unknown', 'Unknown')],
max_length=11, help_text='If ``address_line1`` was provided, results of the check.', null=True)),
('address_line2', djstripe.fields.StripeTextField(help_text='Billing address (Line 2).', null=True)),
('address_state', djstripe.fields.StripeTextField(help_text='Billing address state.', null=True)),
('address_zip', djstripe.fields.StripeTextField(help_text='Billing address zip code.', null=True)),
('address_zip_check', djstripe.fields.StripeCharField(
choices=[('pass', 'Pass'), ('fail', 'Fail'), ('unavailable', 'Unavailable'),
('unknown', 'Unknown')],
max_length=11, help_text='If ``address_zip`` was provided, results of the check.', null=True)),
('brand', djstripe.fields.StripeCharField(
choices=[('Visa', 'Visa'), ('American Express', 'American Express'), ('MasterCard', 'MasterCard'),
('Discover', 'Discover'), ('JCB', 'JCB'), ('Diners Club', 'Diners Club'),
('Unknown', 'Unknown')],
max_length=16, help_text='Card brand.')),
('country', djstripe.fields.StripeCharField(
max_length=2, help_text='Two-letter ISO code representing the country of the card.')),
('cvc_check', djstripe.fields.StripeCharField(
choices=[('pass', 'Pass'), ('fail', 'Fail'), ('unavailable', 'Unavailable'),
('unknown', 'Unknown')],
max_length=11, help_text='If a CVC was provided, results of the check.', null=True)),
('dynamic_last4', djstripe.fields.StripeCharField(
max_length=4,
help_text='(For tokenized numbers only.) The last four digits of the device account number.',
null=True)),
('exp_month', djstripe.fields.StripeIntegerField(help_text='Card expiration month.')),
('exp_year', djstripe.fields.StripeIntegerField(help_text='Card expiration year.')),
('fingerprint', djstripe.fields.StripeTextField(
help_text='Uniquely identifies this particular card number.', null=True)),
('funding', djstripe.fields.StripeCharField(
choices=[('credit', 'Credit'), ('debit', 'Debit'), ('prepaid', 'Prepaid'), ('unknown', 'Unknown')],
max_length=7, help_text='Card funding type.')),
('last4', djstripe.fields.StripeCharField(max_length=4, help_text='Last four digits of Card number.')),
('name', djstripe.fields.StripeTextField(help_text='Cardholder name.', null=True)),
('tokenization_method', djstripe.fields.StripeCharField(
choices=[('apple_pay', 'Apple Pay'), ('android_pay', 'Android Pay')], max_length=11,
help_text='If the card number is tokenized, this is the method that was used.', null=True)),
],
options={
'abstract': False,
},
bases=('djstripe.stripesource',),
),
migrations.RemoveField(
model_name='transferchargefee',
name='transfer',
),
migrations.RemoveField(
model_name='charge',
name='card_kind',
),
migrations.RemoveField(
model_name='charge',
name='card_last_4',
),
migrations.RemoveField(
model_name='charge',
name='invoice',
),
migrations.RemoveField(
model_name='customer',
name='card_exp_month',
),
migrations.RemoveField(
model_name='customer',
name='card_exp_year',
),
migrations.RemoveField(
model_name='customer',
name='card_fingerprint',
),
migrations.RemoveField(
model_name='customer',
name='card_kind',
),
migrations.RemoveField(
model_name='customer',
name='card_last_4',
),
migrations.RemoveField(
model_name='event',
name='validated_message',
),
migrations.RemoveField(
model_name='invoiceitem',
name='line_type',
),
migrations.RemoveField(
model_name='subscription',
name='amount',
),
migrations.RemoveField(
model_name='transfer',
name='event',
),
migrations.AddField(
model_name='charge',
name='account',
field=models.ForeignKey(
related_name='charges', on_delete=django.db.models.deletion.CASCADE, null=True, to='djstripe.Account',
help_text='The account the charge was made on behalf of. Null here indicates that this value was \
never set.'),
),
migrations.AddField(
model_name='charge',
name='source',
field=models.ForeignKey(related_name='charges', on_delete=django.db.models.deletion.CASCADE, null=True, to='djstripe.StripeSource'),
),
migrations.AddField(
model_name='charge',
name='currency',
field=djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code representing the currency \
in which the charge was made.', max_length=3, default=''),
preserve_default=False,
),
migrations.AddField(
model_name='charge',
name='failure_code',
field=djstripe.fields.StripeCharField(
choices=[('invalid_number', 'Invalid Number'), ('invalid_expiry_month', 'Invalid Expiry Month'),
('invalid_expiry_year', 'Invalid Expiry Year'), ('invalid_cvc', 'Invalid Cvc'),
('incorrect_number', 'Incorrect Number'), ('expired_card', 'Expired Card'),
('incorrect_cvc', 'Incorrect Cvc'), ('incorrect_zip', 'Incorrect Zip'),
('card_declined', 'Card Declined'), ('missing', 'Missing'),
('processing_error', 'Processing Error'), ('rate_limit', 'Rate Limit')],
max_length=30, help_text='Error code explaining reason for charge failure if available.', null=True),
),
migrations.AddField(
model_name='charge',
name='failure_message',
field=djstripe.fields.StripeTextField(
help_text='Message to user further explaining reason for charge failure if available.', null=True),
),
migrations.AddField(
model_name='charge',
name='fee_details',
field=djstripe.fields.StripeJSONField(null=True),
),
migrations.AddField(
model_name='charge',
name='fraudulent',
field=djstripe.fields.StripeBooleanField(
help_text='Whether or not this charge was marked as fraudulent.', default=False),
),
migrations.AddField(
model_name='charge',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='charge',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True, help_text='A set of key/value pairs that you can attach to an object. It can be useful for\
storing additional information about an object in a structured format.',
null=True),
),
migrations.AddField(
model_name='charge',
name='shipping',
field=djstripe.fields.StripeJSONField(help_text='Shipping information for the charge', null=True),
),
migrations.AddField(
model_name='charge',
name='source_stripe_id',
field=djstripe.fields.StripeIdField(max_length=50, help_text='The payment source id.', null=True),
),
migrations.AddField(
model_name='charge',
name='source_type',
field=djstripe.fields.StripeCharField(
max_length=20, help_text='The payment source type. If the payment source is supported by dj-stripe, \
a corresponding model is attached to this Charge via a foreign key matching this field.', null=True),
),
migrations.AddField(
model_name='charge',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(
max_length=22, help_text='An arbitrary string to be displayed on your customer\'s credit card \
statement. The statement description may not include <>"\' characters, and will appear on your \
customer\'s statement in capital letters. Non-ASCII characters are automatically stripped. While \
most banks display this information consistently, some may display it incorrectly or not at all.',
null=True),
),
migrations.AddField(
model_name='charge',
name='status',
field=djstripe.fields.StripeCharField(help_text='The status of the payment.',
choices=[('succeeded', 'Succeeded'), ('failed', 'Failed')],
max_length=10, default='unknown'),
preserve_default=False,
),
migrations.AddField(
model_name='charge',
name='transfer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='djstripe.Transfer',
help_text='The transfer to the destination account (only applicable if the \
charge was created using the destination parameter).'),
),
migrations.AddField(
model_name='customer',
name='account_balance',
field=djstripe.fields.StripeIntegerField(help_text="Current balance, if any, being stored on the \
customer's account. If negative, the customer has credit to apply to the next invoice. If positive, the \
customer has an amount owed that will be added to the next invoice. The balance does not refer to any \
unpaid invoices; it solely takes into account amounts that have yet to be successfully applied to any \
invoice. This balance is only taken into account for recurring charges.", null=True),
),
migrations.AddField(
model_name='customer',
name='business_vat_id',
field=djstripe.fields.StripeCharField(max_length=20, help_text="The customer's VAT identification number.",
null=True),
),
migrations.AddField(
model_name='customer',
name='currency',
field=djstripe.fields.StripeCharField(
help_text='The currency the customer can be charged in for recurring billing purposes \
(subscriptions, invoices, invoice items).', max_length=3, null=True),
),
migrations.AddField(
model_name='customer',
name='delinquent',
field=djstripe.fields.StripeBooleanField(
help_text="Whether or not the latest charge for the customer's latest invoice has failed.",
default=False),
),
migrations.AddField(
model_name='customer',
name='description',
field=djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True),
),
migrations.AddField(
model_name='customer',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='customer',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.', null=True),
),
migrations.AddField(
model_name='customer',
name='shipping',
field=djstripe.fields.StripeJSONField(help_text='Shipping information associated with the customer.',
null=True),
),
migrations.AddField(
model_name='customer',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.',
null=True),
),
migrations.AddField(
model_name='event',
name='description',
field=djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True),
),
migrations.AddField(
model_name='event',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.', null=True),
),
migrations.AddField(
model_name='event',
name='received_api_version',
field=djstripe.fields.StripeCharField(
max_length=15,
help_text='the API version at which the event data was rendered. Blank for old entries only, all \
new entries will have this value', blank=True),
),
migrations.AddField(
model_name='event',
name='request_id',
field=djstripe.fields.StripeCharField(
max_length=50,
help_text="Information about the request that triggered this event, for traceability purposes. \
If empty string then this is an old entry without that data. If Null then this is not an old entry, \
but a Stripe 'automated' event with no associated request.", blank=True, null=True),
),
migrations.AddField(
model_name='event',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(
help_text='The datetime this object was created in stripe.', null=True),
),
migrations.AddField(
model_name='invoice',
name='amount_due',
field=djstripe.fields.StripeCurrencyField(
help_text="Final amount due at this time for this invoice. If the invoice's total is smaller than \
the minimum charge amount, for example, or if there is account credit that can be applied to the \
invoice, the amount_due may be 0. If there is a positive starting_balance for the invoice \
(the customer owes money), the amount_due will also take that into account. The charge that gets \
generated for the invoice will be for the amount specified in amount_due.",
max_digits=7, default=0, decimal_places=2),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='application_fee',
field=djstripe.fields.StripeCurrencyField(
max_digits=7,
help_text="The fee in cents that will be applied to the invoice and transferred to the application \
owner's Stripe account when the invoice is paid.", decimal_places=2, null=True),
),
migrations.AddField(
model_name='invoice',
name='currency',
field=djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.',
max_length=3, default=''),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='description',
field=djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True),
),
migrations.AddField(
model_name='invoice',
name='ending_balance',
field=djstripe.fields.StripeIntegerField(
help_text='Ending customer balance after attempting to pay invoice. If the invoice has not been \
attempted yet, this will be null.', null=True),
),
migrations.AddField(
model_name='invoice',
name='forgiven',
field=djstripe.fields.StripeBooleanField(
help_text='Whether or not the invoice has been forgiven. Forgiving an invoice instructs us to \
update the subscription status as if the invoice were successfully paid. Once an invoice has been \
forgiven, it cannot be unforgiven or reopened.', default=False),
),
migrations.AddField(
model_name='invoice',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='invoice',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.',
null=True),
),
migrations.AddField(
model_name='invoice',
name='next_payment_attempt',
field=djstripe.fields.StripeDateTimeField(help_text='The time at which payment will next be attempted.',
null=True),
),
migrations.AddField(
model_name='invoice',
name='starting_balance',
field=djstripe.fields.StripeIntegerField(
help_text='Starting customer balance before attempting to pay invoice. If the invoice has not been \
attempted yet, this will be the current customer balance.', default=0),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(
max_length=22,
help_text='An arbitrary string to be displayed on your customer\'s credit card statement. \
The statement description may not include <>"\' characters, and will appear on your customer\'s \
statement in capital letters. Non-ASCII characters are automatically stripped. While most banks \
display this information consistently, some may display it incorrectly or not at all.', null=True),
),
migrations.AddField(
model_name='invoice',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.',
null=True),
),
migrations.AddField(
model_name='invoice',
name='subscription',
field=models.ForeignKey(
related_name='invoices',
on_delete=django.db.models.deletion.CASCADE,
null=True,
to='djstripe.Subscription',
help_text='The subscription that this invoice was prepared for, if any.'),
),
migrations.AddField(
model_name='invoice',
name='subscription_proration_date',
field=djstripe.fields.StripeDateTimeField(
help_text='Only set for upcoming invoices that preview prorations. The time used to calculate \
prorations.',
null=True),
),
migrations.AddField(
model_name='invoice',
name='tax',
field=djstripe.fields.StripeCurrencyField(max_digits=7, help_text='The amount of tax included in the \
total, calculated from ``tax_percent`` and the subtotal. If no ``tax_percent`` is defined, this value \
will be null.', decimal_places=2, null=True),
),
migrations.AddField(
model_name='invoice',
name='tax_percent',
field=djstripe.fields.StripePercentField(
validators=[django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0)],
max_digits=5,
help_text="This percentage of the subtotal has been added to the total amount of the invoice, \
including invoice line items and discounts. This field is inherited from the subscription's \
``tax_percent`` field, but can be changed before the invoice is paid. This field defaults to null.",
decimal_places=2, null=True),
),
migrations.AddField(
model_name='invoiceitem',
name='customer',
field=models.ForeignKey(related_name='invoiceitems', on_delete=django.db.models.deletion.CASCADE, default=1, to='djstripe.Customer',
help_text='The customer associated with this invoiceitem.'),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceitem',
name='date',
field=djstripe.fields.StripeDateTimeField(help_text='The date on the invoiceitem.',
default=datetime.datetime(2100, 1, 1, 0, 0,
tzinfo=django.utils.timezone.utc)),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceitem',
name='discountable',
field=djstripe.fields.StripeBooleanField(help_text='If True, discounts will apply to this invoice item. \
Always False for prorations.', default=False),
),
migrations.AddField(
model_name='invoiceitem',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='invoiceitem',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.', null=True),
),
migrations.AddField(
model_name='invoiceitem',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.',
null=True),
),
migrations.AddField(
model_name='invoiceitem',
name='subscription',
field=models.ForeignKey(
related_name='invoiceitems',
on_delete=django.db.models.deletion.CASCADE,
null=True,
to='djstripe.Subscription',
help_text='The subscription that this invoice item has been created for, if any.'),
),
migrations.AddField(
model_name='plan',
name='description',
field=djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True),
),
migrations.AddField(
model_name='plan',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='plan',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.', null=True),
),
migrations.AddField(
model_name='plan',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(
max_length=22,
help_text='An arbitrary string to be displayed on your customer\'s credit card statement. The \
statement description may not include <>"\' characters, and will appear on your customer\'s statement \
in capital letters. Non-ASCII characters are automatically stripped. While most banks display this \
information consistently, some may display it incorrectly or not at all.', null=True),
),
migrations.AddField(
model_name='plan',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.',
null=True),
),
migrations.AlterModelOptions(
name='plan',
options={'ordering': ['amount']},
),
migrations.AddField(
model_name='subscription',
name='application_fee_percent',
field=djstripe.fields.StripePercentField(
validators=[django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0)],
help_text='A positive decimal that represents the fee percentage of the subscription invoice amount \
that will be transferred to the application owner’s Stripe account each billing period.',
max_digits=5, decimal_places=2, null=True),
),
migrations.AddField(
model_name='subscription',
name='description',
field=djstripe.fields.StripeTextField(blank=True, help_text='A description of this object.', null=True),
),
migrations.AddField(
model_name='subscription',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='subscription',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.', null=True),
),
migrations.AddField(
model_name='subscription',
name='stripe_id',
field=djstripe.fields.StripeIdField(max_length=50, default='unknown', unique=False),
preserve_default=False,
),
migrations.AddField(
model_name='subscription',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.',
null=True),
),
migrations.AddField(
model_name='subscription',
name='tax_percent',
field=djstripe.fields.StripePercentField(
validators=[django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0)],
max_digits=5,
help_text='A positive decimal (with at most two decimal places) between 1 and 100. This represents \
the percentage of the subscription invoice subtotal that will be calculated and added as tax to the \
final amount each billing period.', decimal_places=2, null=True),
),
migrations.AddField(
model_name='transfer',
name='amount_reversed',
field=djstripe.fields.StripeCurrencyField(
max_digits=7,
help_text='The amount reversed (can be less than the amount attribute on the transfer if a partial \
reversal was issued).', decimal_places=2, null=True),
),
migrations.AddField(
model_name='transfer',
name='currency',
field=djstripe.fields.StripeCharField(help_text='Three-letter ISO currency code.',
max_length=3, default=''),
preserve_default=False,
),
migrations.AddField(
model_name='transfer',
name='destination',
field=djstripe.fields.StripeIdField(
help_text='ID of the bank account, card, or Stripe account the transfer was sent to.',
max_length=50, default='unknown'),
preserve_default=False,
),
migrations.AddField(
model_name='transfer',
name='destination_payment',
field=djstripe.fields.StripeIdField(
max_length=50,
help_text='If the destination is a Stripe account, this will be the ID of the payment that the \
destination account received for the transfer.', null=True),
),
migrations.AddField(
model_name='transfer',
name='destination_type',
field=djstripe.fields.StripeCharField(
help_text='The type of the transfer destination.',
choices=[('card', 'Card'), ('bank_account', 'Bank Account'), ('stripe_account', 'Stripe Account')],
max_length=14, default='unknown'),
preserve_default=False,
),
migrations.AddField(
model_name='transfer',
name='failure_code',
field=djstripe.fields.StripeCharField(
choices=[('insufficient_funds', 'Insufficient Funds'), ('account_closed', 'Account Closed'),
('no_account', 'No Account'), ('invalid_account_number', 'Invalid Account Number'),
('debit_not_authorized', 'Debit Not Authorized'),
('bank_ownership_changed', 'Bank Ownership Changed'), ('account_frozen', 'Account Frozen'),
('could_not_process', 'Could Not Process'),
('bank_account_restricted', 'Bank Account Restricted'),
('invalid_currency', 'Invalid Currency')],
max_length=23,
help_text='Error code explaining reason for transfer failure if available. See \
https://stripe.com/docs/api/python#transfer_failures.', null=True),
),
migrations.AddField(
model_name='transfer',
name='failure_message',
field=djstripe.fields.StripeTextField(
help_text='Message to user further explaining reason for transfer failure if available.', null=True),
),
migrations.AddField(
model_name='transfer',
name='fee',
field=djstripe.fields.StripeCurrencyField(max_digits=7, decimal_places=2, null=True),
),
migrations.AddField(
model_name='transfer',
name='fee_details',
field=djstripe.fields.StripeJSONField(null=True),
),
migrations.AddField(
model_name='transfer',
name='livemode',
field=djstripe.fields.StripeNullBooleanField(
help_text='Null here indicates that the livemode status is unknown or was previously unrecorded. \
Otherwise, this field indicates whether this record comes from Stripe test mode or live mode \
operation.', default=False),
),
migrations.AddField(
model_name='transfer',
name='metadata',
field=djstripe.fields.StripeJSONField(
blank=True,
help_text='A set of key/value pairs that you can attach to an object. It can be useful for storing \
additional information about an object in a structured format.', null=True),
),
migrations.AddField(
model_name='transfer',
name='reversed',
field=djstripe.fields.StripeBooleanField(
help_text='Whether or not the transfer has been fully reversed. If the transfer is only partially \
reversed, this attribute will still be false.', default=False),
),
migrations.AddField(
model_name='transfer',
name='source_transaction',
field=djstripe.fields.StripeIdField(
max_length=50,
help_text='ID of the charge (or other transaction) that was used to fund the transfer. If null, the \
transfer was funded from the available balance.', null=True),
),
migrations.AddField(
model_name='transfer',
name='source_type',
field=djstripe.fields.StripeCharField(
help_text='The source balance from which this transfer came.',
choices=[('card', 'Card'), ('bank_account', 'Bank Account'), ('bitcoin_reciever', 'Bitcoin Reciever'),
('alipay_account', 'Alipay Account')], max_length=16, default='unknown'),
preserve_default=False,
),
migrations.AddField(
model_name='transfer',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(
max_length=22,
help_text='An arbitrary string to be displayed on your customer\'s credit card statement. The \
statement description may not include <>"\' characters, and will appear on your customer\'s statement \
in capital letters. Non-ASCII characters are automatically stripped. While most banks display this \
information consistently, some may display it incorrectly or not at all.', null=True),
),
migrations.AddField(
model_name='transfer',
name='stripe_timestamp',
field=djstripe.fields.StripeDateTimeField(help_text='The datetime this object was created in stripe.',
null=True),
),
migrations.AddField(
model_name='stripesource',
name='customer',
field=models.ForeignKey(related_name='sources', on_delete=django.db.models.deletion.CASCADE, to='djstripe.Customer'),
),
migrations.AddField(
model_name='stripesource',
name='polymorphic_ctype',
field=models.ForeignKey(related_name='polymorphic_djstripe.stripesource_set+', on_delete=django.db.models.deletion.CASCADE,
to='contenttypes.ContentType', editable=False, null=True),
),
migrations.AddField(
model_name='customer',
name='default_source',
field=models.ForeignKey(related_name='customers', on_delete=django.db.models.deletion.CASCADE, null=True, to='djstripe.StripeSource'),
),
migrations.DeleteModel(
name='TransferChargeFee',
),
]
| mit | 1,678,715,755,278,278,700 | 51.483313 | 155 | 0.567206 | false |
daspecster/google-cloud-python | bigtable/unit_tests/test_cluster.py | 1 | 15990 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestCluster(unittest.TestCase):
PROJECT = 'project'
INSTANCE_ID = 'instance-id'
CLUSTER_ID = 'cluster-id'
CLUSTER_NAME = ('projects/' + PROJECT +
'/instances/' + INSTANCE_ID +
'/clusters/' + CLUSTER_ID)
@staticmethod
def _get_target_class():
from google.cloud.bigtable.cluster import Cluster
return Cluster
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def test_constructor_defaults(self):
from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertIs(cluster._instance, instance)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
def test_constructor_non_default(self):
SERVE_NODES = 8
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertIs(cluster._instance, instance)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
def test_copy(self):
SERVE_NODES = 8
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
new_cluster = cluster.copy()
# Make sure the client copy succeeded.
self.assertIsNot(new_cluster._instance, instance)
self.assertEqual(new_cluster.serve_nodes, SERVE_NODES)
# Make sure the client got copied to a new instance.
self.assertIsNot(cluster, new_cluster)
self.assertEqual(cluster, new_cluster)
def test__update_from_pb_success(self):
from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
SERVE_NODES = 8
cluster_pb = _ClusterPB(
serve_nodes=SERVE_NODES,
)
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
cluster._update_from_pb(cluster_pb)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
def test__update_from_pb_no_serve_nodes(self):
from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
cluster_pb = _ClusterPB()
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
with self.assertRaises(ValueError):
cluster._update_from_pb(cluster_pb)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
def test_from_pb_success(self):
SERVE_NODES = 331
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster_pb = _ClusterPB(
name=self.CLUSTER_NAME,
serve_nodes=SERVE_NODES,
)
klass = self._get_target_class()
cluster = klass.from_pb(cluster_pb, instance)
self.assertIsInstance(cluster, klass)
self.assertIs(cluster._instance, instance)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
def test_from_pb_bad_cluster_name(self):
BAD_CLUSTER_NAME = 'INCORRECT_FORMAT'
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test_from_pb_project_mistmatch(self):
ALT_PROJECT = 'ALT_PROJECT'
client = _Client(ALT_PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test_from_pb_instance_mistmatch(self):
ALT_INSTANCE_ID = 'ALT_INSTANCE_ID'
client = _Client(self.PROJECT)
instance = _Instance(ALT_INSTANCE_ID, client)
self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID)
cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test_name_property(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster.name, self.CLUSTER_NAME)
def test___eq__(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one(self.CLUSTER_ID, instance)
cluster2 = self._make_one(self.CLUSTER_ID, instance)
self.assertEqual(cluster1, cluster2)
def test___eq__type_differ(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one(self.CLUSTER_ID, instance)
cluster2 = object()
self.assertNotEqual(cluster1, cluster2)
def test___ne__same_value(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one(self.CLUSTER_ID, instance)
cluster2 = self._make_one(self.CLUSTER_ID, instance)
comparison_val = (cluster1 != cluster2)
self.assertFalse(comparison_val)
def test___ne__(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._make_one('cluster_id1', instance)
cluster2 = self._make_one('cluster_id2', instance)
self.assertNotEqual(cluster1, cluster2)
def test_reload(self):
from unit_tests._testing import _FakeStub
from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
SERVE_NODES = 31
LOCATION = 'LOCATION'
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
# Create request_pb
request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME)
# Create response_pb
response_pb = _ClusterPB(
serve_nodes=SERVE_NODES,
location=LOCATION,
)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # reload() has no return value.
# Check Cluster optional config values before.
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
# Perform the method and check the result.
result = cluster.reload()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'GetCluster',
(request_pb,),
{},
)])
# Check Cluster optional config values before.
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
self.assertEqual(cluster.location, LOCATION)
def test_create(self):
from google.longrunning import operations_pb2
from google.cloud.operation import Operation
from google.cloud.bigtable._generated import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from unit_tests._testing import _FakeStub
SERVE_NODES = 4
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(
self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES)
# Create response_pb
OP_ID = 5678
OP_NAME = (
'operations/projects/%s/instances/%s/clusters/%s/operations/%d' %
(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID))
response_pb = operations_pb2.Operation(name=OP_NAME)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Perform the method and check the result.
result = cluster.create()
self.assertIsInstance(result, Operation)
self.assertEqual(result.name, OP_NAME)
self.assertIs(result.target, cluster)
self.assertIs(result.client, client)
self.assertIsNone(result.metadata)
self.assertEqual(result.caller_metadata,
{'request_type': 'CreateCluster'})
self.assertEqual(len(stub.method_calls), 1)
api_name, args, kwargs = stub.method_calls[0]
self.assertEqual(api_name, 'CreateCluster')
request_pb, = args
self.assertIsInstance(request_pb,
messages_v2_pb2.CreateClusterRequest)
self.assertEqual(request_pb.parent, instance.name)
self.assertEqual(request_pb.cluster_id, self.CLUSTER_ID)
self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES)
self.assertEqual(kwargs, {})
def test_update(self):
import datetime
from google.longrunning import operations_pb2
from google.cloud.operation import Operation
from google.protobuf.any_pb2 import Any
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable._generated import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable._generated import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from unit_tests._testing import _FakeStub
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
SERVE_NODES = 81
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
# Create request_pb
request_pb = _ClusterPB(
name=self.CLUSTER_NAME,
serve_nodes=SERVE_NODES,
)
# Create response_pb
OP_ID = 5678
OP_NAME = (
'operations/projects/%s/instances/%s/clusters/%s/operations/%d' %
(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID))
metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB)
type_url = 'type.googleapis.com/%s' % (
messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name,)
response_pb = operations_pb2.Operation(
name=OP_NAME,
metadata=Any(
type_url=type_url,
value=metadata.SerializeToString()
)
)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
result = cluster.update()
self.assertIsInstance(result, Operation)
self.assertEqual(result.name, OP_NAME)
self.assertIs(result.target, cluster)
self.assertIs(result.client, client)
self.assertIsInstance(result.metadata,
messages_v2_pb2.UpdateClusterMetadata)
self.assertEqual(result.metadata.request_time, NOW_PB)
self.assertEqual(result.caller_metadata,
{'request_type': 'UpdateCluster'})
self.assertEqual(len(stub.method_calls), 1)
api_name, args, kwargs = stub.method_calls[0]
self.assertEqual(api_name, 'UpdateCluster')
request_pb, = args
self.assertIsInstance(request_pb, data_v2_pb2.Cluster)
self.assertEqual(request_pb.name, self.CLUSTER_NAME)
self.assertEqual(request_pb.serve_nodes, SERVE_NODES)
self.assertEqual(kwargs, {})
def test_delete(self):
from google.protobuf import empty_pb2
from unit_tests._testing import _FakeStub
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._make_one(self.CLUSTER_ID, instance)
# Create request_pb
request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = cluster.delete()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'DeleteCluster',
(request_pb,),
{},
)])
class Test__prepare_create_request(unittest.TestCase):
def _call_fut(self, cluster):
from google.cloud.bigtable.cluster import _prepare_create_request
return _prepare_create_request(cluster)
def test_it(self):
from google.cloud.bigtable.cluster import Cluster
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
CLUSTER_ID = 'cluster-id'
SERVE_NODES = 8
client = _Client(PROJECT)
instance = _Instance(INSTANCE_ID, client)
cluster = Cluster(CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
request_pb = self._call_fut(cluster)
self.assertEqual(request_pb.cluster_id, CLUSTER_ID)
self.assertEqual(request_pb.parent, instance.name)
self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES)
def _ClusterPB(*args, **kw):
from google.cloud.bigtable._generated import (
instance_pb2 as instance_v2_pb2)
return instance_v2_pb2.Cluster(*args, **kw)
def _DeleteClusterRequestPB(*args, **kw):
from google.cloud.bigtable._generated import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
return messages_v2_pb2.DeleteClusterRequest(*args, **kw)
def _GetClusterRequestPB(*args, **kw):
from google.cloud.bigtable._generated import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
return messages_v2_pb2.GetClusterRequest(*args, **kw)
class _Instance(object):
def __init__(self, instance_id, client):
self.instance_id = instance_id
self._client = client
@property
def name(self):
return 'projects/%s/instances/%s' % (
self._client.project, self.instance_id)
def copy(self):
return self.__class__(self.instance_id, self._client)
def __eq__(self, other):
return (other.instance_id == self.instance_id and
other._client == self._client)
class _Client(object):
def __init__(self, project):
self.project = project
self.project_name = 'projects/' + self.project
def __eq__(self, other):
return (other.project == self.project and
other.project_name == self.project_name)
| apache-2.0 | -3,644,475,206,319,272,400 | 34.376106 | 77 | 0.630269 | false |
ralfonso/harvestmedia | tests/test_playlists.py | 1 | 17635 | # -*- coding: utf-8 -*-
import datetime
import hashlib
from nose.tools import raises, with_setup
import mock
import StringIO
import textwrap
import xml.etree.cElementTree as ET
import harvestmedia.api.exceptions
from harvestmedia.api.member import Member
from harvestmedia.api.playlist import Playlist
from utils import build_http_mock, get_random_md5, init_client
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_add_playlist(HttpMock):
client = init_client()
test_member_id = get_random_md5()
test_playlist_id = get_random_md5()
test_playlist_name = 'test playlist'
content = """<?xml version="1.0" encoding="utf-8"?>
<ResponsePlaylists>
<playlists>
<playlist id="%(id)s" name="%(name)s" />
</playlists>
</ResponsePlaylists>""" % \
{'id': test_playlist_id,
'name': test_playlist_name}
http = build_http_mock(HttpMock, content=content)
playlist = Playlist.add(_client=client, member_id=test_member_id, playlist_name=test_playlist_name)
assert playlist.id == test_playlist_id
assert playlist.name == test_playlist_name
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_get_member_playlists(HttpMock):
client = init_client()
test_member_id = get_random_md5()
username = 'testuser'
firstname = 'Test'
lastname = 'User'
email = '[email protected]'
member_xml = ET.fromstring("""<memberaccount id="%(test_member_id)s">
<username>%(username)s</username>
<firstname>%(firstname)s</firstname>
<lastname>%(lastname)s</lastname>
<email>%(email)s</email>
</memberaccount>""" % {'test_member_id': test_member_id,
'username': username,
'firstname': firstname,
'lastname': lastname,
'email': email})
member = Member._from_xml(member_xml, client)
test_playlist_id = get_random_md5()
test_playlist_name = 'test playlist'
content = """<?xml version="1.0" encoding="utf-8"?>
<ResponsePlaylists>
<playlists>
<playlist id="%(id)s" name="%(name)s">
<tracks>
                        <track tracknumber="001" time="02:50" lengthseconds="170" comment="Make sure
                            you’re down the front for this fiery Post Punk workout."
composer=""S. Milton, J. Wygens"" publisher="HM"
name="Guerilla Pop" id="17376d36f309f18d" keywords="" lyrics=""
displaytitle="Guerilla Pop" genre="Pop / Rock" tempo="" instrumentation=""
bpm="" mixout="" frequency="44100" bitrate="1411" />
</tracks>
</playlist>
</playlists>
</ResponsePlaylists>""" % {'id': test_playlist_id,
'name': test_playlist_name}
http = build_http_mock(HttpMock, content=content)
playlists = member.get_playlists()
assert isinstance(playlists, list)
playlist = playlists[0]
assert playlist.id == test_playlist_id
assert playlist.name == test_playlist_name
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_get_featured_playlists(HttpMock):
client = init_client()
test_playlist_id = get_random_md5()
test_playlist_name = 'test playlist'
content = """<?xml version="1.0" encoding="utf-8"?>
<responsefeaturedplaylists>
<playlists>
<playlist id="%(id)s" name="%(name)s">
<tracks>
<track tracknumber="001" time="01:07" lengthseconds="67" comment="If a certain
animated film studio were to remake The Andy Griffith Show as a digital short, we'd
nominate this for the theme. Warm, chunky, a little slow on the uptake... a.k.a. the
anti-lemonade song. Ending starts @ 1:08. Lite Mix, without main rhythm acoustic
guitars." composer="D. Holter/K. White" publisher="TLL UNDERscore Nashcap (ASCAP)"
name="Pencilneck Strut" id="902dea1d377473df" keywords="Cute, Goofy, Lighthearted,
Happy, Comical, Twang, Rural, Fun, Mischievous, Celebration, Campy, Childlike,
Cheerful, Simple, Quirky, Swampy, Playful" lyrics="" displaytitle="Pencilneck Strut"
genre="Country" tempo="Medium" instrumentation="Acoustic Guitar, Banjo, Percussion"
bpm="130" mixout="Alt2" frequency="2650" bitrate="24" />
</tracks>
</playlist>
</playlists>
</responsefeaturedplaylists>
""" % {'id': test_playlist_id,
'name': test_playlist_name}
http = build_http_mock(HttpMock, content=content)
playlists = Playlist.query.get_featured_playlists(client)
assert isinstance(playlists, list)
playlist = playlists[0]
assert playlist.id == test_playlist_id
assert playlist.name == test_playlist_name
@raises(harvestmedia.api.exceptions.MissingParameter)
def test_add_playlist_client_missing():
playlist = Playlist.add()
@raises(harvestmedia.api.exceptions.MissingParameter)
def test_add_playlist_member_id_missing():
client = init_client()
playlist = Playlist.add(_client=client)
@raises(harvestmedia.api.exceptions.MissingParameter)
def test_add_playlist_name_missing():
client = init_client()
test_member_id = 123
playlist = Playlist.add(_client=client, member_id=test_member_id)
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_add_track(HttpMock):
client = init_client()
test_playlist_id = get_random_md5()
track_id = get_random_md5()
content = """<?xml version="1.0" encoding="utf-8"?>
<responsecode xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<code>OK</code>
</responsecode>"""
http = build_http_mock(HttpMock, content=content)
playlist = Playlist(_client=client)
playlist.member_id = 123
playlist.id = test_playlist_id
playlist.add_track(track_id)
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_remove(HttpMock):
client = init_client()
test_member_id = get_random_md5()
test_playlist_id = get_random_md5()
test_playlist_name = 'test playlist'
test_track_id = get_random_md5()
return_values = [
(200, """<?xml version="1.0" encoding="utf-8"?>
<ResponsePlaylists>
<playlists>
<playlist id="%(test_playlist_id)s" name="%(test_playlist_name)s">
<tracks>
                            <track tracknumber="001" time="02:50" lengthseconds="170" comment="Make sure you’re
down the front for this fiery Post Punk workout."
composer=""S. Milton, J. Wygens"" publisher="HM"
name="Guerilla Pop" id="%(test_track_id)s" keywords=""
lyrics="" displaytitle="Guerilla Pop" genre="Pop / Rock" tempo=""
instrumentation="" bpm="" mixout="" frequency="44100" bitrate="1411" />
</tracks>
</playlist>
</playlists>
</ResponsePlaylists>""" % locals()),
(200, """<?xml version="1.0" encoding="utf-8"?>
<responsecode xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<code>OK</code>
</responsecode>""",),
]
http = build_http_mock(HttpMock, responses=return_values)
member = Member(_client=client)
member.id = test_member_id
playlists = member.get_playlists()
playlists[0].remove()
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_remove_track(HttpMock):
client = init_client()
album_id = '1c5f47572d9152f3'
    now = datetime.datetime.today().isoformat()
    test_member_id = get_random_md5()
test_playlist_id = get_random_md5()
test_playlist_name = 'test playlist'
track_id = '17376d36f309f18d'
return_values = [
(200, """<?xml version="1.0" encoding="utf-8"?>
<ResponsePlaylists>
<playlists>
<playlist id="%(test_playlist_id)s" name="%(test_playlist_name)s">
<tracks>
                            <track tracknumber="001" time="02:50" lengthseconds="170" comment="Make
                            sure you’re down the front for this fiery Post Punk workout."
composer=""S. Milton, J. Wygens"" publisher="HM"
name="Guerilla Pop" id="%(track_id)s" keywords="" lyrics=""
displaytitle="Guerilla Pop" genre="Pop / Rock" tempo=""
instrumentation="" bpm="" mixout="" frequency="44100" bitrate="1411" />
</tracks>
</playlist>
</playlists>
</ResponsePlaylists>""" % locals(),),
(200, """<?xml version="1.0" encoding="utf-8"?>
<responsecode xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<code>OK</code>
</responsecode>"""),
]
http = build_http_mock(HttpMock, responses=return_values)
member = Member(_client=client)
member.id = test_member_id
playlists = member.get_playlists()
playlists[0].remove_track(track_id)
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_playlist_update(HttpMock):
client = init_client()
test_member_id = get_random_md5()
test_playlist_id = get_random_md5()
test_playlist_name = 'test playlist'
test_playlist_update_name = 'test playlist update'
track_id = '17376d36f309f18d'
return_values = [
(200, """<?xml version="1.0" encoding="utf-8"?>
<ResponsePlaylists>
<playlists>
<playlist id="%(test_playlist_id)s" name="%(test_playlist_name)s">
<tracks>
                            <track tracknumber="001" time="02:50" lengthseconds="170" comment="Make
                                sure you’re down the front for this fiery Post Punk workout."
composer=""S. Milton, J. Wygens"" publisher="HM"
name="Guerilla Pop" id="%(track_id)s" keywords="" lyrics=""
displaytitle="Guerilla Pop" genre="Pop / Rock" tempo=""
instrumentation="" bpm="" mixout="" frequency="44100" bitrate="1411" />
</tracks>
</playlist>
</playlists>
</ResponsePlaylists>""" % locals()),
(200, """<?xml version="1.0" encoding="utf-8"?>
<responsecode xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<code>OK</code>
</responsecode>"""),
]
http = build_http_mock(HttpMock, responses=return_values)
member = Member(_client=client)
member.id = test_member_id
playlists = member.get_playlists()
playlist = playlists[0]
playlist.name = test_playlist_update_name
playlist.update()
assert playlist.name == test_playlist_update_name
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_get_download_url(HttpMock):
client = init_client()
member_id = 'bacba2b288328238bcbac'
track_format = 'mp3'
playlist_id = "908098a8a0ba8b065"
playlist_xml = ET.fromstring("""
<playlist id="%(playlist_id)s" name="sample playlist">
<tracks>
<track tracknumber="1" time="02:50" lengthseconds="170"
comment="Track Comment" composer="JJ Jayjay"
publisher="PP Peepee" name="Epic Track" albumid="1abcbacbac33" id="11bacbcbabcb3b2823"
displaytitle="Epic Track" genre="Pop / Rock"
bpm="100" mixout="FULL" frequency="44100" bitrate="1411"
dateingested="2008-05-15 06:08:18"/>
</tracks>
</playlist>""" % locals())
playlist = Playlist._from_xml(playlist_xml, client)
download_url = playlist.get_download_url(track_format, member_id)
download_url_template = client.config.playlistdownload_url
format_identifier = client.config.get_format_identifier(track_format)
expected_url = download_url_template.replace('{memberaccountid}', member_id).\
replace('{id}', playlist_id).\
replace('{trackformat}', format_identifier)
assert download_url == expected_url, 'url: %s != expected: %s' % (download_url, expected_url)
@raises(harvestmedia.api.exceptions.MissingParameter)
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_get_download_url_missing_format(HttpMock):
client = init_client()
member_id = 'bacba2b288328238bcbac'
track_format = 'BAD-FORMAT'
playlist_id = "908098a8a0ba8b065"
playlist_xml = ET.fromstring("""
<playlist id="%(playlist_id)s" name="sample playlist">
<tracks>
<track tracknumber="1" time="02:50" lengthseconds="170"
comment="Track Comment" composer="JJ Jayjay"
publisher="PP Peepee" name="Epic Track" albumid="1abcbacbac33" id="11bacbcbabcb3b2823"
displaytitle="Epic Track" genre="Pop / Rock"
bpm="100" mixout="FULL" frequency="44100" bitrate="1411"
dateingested="2008-05-15 06:08:18"/>
</tracks>
</playlist>""" % locals())
playlist = Playlist._from_xml(playlist_xml, client)
download_url = playlist.get_download_url(track_format, member_id)
@mock.patch('harvestmedia.api.client.httplib2.Http')
def test_get_playlist_art_url(HttpMock):
client = init_client()
playlist_id = '112358'
playlist_xml = ET.fromstring(textwrap.dedent("""<playlist name="EFF!" id="%s"
createddate="2012-04-17 06:24:45"
trackcount="0" />""" % (playlist_id)))
playlist_art_url = 'http://download.harvestmedia.net/wsplaylistart/8185d768cd8fcaa7/{id}/{width}/{height}'
expiry = datetime.datetime.now()
test_token = get_random_md5()
return_values = [
(200, """<?xml version="1.0" encoding="utf-8"?>
<responseservicetoken>
<token value="%s" expiry="%s"/>
</responseservicetoken>""" % \
(test_token, expiry.strftime("%Y-%m-%dT%H:%M:%S"))),
(200, """<?xml version="1.0" encoding="utf-8"?>
<responseserviceinfo>
<asseturl
waveform="http://asset.harvestmedia.net/waveform/8185d768cd8fcaa7/{id}/{width}/{height}"
trackstream="http://asset.harvestmedia.net/trackstream/8185d768cd8fcaa7/{id}"
trackdownload=" http://asset.harvestmedia.net/trackdownload/8185d768cd8fcaa7/{id}/{trackformat}"
playlistart="%(playlist_art_url)s" />
<trackformats>
<trackformat identifier="8185d768cd8fcaa7" extension="mp3" bitrate="320" samplerate="48" samplesize="16" />
<trackformat identifier="768cd8fcaa8185d7" extension="wav" bitrate="1536" samplerate="48" samplesize="16" />
<trackformat identifier="7jsi8fcaa818df57" extension="aif" bitrate="1536" samplerate="48" samplesize="16" />
</trackformats>
</responseserviceinfo>""" % locals()),
]
http = build_http_mock(HttpMock, responses=return_values)
width = 200
height = 300
playlist = Playlist._from_xml(playlist_xml, client)
cover_art_url = playlist.get_cover_url(width, height)
expected_url = playlist_art_url.replace('{id}', playlist_id).replace('{width}', str(width)).replace('{height}', str(height))
assert cover_art_url == expected_url
| mit | -4,184,953,822,004,371,000 | 44.784416 | 133 | 0.540932 | false |
alazanman/py_epg_tests | fixture/channel_db.py | 1 | 2699 | # -*- coding: utf-8 -*-
from model.channel import Channel
class DbChannelHelper:
def __init__(self, db):
self.db = db
def get_channels(self):
channels = []
cursor = self.db.connection.cursor()
try:
cursor.execute(
"select id, name, service_id, epg_name, epg_channel.offset, provider, icon, allow_record, narrow_banner, wide_banner from epg_channel")
for row in cursor:
(id, name, service_id, epg_name, offset, provider, icon, allow_record, narrow_banner, wide_banner) = row
channels.append(
Channel(id=str(id), name=name, service_id=str(service_id), epg_name=epg_name, offset=str(offset),
provider=provider, languages=self.get_channel_languages(id), allow_record=bool(allow_record),
icon={"server_file": icon, "user_file": None}, narrow_banner={"server_file": narrow_banner, "user_file": None},
wide_banner={"server_file": wide_banner, "user_file": None}))
# provider=provider, languages=self.get_channel_languages(id), allow_record=bool(allow_record),
# icon={"server_file": self.full_path_if_exists(icon), "user_file": None}, narrow_banner=self.full_path_if_exists(narrow_banner),
# wide_banner=self.full_path_if_exists(wide_banner)))
self.db.connection.commit()
finally:
cursor.close()
# self.connection.close()
# print channels
return channels
def get_channel_languages(self, channel_id):
languages = []
cursor = self.db.connection.cursor()
try:
cursor.execute(
"select language_id from epg_channel_languages where channel_id=" + str(channel_id))
for row in cursor:
languages.append(str(row[0]))
self.db.connection.commit()
finally:
cursor.close()
return languages
def count(self):
cursor = self.db.connection.cursor()
try:
cursor.execute("select count(*) from epg_channel")
count = cursor.fetchone()[0]
self.db.connection.commit()
finally:
cursor.close()
# self.connection.close()
# print count, type(count), int(count), type(count)
return int(count)
# def full_path_if_exists(self, relative_path):
# # base_media_url = nose_config.load_config()['web']['baseUrl'] + "media/"
# global base_media_url
# if relative_path:
# return base_media_url + relative_path | apache-2.0 | -2,066,353,362,194,304,800 | 41.857143 | 157 | 0.56206 | false |
mnahm5/django-estore | Lib/site-packages/awscli/customizations/datapipeline/translator.py | 1 | 7212 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from awscli.clidriver import CLIOperationCaller
class PipelineDefinitionError(Exception):
def __init__(self, msg, definition):
full_msg = (
"Error in pipeline definition: %s\n" % msg)
super(PipelineDefinitionError, self).__init__(full_msg)
self.msg = msg
self.definition = definition
# Method to convert the dictionary input to a string
# This is required for escaping
def dict_to_string(dictionary, indent=2):
return json.dumps(dictionary, indent=indent)
# Method to parse the arguments to get the region value
def get_region(session, parsed_globals):
region = parsed_globals.region
if region is None:
region = session.get_config_variable('region')
return region
# Method to display the response for a particular CLI operation
def display_response(session, operation_name, result, parsed_globals):
cli_operation_caller = CLIOperationCaller(session)
# Calling a private method. Should be changed after the functionality
# is moved outside CliOperationCaller.
cli_operation_caller._display_response(
operation_name, result, parsed_globals)
def api_to_definition(definition):
# When we're translating from api_response -> definition
# we have to be careful *not* to mutate the existing
# response as other code might need to the original
# api_response.
if 'pipelineObjects' in definition:
definition['objects'] = _api_to_objects_definition(
definition.pop('pipelineObjects'))
if 'parameterObjects' in definition:
definition['parameters'] = _api_to_parameters_definition(
definition.pop('parameterObjects'))
if 'parameterValues' in definition:
definition['values'] = _api_to_values_definition(
definition.pop('parameterValues'))
return definition
def definition_to_api_objects(definition):
if 'objects' not in definition:
raise PipelineDefinitionError('Missing "objects" key', definition)
api_elements = []
# To convert to the structure expected by the service,
# we convert the existing structure to a list of dictionaries.
# Each dictionary has a 'fields', 'id', and 'name' key.
for element in definition['objects']:
try:
element_id = element.pop('id')
except KeyError:
raise PipelineDefinitionError('Missing "id" key of element: %s' %
json.dumps(element), definition)
api_object = {'id': element_id}
# If a name is provided, then we use that for the name,
# otherwise the id is used for the name.
name = element.pop('name', element_id)
api_object['name'] = name
# Now we need the field list. Each element in the field list is a dict
# with a 'key', 'stringValue'|'refValue'
fields = []
for key, value in sorted(element.items()):
fields.extend(_parse_each_field(key, value))
api_object['fields'] = fields
api_elements.append(api_object)
return api_elements
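# Illustrative sketch (not part of the original module): a worked example of
# the object conversion above, using made-up pipeline object names.
def _example_definition_to_api_objects():
    definition = {
        'objects': [
            {'id': 'Default', 'type': 'Default', 'schedule': {'ref': 'Hourly'}},
        ]
    }
    # 'schedule' becomes a refValue field, plain strings become stringValue
    # fields, and the missing 'name' falls back to the element id.
    expected = [
        {'id': 'Default', 'name': 'Default', 'fields': [
            {'key': 'schedule', 'refValue': 'Hourly'},
            {'key': 'type', 'stringValue': 'Default'},
        ]},
    ]
    assert definition_to_api_objects(definition) == expected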
def definition_to_api_parameters(definition):
if 'parameters' not in definition:
return None
parameter_objects = []
for element in definition['parameters']:
try:
parameter_id = element.pop('id')
except KeyError:
raise PipelineDefinitionError('Missing "id" key of parameter: %s' %
json.dumps(element), definition)
parameter_object = {'id': parameter_id}
# Now we need the attribute list. Each element in the attribute list
# is a dict with a 'key', 'stringValue'
attributes = []
for key, value in sorted(element.items()):
attributes.extend(_parse_each_field(key, value))
parameter_object['attributes'] = attributes
parameter_objects.append(parameter_object)
return parameter_objects
def definition_to_parameter_values(definition):
if 'values' not in definition:
return None
parameter_values = []
for key in definition['values']:
parameter_values.extend(
_convert_single_parameter_value(key, definition['values'][key]))
return parameter_values
def _parse_each_field(key, value):
values = []
if isinstance(value, list):
for item in value:
values.append(_convert_single_field(key, item))
else:
values.append(_convert_single_field(key, value))
return values
def _convert_single_field(key, value):
field = {'key': key}
if isinstance(value, dict) and list(value.keys()) == ['ref']:
field['refValue'] = value['ref']
else:
field['stringValue'] = value
return field
def _convert_single_parameter_value(key, values):
parameter_values = []
if isinstance(values, list):
for each_value in values:
parameter_value = {'id': key, 'stringValue': each_value}
parameter_values.append(parameter_value)
else:
parameter_value = {'id': key, 'stringValue': values}
parameter_values.append(parameter_value)
return parameter_values
def _api_to_objects_definition(api_response):
pipeline_objects = []
for element in api_response:
current = {
'id': element['id'],
'name': element['name']
}
for field in element['fields']:
key = field['key']
if 'stringValue' in field:
value = field['stringValue']
else:
value = {'ref': field['refValue']}
_add_value(key, value, current)
pipeline_objects.append(current)
return pipeline_objects
def _api_to_parameters_definition(api_response):
parameter_objects = []
for element in api_response:
current = {
'id': element['id']
}
for attribute in element['attributes']:
_add_value(attribute['key'], attribute['stringValue'], current)
parameter_objects.append(current)
return parameter_objects
def _api_to_values_definition(api_response):
pipeline_values = {}
for element in api_response:
_add_value(element['id'], element['stringValue'], pipeline_values)
return pipeline_values
def _add_value(key, value, current_map):
if key not in current_map:
current_map[key] = value
elif isinstance(current_map[key], list):
# Dupe keys result in values aggregating
# into a list.
current_map[key].append(value)
else:
converted_list = [current_map[key], value]
current_map[key] = converted_list
| mit | 1,967,457,875,482,338,000 | 34.527094 | 79 | 0.642263 | false |
Alwnikrotikz/kegbot | pykeg/src/pykeg/core/migrations/0020_add_thermo_summary_table.py | 1 | 14418 | # -*- coding: latin-1 -*-
from south.db import db
from django.db import models
from pykeg.core.models import *
class Migration:
def forwards(self, orm):
# Adding model 'ThermoSummaryLog'
db.create_table('core_thermosummarylog', (
('id', orm['core.thermosummarylog:id']),
('sensor', orm['core.thermosummarylog:sensor']),
('date', orm['core.thermosummarylog:date']),
('period', orm['core.thermosummarylog:period']),
('num_readings', orm['core.thermosummarylog:num_readings']),
('min_temp', orm['core.thermosummarylog:min_temp']),
('max_temp', orm['core.thermosummarylog:max_temp']),
('mean_temp', orm['core.thermosummarylog:mean_temp']),
))
db.send_create_signal('core', ['ThermoSummaryLog'])
def backwards(self, orm):
# Deleting model 'ThermoSummaryLog'
db.delete_table('core_thermosummarylog')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.authenticationtoken': {
'Meta': {'unique_together': "(('auth_device', 'token_value'),)"},
'auth_device': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'token_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.bac': {
'bac': ('django.db.models.fields.FloatField', [], {}),
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rectime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.beerstyle': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'core.beertype': {
'abv': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'brewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Brewer']"}),
'calories_oz': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'carbs_oz': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BeerStyle']"})
},
'core.brewer': {
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'distribution': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'origin_country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'origin_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.config': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'core.drink': {
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'valid'", 'max_length': '128'}),
'ticks': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.drinkingsessiongroup': {
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {})
},
'core.keg': {
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'enddate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origcost': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegSize']"}),
'startdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BeerType']"})
},
'core.kegsize': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.kegtap': {
'current_keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tick_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'meter_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ml_per_tick': ('django.db.models.fields.FloatField', [], {'default': '0.45454545454545453'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'temperature_sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']", 'null': 'True', 'blank': 'True'})
},
'core.relaylog': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermolog': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'temp': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermosensor': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nice_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'core.thermosummarylog': {
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_temp': ('django.db.models.fields.FloatField', [], {}),
'mean_temp': ('django.db.models.fields.FloatField', [], {}),
'min_temp': ('django.db.models.fields.FloatField', [], {}),
'num_readings': ('django.db.models.fields.PositiveIntegerField', [], {}),
'period': ('django.db.models.fields.CharField', [], {'default': "'daily'", 'max_length': '64'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"})
},
'core.userdrinkingsession': {
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.DrinkingSessionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userdrinkingsessionassignment': {
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserDrinkingSession']"})
},
'core.userlabel': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelname': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'core.userpicture': {
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userprofile': {
'gender': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.UserLabel']"}),
'mugshot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserPicture']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['core']
| gpl-2.0 | -2,401,296,472,967,906,300 | 66.690141 | 153 | 0.533639 | false |
akuster/yali | yali/gui/runner.py | 1 | 5413 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
import sys
import imp
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt4.Qt import QTimer
from PyQt4.Qt import QStyleFactory
from PyQt4.Qt import QObject
from PyQt4.Qt import QShortcut
from PyQt4.Qt import Qt
from PyQt4.Qt import QApplication
from PyQt4.Qt import SIGNAL
from PyQt4.Qt import SLOT
from PyQt4.Qt import QKeySequence
from PyQt4.Qt import QTranslator
from PyQt4.Qt import QLocale
from PyQt4.Qt import QLibraryInfo
import yali
import yali.util
import yali.context as ctx
import yali.gui
import yali.gui.YaliWindow
class Runner:
_window = None
_application = None
def __init__(self):
self._application = QApplication(sys.argv)
self._window = None
# Main Window Initialized..
try:
self._window = yali.gui.YaliWindow.Widget()
except yali.Error, msg:
ctx.logger.debug(msg)
sys.exit(1)
self._translator = QTranslator()
self._translator.load("qt_" + QLocale.system().name(), QLibraryInfo.location(QLibraryInfo.TranslationsPath))
ctx.mainScreen = self._window
screens = self._get_screens(ctx.flags.install_type)
self._set_steps(screens)
# These shorcuts for developers :)
prevScreenShortCut = QShortcut(QKeySequence(Qt.SHIFT + Qt.Key_F1), self._window)
nextScreenShortCut = QShortcut(QKeySequence(Qt.SHIFT + Qt.Key_F2), self._window)
QObject.connect(prevScreenShortCut, SIGNAL("activated()"), self._window.slotBack)
QObject.connect(nextScreenShortCut, SIGNAL("activated()"), self._window.slotNext)
# VBox utils
ctx.logger.debug("Starting VirtualBox tools..")
#FIXME:sh /etc/X11/Xsession.d/98-vboxclient.sh
yali.util.run_batch("VBoxClient", ["--autoresize"])
yali.util.run_batch("VBoxClient", ["--clipboard"])
# Cp Reboot, ShutDown
yali.util.run_batch("cp", ["/sbin/reboot", "/tmp/reboot"])
yali.util.run_batch("cp", ["/sbin/shutdown", "/tmp/shutdown"])
# base connections
QObject.connect(self._application, SIGNAL("lastWindowClosed()"),
self._application, SLOT("quit()"))
QObject.connect(self._window, SIGNAL("signalProcessEvents"),
self._application.processEvents)
QObject.connect(self._application.desktop(), SIGNAL("resized(int)"),
self._reinit_screen)
# Font Resize
fontMinusShortCut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_Minus), self._window)
fontPlusShortCut = QShortcut(QKeySequence(Qt.CTRL + Qt.Key_Plus) , self._window)
QObject.connect(fontMinusShortCut, SIGNAL("activated()"), self._window.setFontMinus)
QObject.connect(fontPlusShortCut , SIGNAL("activated()"), self._window.setFontPlus)
def _reinit_screen(self):
QTimer.singleShot(700,self._init_screen)
def _init_screen(self):
# We want it to be a full-screen window
# inside the primary display.
screen = self._application.desktop().screenGeometry()
self._window.resize(screen.size())
self._window.setMaximumSize(screen.size())
self._window.move(screen.topLeft())
self._window.show()
def _get_screens(self, install_type):
screens = []
ctx.logger.info("Install type is %s" % ctx.STEP_TYPE_STRINGS[install_type])
for name in yali.gui.GUI_STEPS[install_type]:
screenClass = None
moduleName = ""
try:
module_name = yali.gui.stepToClass[name]
found = imp.find_module(module_name, yali.gui.__path__)
loaded = imp.load_module(module_name, *found)
screenClass = loaded.__dict__["Widget"]
except ImportError, msg:
ctx.logger.debug(msg)
rc = ctx.interface.messageWindow(_("Error!"),
_("An error occurred when attempting "
"to load an installer interface "
"component.\n\nclassName = %s.Widget") % module_name,
type="custom", customIcon="warning",
customButtons=[_("Exit"), _("Retry")])
if not rc:
sys.exit(1)
else:
screens.append(screenClass)
return screens
def _set_steps(self, screens):
self._window.createWidgets(screens)
self._window.setCurrent(ctx.flags.startup)
def run(self):
# Use default theme;
# if you use different Qt4 theme our works looks ugly :)
self._application.setStyle(QStyleFactory.create('Plastique'))
self._init_screen()
self._application.installTranslator(self._translator)
# For testing..
# self._window.resize(QSize(800,600))
# Run run run
self._application.exec_()
| gpl-2.0 | -2,483,951,368,154,206,700 | 35.560811 | 116 | 0.608021 | false |
bflaven/BlogArticlesExamples | extending_streamlit_usage/001_nlp_spacy_python_realp/002c_nlp_spacy_python.py | 1 | 1651 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/extending_streamlit_usage/001_nlp_spacy_python_realp/
[file]
python 002c_nlp_spacy_python.py
# source
Source: https://realpython.com/natural-language-processing-spacy-python/
# required
pip install spacy-langdetect
# validation
python -m spacy validate
"""
from spacy_langdetect import LanguageDetector
from spacy.language import Language
import spacy
import spacy_streamlit
# Add the decorator `@Language.component` (for function components) or `@Language.factory` (for class components / factories) to your custom component and assign it a name, e.g. `@Language.component('your_name')`. You can then run `nlp.add_pipe('your_name')` to add it to the pipeline.
def set_custom_boundaries(doc):
    # Adds support to use `...` as the delimiter for sentence detection
    for token in doc[:-1]:
        if token.text == '...':
doc[token.i+1].is_sent_start = True
return doc
ellipsis_text = ('Gus, can you, ... never mind, I forgot'
                 ' what I was saying. So, do you think'
                 ' we should ...')
custom_nlp = spacy.load('en_core_web_sm')
Language.component("the_set_custom_boundaries", func=set_custom_boundaries)
# sentence boundaries must be set before the dependency parser runs
custom_nlp.add_pipe("the_set_custom_boundaries", before="parser")
custom_ellipsis_doc = custom_nlp(ellipsis_text)
custom_ellipsis_sentences = list(custom_ellipsis_doc.sents)
print("\n --- result_5")
for sentence in custom_ellipsis_sentences:
print(sentence)
# Gus, can you, ...
# never mind, I forgot what I was saying.
# So, do you think we should ...
| mit | 1,533,434,948,254,148,000 | 27.964912 | 285 | 0.690491 | false |
archesproject/arches | arches/app/search/base_index.py | 1 | 7123 | import pyprind
from datetime import datetime
from django.utils.translation import ugettext as _
from arches.app.models import models
from arches.app.models.resource import Resource
from arches.app.models.system_settings import settings
from arches.app.utils import import_class_from_string
from arches.app.search.search_engine_factory import SearchEngineFactory
from arches.app.search.elasticsearch_dsl_builder import Query, Term, Ids
class BaseIndex(object):
def __init__(self, index_name=None):
if index_name is None or index_name == "":
raise SearchIndexError("Index name is not defined")
self.se = SearchEngineFactory().create()
self.index_metadata = None
self.index_name = index_name
def prepare_index(self):
"""
Defines the Elastic Search mapping and settings for an index
Arguments:
None
Keyword Arguments:
None
Return: None
"""
if self.index_metadata is not None:
self.se.create_index(index=self.index_name, body=self.index_metadata)
else:
raise SearchIndexError("No index metadata defined.")
def get_documents_to_index(self, resourceinstance, tiles):
"""
Gets a document to index into Elastic Search
Arguments:
resourceinstance -- resource instance object
tiles -- list of tiles that make up the resource instance
Keyword Arguments:
None
Return: tuple of (document, document id)
"""
raise NotImplementedError
def index_document(self, document=None, id=None):
"""
Indexes a document into Elastic Search
Arguments:
None
Keyword Arguments:
document -- the document to index
id -- the id of the document
Return: None
"""
if document is not None and id is not None:
self.se.index_data(index=self.index_name, body=document, id=id)
def index_resources(self, resources=None, batch_size=settings.BULK_IMPORT_BATCH_SIZE, quiet=False):
"""
Indexes a list of resources in bulk to Elastic Search
Keyword Arguments:
resources -- the list of resource instances to index
batch_size -- the number of records to index as a group, the larger the number to more memory required
quiet -- Silences the status bar output during certain operations, use in celery operations for example
Return: None
"""
start = datetime.now()
q = Query(se=self.se)
self.se.refresh(index=self.index_name)
count_before = self.se.count(index=self.index_name, body=q.dsl)
result_summary = {"database": len(resources), "indexed": 0}
if quiet is False:
bar = pyprind.ProgBar(len(resources), bar_char="█") if len(resources) > 1 else None
with self.se.BulkIndexer(batch_size=batch_size, refresh=True) as indexer:
for resource in resources:
if quiet is False and bar is not None:
bar.update(item_id=resource)
tiles = list(models.TileModel.objects.filter(resourceinstance=resource))
document, doc_id = self.get_documents_to_index(resource, tiles)
                if document is not None and doc_id is not None:
indexer.add(index=self.index_name, id=doc_id, data=document)
self.se.refresh(index=self.index_name)
result_summary["indexed"] = self.se.count(index=self.index_name, body=q.dsl) - count_before
status = "Passed" if result_summary["database"] == result_summary["indexed"] else "Failed"
print(f"Custom Index - {settings.ELASTICSEARCH_PREFIX}_{self.index_name}")
print(
f" Status: {status}, In Database: {result_summary['database']}, Indexed: {result_summary['indexed']}, Took: {(datetime.now() - start).seconds} seconds"
)
def delete_resources(self, resources=None):
"""
Deletes documents from an index based on the passed in list of resources
Delete by query, so this is a single operation
Keyword Arguments:
resources -- a single resource instance or a list of resource instances
"""
q = Query(se=self.se)
if not isinstance(resources, list):
resourcelist = [resources]
else:
resourcelist = resources
list_of_ids_to_delete = []
for resource in resourcelist:
list_of_ids_to_delete.append(resource.pk)
ids_query = Ids(ids=list_of_ids_to_delete)
q.add_query(ids_query)
q.delete(index=self.index_name)
def delete_index(self):
"""
Deletes this index from Elastic Search
Arguments:
None
Keyword Arguments:
None
Return: None
"""
self.se.delete_index(index=self.index_name)
def reindex(self, graphids=None, clear_index=True, batch_size=settings.BULK_IMPORT_BATCH_SIZE, quiet=False):
"""
Reindexes the index. By default this does nothing, it needs to be implemented in a subclass.
By default you can pass in a list of graph ids to trigger the reindex. This will loop through all resource instances of each graph type.
Example subclass command:
def reindex(self, clear_index=True):
PARCEL_GRAPHID = "e3c35dca-5e72-11ea-a2d3-dca90488358a"
super(CustomIndexName, self).reindex(graphids=[PARCEL_GRAPHID], clear_index=clear_index)
Keyword Arguments:
graphids -- list of graphs ids to trigger the reindex on, will get all resource instances of each graph id supplied
clear_index -- True(default) to clear all documents out of the index before reindexing begins
batch_size -- the number of records to index as a group, the larger the number to more memory required
Return: None
"""
if graphids is not None:
if clear_index:
self.delete_index()
self.prepare_index()
for graphid in graphids:
resources = Resource.objects.filter(graph_id=graphid)
self.index_resources(resources=resources, batch_size=batch_size, quiet=quiet)
else:
raise NotImplementedError
def get_index(name):
for index in settings.ELASTICSEARCH_CUSTOM_INDEXES:
if index["name"] == name:
return import_class_from_string(index["module"])(name)
raise SearchIndexNotDefinedError(name=name)
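# Illustrative note (assumed shape, inferred from the lookup above): each entry
# in settings.ELASTICSEARCH_CUSTOM_INDEXES is expected to look roughly like
#     {"name": "my_sample_index", "module": "my_app.search_indexes.MySampleIndex"}
# so that get_index("my_sample_index") imports MySampleIndex and instantiates it
# with the index name.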
class SearchIndexError(Exception):
def __init__(self, message, code=None):
self.title = _("Search Index Error:")
self.message = message
self.code = code
def __str__(self):
return repr(self.message)
class SearchIndexNotDefinedError(Exception):
def __init__(self, name=None):
self.title = _("Search Index Not Defined Error:")
self.message = _('The index "%s" is not defined in settings.ELASTICSEARCH_CUSTOM_INDEXES' % name)
def __str__(self):
return repr(self.message)
| agpl-3.0 | 8,049,778,892,263,966,000 | 35.331633 | 166 | 0.63404 | false |
itnihao/thriftpy | thriftpy/protocol/binary.py | 1 | 10025 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import struct
from ..thrift import TType
from .exc import TProtocolException
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000ff
def pack_i8(byte):
return struct.pack("!b", byte)
def pack_i16(i16):
return struct.pack("!h", i16)
def pack_i32(i32):
return struct.pack("!i", i32)
def pack_i64(i64):
return struct.pack("!q", i64)
def pack_double(dub):
return struct.pack("!d", dub)
def pack_string(string):
return struct.pack("!i%ds" % len(string), len(string), string)
def unpack_i8(buf):
return struct.unpack("!b", buf)[0]
def unpack_i16(buf):
return struct.unpack("!h", buf)[0]
def unpack_i32(buf):
return struct.unpack("!i", buf)[0]
def unpack_i64(buf):
return struct.unpack("!q", buf)[0]
def unpack_double(buf):
return struct.unpack("!d", buf)[0]
def write_message_begin(outbuf, name, ttype, seqid, strict=True):
if strict:
outbuf.write(pack_i32(VERSION_1 | ttype))
outbuf.write(pack_string(name.encode('utf-8')))
else:
outbuf.write(pack_string(name.encode('utf-8')))
outbuf.write(pack_i8(ttype))
outbuf.write(pack_i32(seqid))
def write_field_begin(outbuf, ttype, fid):
outbuf.write(pack_i8(ttype) + pack_i16(fid))
def write_field_stop(outbuf):
outbuf.write(pack_i8(TType.STOP))
def write_list_begin(outbuf, etype, size):
outbuf.write(pack_i8(etype) + pack_i32(size))
def write_map_begin(outbuf, ktype, vtype, size):
outbuf.write(pack_i8(ktype) + pack_i8(vtype) + pack_i32(size))
def write_val(outbuf, ttype, val, spec=None):
if ttype == TType.BOOL:
if val:
outbuf.write(pack_i8(1))
else:
outbuf.write(pack_i8(0))
elif ttype == TType.BYTE:
outbuf.write(pack_i8(val))
elif ttype == TType.I16:
outbuf.write(pack_i16(val))
elif ttype == TType.I32:
outbuf.write(pack_i32(val))
elif ttype == TType.I64:
outbuf.write(pack_i64(val))
elif ttype == TType.DOUBLE:
outbuf.write(pack_double(val))
elif ttype == TType.STRING:
if not isinstance(val, bytes):
val = val.encode('utf-8')
outbuf.write(pack_string(val))
elif ttype == TType.SET or ttype == TType.LIST:
if isinstance(spec, tuple):
e_type, t_spec = spec[0], spec[1]
else:
e_type, t_spec = spec, None
val_len = len(val)
write_list_begin(outbuf, e_type, val_len)
for e_val in val:
write_val(outbuf, e_type, e_val, t_spec)
elif ttype == TType.MAP:
if isinstance(spec[0], int):
k_type = spec[0]
k_spec = None
else:
k_type, k_spec = spec[0]
if isinstance(spec[1], int):
v_type = spec[1]
v_spec = None
else:
v_type, v_spec = spec[1]
write_map_begin(outbuf, k_type, v_type, len(val))
for k in iter(val):
write_val(outbuf, k_type, k, k_spec)
write_val(outbuf, v_type, val[k], v_spec)
elif ttype == TType.STRUCT:
for fid in iter(val.thrift_spec):
f_spec = val.thrift_spec[fid]
if len(f_spec) == 3:
f_type, f_name, f_req = f_spec
f_container_spec = None
else:
f_type, f_name, f_container_spec, f_req = f_spec
v = getattr(val, f_name)
if v is None:
continue
write_field_begin(outbuf, f_type, fid)
write_val(outbuf, f_type, v, f_container_spec)
write_field_stop(outbuf)
def read_message_begin(inbuf, strict=True):
sz = unpack_i32(inbuf.read(4))
if sz < 0:
version = sz & VERSION_MASK
if version != VERSION_1:
raise TProtocolException(
type=TProtocolException.BAD_VERSION,
message='Bad version in read_message_begin: %d' % (sz))
name_sz = unpack_i32(inbuf.read(4))
name = inbuf.read(name_sz).decode('utf-8')
type_ = sz & TYPE_MASK
else:
if strict:
raise TProtocolException(type=TProtocolException.BAD_VERSION,
message='No protocol version header')
name = inbuf.read(sz).decode('utf-8')
type_ = unpack_i8(inbuf.read(1))
seqid = unpack_i32(inbuf.read(4))
return name, type_, seqid
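# Illustrative usage sketch (not part of the original module): round-trip a
# message header through an in-memory buffer using only the helpers above.
# The message type value 1 is assumed to mean CALL, as in Apache Thrift.
def _example_message_header_roundtrip():
    from io import BytesIO
    buf = BytesIO()
    write_message_begin(buf, 'ping', 1, 42)
    buf.seek(0)
    name, type_, seqid = read_message_begin(buf)
    assert (name, type_, seqid) == ('ping', 1, 42)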
def read_field_begin(inbuf):
f_type = unpack_i8(inbuf.read(1))
if f_type == TType.STOP:
return f_type, 0
return f_type, unpack_i16(inbuf.read(2))
def read_list_begin(inbuf):
e_type = unpack_i8(inbuf.read(1))
sz = unpack_i32(inbuf.read(4))
return e_type, sz
def read_map_begin(inbuf):
k_type, v_type = unpack_i8(inbuf.read(1)), unpack_i8(inbuf.read(1))
sz = unpack_i32(inbuf.read(4))
return k_type, v_type, sz
def read_val(inbuf, ttype, spec=None):
if ttype == TType.BOOL:
return bool(unpack_i8(inbuf.read(1)))
elif ttype == TType.BYTE:
return unpack_i8(inbuf.read(1))
elif ttype == TType.I16:
return unpack_i16(inbuf.read(2))
elif ttype == TType.I32:
return unpack_i32(inbuf.read(4))
elif ttype == TType.I64:
return unpack_i64(inbuf.read(8))
elif ttype == TType.DOUBLE:
return unpack_double(inbuf.read(8))
elif ttype == TType.STRING:
sz = unpack_i32(inbuf.read(4))
byte_payload = inbuf.read(sz)
# Since we cannot tell if we're getting STRING or BINARY, try both
try:
return byte_payload.decode('utf-8')
except UnicodeDecodeError:
return byte_payload
elif ttype == TType.SET or ttype == TType.LIST:
if isinstance(spec, tuple):
v_type, v_spec = spec[0], spec[1]
else:
v_type, v_spec = spec, None
result = []
r_type, sz = read_list_begin(inbuf)
# the v_type is useless here since we already get it from spec
if r_type != v_type:
raise Exception("Message Corrupt")
for i in range(sz):
result.append(read_val(inbuf, v_type, v_spec))
return result
elif ttype == TType.MAP:
if isinstance(spec[0], int):
k_type = spec[0]
k_spec = None
else:
k_type, k_spec = spec[0]
if isinstance(spec[1], int):
v_type = spec[1]
v_spec = None
else:
v_type, v_spec = spec[1]
result = {}
sk_type, sv_type, sz = read_map_begin(inbuf)
if sk_type != k_type or sv_type != v_type:
raise Exception("Message Corrupt")
for i in range(sz):
k_val = read_val(inbuf, k_type, k_spec)
v_val = read_val(inbuf, v_type, v_spec)
result[k_val] = v_val
return result
elif ttype == TType.STRUCT:
obj = spec()
read_struct(inbuf, obj)
return obj
def read_struct(inbuf, obj):
# The max loop count equals field count + a final stop byte.
for i in range(len(obj.thrift_spec) + 1):
f_type, fid = read_field_begin(inbuf)
if f_type == TType.STOP:
break
if fid not in obj.thrift_spec:
skip(inbuf, f_type)
continue
if len(obj.thrift_spec[fid]) == 3:
sf_type, f_name, f_req = obj.thrift_spec[fid]
f_container_spec = None
else:
sf_type, f_name, f_container_spec, f_req = obj.thrift_spec[fid]
# it really should equal here. but since we already wasted
# space storing the duplicate info, let's check it.
if f_type != sf_type:
raise Exception("Message Corrupt")
setattr(obj, f_name, read_val(inbuf, f_type, f_container_spec))
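# Illustrative sketch (not part of the original module): a minimal struct with
# a hand-written thrift_spec, round-tripped through write_val and read_struct.
def _example_struct_roundtrip():
    from io import BytesIO
    class _Pair(object):
        thrift_spec = {
            1: (TType.STRING, 'key', False),
            2: (TType.I32, 'value', False),
        }
        def __init__(self, key=None, value=None):
            self.key = key
            self.value = value
    buf = BytesIO()
    write_val(buf, TType.STRUCT, _Pair(key='answer', value=42))
    buf.seek(0)
    decoded = _Pair()
    read_struct(buf, decoded)
    assert (decoded.key, decoded.value) == ('answer', 42)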
def skip(inbuf, ftype):
if ftype == TType.BOOL or ftype == TType.BYTE:
inbuf.read(1)
elif ftype == TType.I16:
inbuf.read(2)
elif ftype == TType.I32:
inbuf.read(4)
elif ftype == TType.I64:
inbuf.read(8)
elif ftype == TType.DOUBLE:
inbuf.read(8)
elif ftype == TType.STRING:
inbuf.read(unpack_i32(inbuf.read(4)))
elif ftype == TType.SET or ftype == TType.LIST:
v_type, sz = read_list_begin(inbuf)
for i in range(sz):
skip(inbuf, v_type)
elif ftype == TType.MAP:
k_type, v_type, sz = read_map_begin(inbuf)
for i in range(sz):
skip(inbuf, k_type)
skip(inbuf, v_type)
elif ftype == TType.STRUCT:
while True:
f_type, fid = read_field_begin(inbuf)
if f_type == TType.STOP:
break
skip(inbuf, f_type)
class TBinaryProtocol(object):
"""Binary implementation of the Thrift protocol driver."""
def __init__(self, trans, strict_read=True, strict_write=True):
self.trans = trans
self.strict_read = strict_read
self.strict_write = strict_write
def skip(self, ttype):
skip(self.trans, ttype)
def read_message_begin(self):
api, ttype, seqid = read_message_begin(
self.trans, strict=self.strict_read)
return api, ttype, seqid
def read_message_end(self):
pass
def write_message_begin(self, name, ttype, seqid):
write_message_begin(self.trans, name, ttype, seqid,
strict=self.strict_write)
def write_message_end(self):
pass
def read_struct(self, obj):
return read_struct(self.trans, obj)
def write_struct(self, obj):
write_val(self.trans, TType.STRUCT, obj)
class TBinaryProtocolFactory(object):
def __init__(self, strict_read=True, strict_write=True):
self.strict_read = strict_read
self.strict_write = strict_write
def get_protocol(self, trans):
return TBinaryProtocol(trans, self.strict_read, self.strict_write)
| mit | -7,436,702,996,779,655,000 | 25.038961 | 75 | 0.569476 | false |
CirrusMio/ShoreWheel | db/seedDB.py | 1 | 1185 | from app import db
from createDB import Person, Chore
from random import randint
from interactDB import rotate, multiSelect
from datetime import date
def createPairs():
rotate()
multiSelect()
def seed():
if(len(Person.query.all())!=0):
return
#start the seeding
people = [
Person("Mike Dillon", "Mike: Original Flavor"),
Person("Chase Southard", "Chase Original"),
Person("Chase James", "Nu Chase"),
Person("Tanzi Merritt", "Tanzi"),
Person("Sarah Vessels", "Sarah"),
Person("Todd Willey", "Todd"),
Person("Asian Steev", "Steev"),
Person("Will Anderson", "Will"),
Person("Nikolai Warner", "Nick"),
Person("Michael New-Guy", "Michael III")
]
chores = [
Chore("Load and Unload Dishwasher", 1, date.today()),
Chore("Take Out Trash and Recycling", 1, date.today()),
Chore("Clean Both Bathrooms", 4, date.today()),
Chore("Mop the Floors", 4, date.today()),
Chore("Cans to Curb and Back", 1, date.today()),
Chore("Clean out Refridgerator", 2, date.today())
]
Person.query.delete()
Chore.query.delete()
db.session.add_all(people)
db.session.add_all(chores)
db.session.commit()
createPairs()
| apache-2.0 | -5,407,113,397,750,673,000 | 26.55814 | 59 | 0.652321 | false |
cloudtools/troposphere | tests/test_efs.py | 1 | 1822 | import unittest
from troposphere import Template, efs
class TestEfsTemplate(unittest.TestCase):
def test_bucket_template(self):
template = Template()
title = "Efs"
efs.FileSystem(title, template)
self.assertIn(title, template.resources)
class TestEfs(unittest.TestCase):
def test_validData(self):
file_system = efs.FileSystem("Efs")
file_system.to_dict()
def test_validateThroughputMode(self):
with self.assertRaises(ValueError):
file_system = efs.FileSystem(
"Efs", ThroughputMode="UndefinedThroughputMode"
)
file_system.to_dict()
file_system = efs.FileSystem("Efs", ThroughputMode=efs.Bursting)
result = file_system.to_dict()
self.assertEqual(result["Type"], "AWS::EFS::FileSystem")
def test_validateProvisionedThroughputInMibps(self):
with self.assertRaises(TypeError):
file_system = efs.FileSystem("Efs", ProvisionedThroughputInMibps="512")
file_system.to_dict()
with self.assertRaises(TypeError):
file_system = efs.FileSystem("Efs", ProvisionedThroughputInMibps=512)
file_system.to_dict()
file_system = efs.FileSystem("Efs", ProvisionedThroughputInMibps=512.0)
result = file_system.to_dict()
self.assertEqual(result["Type"], "AWS::EFS::FileSystem")
def test_validateBackupPolicy(self):
with self.assertRaises(ValueError):
backup_policy = efs.BackupPolicy("backupPolicy", Status="NOTOK")
backup_policy.to_dict()
backup_policy = efs.BackupPolicy("backupPolicy", Status="ENABLED")
result = backup_policy.to_dict()
self.assertEqual(result["Status"], "ENABLED")
if __name__ == "__main__":
unittest.main()
| bsd-2-clause | 241,375,281,587,382,340 | 30.964912 | 83 | 0.642151 | false |
cloudify-cosmo/cloudify-manager | tests/integration_tests/tests/agentless_tests/test_license.py | 1 | 8924 | ########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import pytest
import tempfile
from datetime import datetime, timedelta
import requests
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource
from integration_tests.framework.constants import INSERT_MOCK_LICENSE_QUERY
from cloudify_rest_client.exceptions import (
MissingCloudifyLicense,
ExpiredCloudifyLicense,
CloudifyClientError
)
from integration_tests.tests.utils import run_postgresql_command
pytestmark = pytest.mark.group_premium
LICENSE_ENGINE_URL = 'https://us-central1-omer-tenant.cloudfunctions' \
'.net/LicenseEngineHubSpot'
class TestLicense(AgentlessTestCase):
def setUp(self):
super(TestLicense, self).setUp()
run_postgresql_command(self.env.container_id, "DELETE FROM licenses")
def tearDown(self):
        super(TestLicense, self).tearDown()
run_postgresql_command(self.env.container_id, "DELETE FROM licenses")
run_postgresql_command(self.env.container_id,
INSERT_MOCK_LICENSE_QUERY)
def test_error_when_no_license_on_manager(self):
"""
Most REST endpoints are blocked when there is no Cloudify license
on the Manager, the `blueprints` endpoint was chosen randomly
for this test.
"""
self.assertRaises(MissingCloudifyLicense,
self.client.blueprints.list)
def test_error_when_no_license_substring_endpoint(self):
"""
Restricted endpoint that partly contains allowed endpoint should not
be allowed. For example: `snapshot-status` is not allowed even
though `status` is allowed
"""
self.assertRaises(MissingCloudifyLicense,
self.client.snapshots.get_status)
def test_no_error_when_using_allowed_endpoints(self):
"""
The following endpoints are allowed even when there is no Cloudify
license on the Manager: tenants, status, license, snapshots, tokens,
maintenance.
"""
self.client.tokens.get()
self.client.tenants.list()
self.client.license.list()
self.client.manager.get_status()
def test_no_error_when_using_get_user(self):
self.client.users.get('admin', _get_data=True)
def test_error_when_using_allowed_endpoint_and_forbidden_method(self):
self.assertRaises(MissingCloudifyLicense,
self.client.users.create,
username='user', password='password', role='default')
def test_upload_valid_paying_license(self):
self._upload_license('test_valid_paying_license.yaml')
self._verify_license(expired=False, trial=False)
self.client.blueprints.list()
def test_upload_valid_trial_license(self):
self._upload_license('test_valid_trial_license.yaml')
self._verify_license(expired=False, trial=True)
self.client.blueprints.list()
def test_error_when_uploading_tampered_trial_license(self):
with self.assertRaisesRegex(CloudifyClientError,
'could not be verified'):
self._upload_license('test_tampered_trial_license.yaml')
def test_error_when_uploading_tampered_paying_license(self):
with self.assertRaisesRegex(CloudifyClientError,
'could not be verified'):
self._upload_license('test_tampered_paying_license.yaml')
def test_error_when_using_expired_trial_license(self):
self._upload_license('test_expired_trial_license.yaml')
self._verify_license(expired=True, trial=True)
with self.assertRaisesRegex(ExpiredCloudifyLicense, 'expired'):
self.client.blueprints.list()
def test_using_expired_paying_license(self):
self._upload_license('test_expired_paying_license.yaml')
self._verify_license(expired=True, trial=False)
self.client.blueprints.list()
def test_upload_two_licenses(self):
"""
There can only be one Cloudify license on the Manager, so each time
a user uploads a license it runs over the old license.
"""
self._upload_license('test_expired_paying_license.yaml')
self._verify_license(expired=True, trial=False)
self._upload_license('test_valid_trial_license.yaml')
self._verify_license(expired=False, trial=True)
def test_upload_tampered_license_after_valid_license(self):
"""
- Upload a valid Cloudify license
- Try (and fail) to upload a tampered license
- Make sure REST is not blocked
"""
self._upload_license('test_valid_paying_license.yaml')
self._verify_license(expired=False, trial=False)
with self.assertRaisesRegex(CloudifyClientError,
'could not be verified'):
self._upload_license('test_tampered_paying_license.yaml')
self.client.blueprints.list()
def test_valid_for_60_days_license(self):
"""
Instead of a specific expiration date this license is valid for 60 days
from Manager installation date.
"""
self._upload_license('test_60_days_license.yaml')
license = self.client.license.list().items[0]
self._verify_license(expired=False, trial=True)
expected_date = datetime.utcnow() + timedelta(days=60)
expiration_date = datetime.strptime(license['expiration_date'],
'%Y-%m-%dT%H:%M:%S.%fZ')
self.assertLessEqual(expiration_date, expected_date)
self.client.blueprints.list()
def test_license_valid_for_all_version(self):
"""
Cloudify licenses with empty `version` value are valid for all
Cloudify versions.
"""
self._upload_license('test_no_version_license.yaml')
license = self.client.license.list().items[0]
self._verify_license(expired=False, trial=True)
self.assertIsNone(license['cloudify_version'])
self.client.blueprints.list()
def test_error_when_uploading_license_with_old_version(self):
"""
Try (and fail) to upload a Cloudify license that is valid for
Cloudify 4.5.5
"""
with self.assertRaisesRegex(CloudifyClientError, 'versions'):
self._upload_license('test_version_4_5_5_license.yaml')
def test_license_with_no_expiration_date(self):
"""
Cloudify licenses with empty `expiration_date` value are valid
for good.
"""
self._upload_license('test_no_expiration_date_license.yaml')
license = self.client.license.list().items[0]
self._verify_license(expired=False, trial=True)
self.assertIsNone(license['expiration_date'])
self.client.blueprints.list()
def test_gcp_license_engine(self):
"""
Send request to the GCP license engine and make sure the license
received is indeed valid.
"""
json_data = {'vid': 5464472,
'properties':
{'email': {'value': '[email protected]'}}}
response = requests.post(url=LICENSE_ENGINE_URL, json=json_data)
with tempfile.NamedTemporaryFile(mode='w') as license_file:
license_file.write(response.text)
license_file.flush()
self.client.license.upload(license_file.name)
self._verify_license(expired=False, trial=True)
license = self.client.license.list().items[0]
customer_id = self._get_customer_id(json_data)
self.assertEqual(license['customer_id'], customer_id)
self.client.blueprints.list()
@staticmethod
def _get_customer_id(json_data):
vid = str(json_data['vid'])
email = json_data['properties']['email']['value']
domain = email.split('@')[1] + '-'
return 'TRL-' + domain + vid
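    # Worked example (illustrative only, not asserted anywhere in this suite):
    # for the json_data used above (vid=5464472, email='[email protected]')
    # this helper returns 'TRL-cloudify.co-5464472'.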
def _upload_license(self, license):
license_path = get_resource('licenses/{0}'.format(license))
self.client.license.upload(license_path)
def _verify_license(self, expired, trial):
license = self.client.license.list().items[0]
self.assertEqual(license['expired'], expired)
self.assertEqual(license['trial'], trial)
| apache-2.0 | -1,961,958,292,138,700,500 | 39.563636 | 79 | 0.647243 | false |
nocarryr/vidhub-control | tests/kv/test_kv_presets.py | 1 | 5322 | import asyncio
import pytest
@pytest.mark.asyncio
async def test_kv_presets(kivy_app, KvEventWaiter):
from vidhubcontrol.backends import DummyBackend
kv_waiter = KvEventWaiter()
config = kivy_app.vidhub_config
vidhub = await DummyBackend.create_async(device_id='dummy1', device_name='Dummy 1')
kv_waiter.bind(kivy_app, 'vidhubs')
await config.add_vidhub(vidhub)
await kv_waiter.wait()
kv_waiter.bind(kivy_app.root, 'active_widget')
kivy_app.selected_device = vidhub
await kv_waiter.wait()
kv_waiter.unbind(kivy_app.root, 'active_widget')
vidhub_widget = kivy_app.root.active_widget.vidhub_widget
input_button_grid = vidhub_widget.input_button_grid
output_button_grid = vidhub_widget.output_button_grid
preset_button_grid = vidhub_widget.preset_button_grid
await kivy_app.wait_for_widget_init(vidhub_widget)
store_btn = None
for w in preset_button_grid.walk():
if w.__class__.__name__ != 'Button':
continue
if w.text == 'Store':
store_btn = w
break
assert store_btn is not None
kv_waiter.bind(preset_button_grid, 'record_enable')
store_btn.dispatch('on_release')
await kv_waiter.wait()
assert preset_button_grid.record_enable is True
assert store_btn.state == 'down'
kv_waiter.unbind(preset_button_grid, 'record_enable')
kv_waiter.bind(preset_button_grid, 'selected_buttons')
xpts1 = [0] * vidhub.num_outputs
await vidhub.set_crosspoints(*((i, v) for i, v in enumerate(xpts1)))
# Store to preset index 0
preset_button_grid.button_widgets[0].dispatch('on_release')
await kv_waiter.wait()
preset1 = vidhub.presets[0]
assert preset_button_grid.record_enable is False
assert len(preset_button_grid.selected_buttons) == 1
assert preset_button_grid.selected_buttons[0] == preset1.index
assert preset_button_grid.button_widgets[preset1.index].text == preset1.name
# Set crosspoints - preset 0 should be inactive
xpts2 = [1] * vidhub.num_outputs
await vidhub.set_crosspoints(*((i, v) for i, v in enumerate(xpts2)))
await kv_waiter.wait()
assert len(preset_button_grid.selected_buttons) == 0
# Store to preset index 1
store_btn.dispatch('on_release')
await asyncio.sleep(0)
preset_button_grid.button_widgets[1].dispatch('on_release')
await kv_waiter.wait()
preset2 = vidhub.presets[1]
assert len(preset_button_grid.selected_buttons) == 1
assert preset_button_grid.selected_buttons[0] == preset2.index
assert preset_button_grid.button_widgets[preset2.index].text == preset2.name
# Recall preset index 0
preset_button_grid.button_widgets[0].dispatch('on_release')
await kv_waiter.wait()
# Allow time for all events to dispatch
if len(preset_button_grid.selected_buttons) == 0:
await kv_waiter.wait()
assert len(preset_button_grid.selected_buttons) == 1
assert preset_button_grid.selected_buttons[0] == preset1.index
kv_waiter.unbind(preset_button_grid, 'selected_buttons')
await asyncio.sleep(.1)
await kv_waiter.clear()
# Test preset name binding
kv_waiter.bind(preset_button_grid.button_widgets[0], 'text')
preset1.name = 'foo'
await kv_waiter.wait()
assert preset_button_grid.button_widgets[0].text == preset1.name
assert preset_button_grid.button_labels[0] == preset1.name
# Test preset add/store from vidhub
print('test add/store from vidhub')
kv_waiter.unbind(preset_button_grid.button_widgets[0], 'text')
await asyncio.sleep(.1)
assert kv_waiter.empty()
kv_waiter.bind(preset_button_grid, 'selected_buttons')
xpts3 = [2] * vidhub.num_outputs
await vidhub.set_crosspoints(*((i, v) for i, v in enumerate(xpts3)))
print('set xpts3')
await kv_waiter.wait()
assert len(preset_button_grid.selected_buttons) == 0
print('storing preset3')
preset3 = await vidhub.store_preset(index=8)
await kv_waiter.wait()
assert len(preset_button_grid.selected_buttons) == 1
assert preset_button_grid.selected_buttons[0] == preset3.index
assert preset_button_grid.button_widgets[preset3.index].text == preset3.name
kv_waiter.bind(preset_button_grid, 'button_labels')
print('adding preset4')
preset4 = await vidhub.add_preset()
await kv_waiter.wait()
# Allow the rest of the binding events to propagate
await asyncio.sleep(0)
assert preset4.index not in preset_button_grid.selected_buttons
print('rename preset4')
preset4.name = 'foobarbaz'
await kv_waiter.wait()
btn = preset_button_grid.button_widgets[preset4.index]
assert btn.text == preset_button_grid.button_labels[preset4.index] == preset4.name
assert btn.state == 'normal'
print('store preset4')
await preset4.store()
await kv_waiter.wait()
assert len(preset_button_grid.selected_buttons) == 2
assert btn.state == 'down'
assert preset3.index in preset_button_grid.selected_buttons
assert preset4.index in preset_button_grid.selected_buttons
print('resetting crosspoints')
await vidhub.set_crosspoint(0, 0)
await kv_waiter.wait()
if len(preset_button_grid.selected_buttons):
await kv_waiter.wait()
assert len(preset_button_grid.selected_buttons) == 0
| gpl-3.0 | 8,188,140,257,634,002,000 | 31.851852 | 87 | 0.689402 | false |
skdaccess/skdaccess | skdaccess/examples/terminal_groundwater_example.py | 2 | 2474 | #!/usr/bin/env python3
# The MIT License (MIT)
# Copyright (c) 2016,2017 Massachusetts Institute of Technology
#
# Author: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# import data fetcher and AutoParams
from skdaccess.geo.groundwater import DataFetcher as WDF
from skdaccess.framework.param_class import *
# Create a data fetcher of stations within
# 35 < Latitude < 38, and -119 < Longitude < -118
# in the time period 2007-01-01 to 2016-12-31
fullDF = WDF([AutoParam(35), AutoParam(38), AutoParam(-119), AutoParam(-118)],
'2007-01-01','2016-12-31',cutoff=0.0)
# Access data wrapper
fullDW = fullDF.output()
# Access metadata
meta_data = WDF.getStationMetadata()
# Get an iterator to the data
dataIt = fullDW.getIterator()
# The iterator returns the data label and the data.
label_1, data_1 = next(dataIt)
label_2, data_2 = next(dataIt)
# Try to plot the first two groundwater stations:
try:
import matplotlib.pyplot as plt
plt.figure().set_size_inches(14,4)
plt.ylabel('Median Depth to Water Level')
plt.title(label_1)
plt.plot(data_1['Median Depth to Water']);
plt.figure().set_size_inches(14,4)
plt.ylabel('Median Depth to Water Level')
plt.title(label_2);
plt.plot(data_2['Median Depth to Water'],color='red')
plt.show()
except ImportError as e:
pass
| mit | -3,362,070,544,281,174,500 | 33.361111 | 79 | 0.738884 | false |
atmtools/typhon | typhon/math/array.py | 1 | 6240 | """Functions operating on arrays."""
# Any commits made to this module between 2015-05-01 and 2017-03-01
# by Gerrit Holl are developed for the EC project “Fidelity and
# Uncertainty in Climate Data Records from Earth Observations (FIDUCEO)”.
# Grant agreement: 638822
#
# All those contributions are dual-licensed under the MIT license for use
# in typhon, and the GNU General Public License version 3.
import numpy as np
import scipy.stats
def localmin(arr):
"""Find local minima for 1-D array
Given a 1-dimensional numpy.ndarray, return the locations of any local
minimum as a boolean array. The first and last item are always
considered False.
Arguments:
        arr (numpy.ndarray): 1-D ndarray for which to find local
minima. Should have a numeric dtype.
Returns:
numpy.ndarray with dtype `bool`. True for any element that is
strictly smaller than both neighbouring elements. First and last
element are always False.
"""
localmin = np.hstack(
(False, (arr[1:-1] < arr[0:-2]) & (arr[1:-1] < arr[2:]), False)
)
return localmin
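# Illustrative example (values chosen for demonstration only):
#   localmin(np.array([3, 1, 2, 0, 4]))  ->  array([False,  True, False,  True, False])
# The first and last elements are always False by construction.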
def limit_ndarray(M, limits):
"""Select elements from structured ndarray based on value ranges
This function filters a structured ndarray based on ranges defined for
zero or more fields. For each field f with limits (lo, hi), it will
select only those elements where lo<=X[f]<hi.
>>> X = array([(2, 3), (4, 5), (8, 2), (5, 1)],
dtype=[("A", "i4"), ("B", "i4")])
>>> print(limit_ndarray(X, {"A": (2, 5)}))
[(2, 3) (4, 5)]
>>> X = array([([2, 3], 3), ([4, 6], 5), ([8, 3], 2), ([5, 3], 1)],
dtype=[("A", "i4", 2), ("B", "i4")])
>>> print(limit_ndarray(X, {"A": (2, 5, "all")}))
[([2, 3], 3)]
Arguments:
M (numpy.ndarray): 1-D structured ndarray
limits (dict): Dictionary with limits. Keys must correspond to
fields in M. If this is a scalar field
(`M.dtype[field].shape==()`), values are tuples (lo, hi).
If this is a multidimensional field, values are tuples (lo,
hi, mode), where mode must be either `all` or `any`.
Values in the range [lo, hi) are retained, applying all or any
when needed.
Returns:
ndarray subset of M. This is a view, not a copy.
"""
selection = np.ones(shape=M.shape, dtype="?")
for (field, val) in limits.items():
ndim = len(M.dtype[field].shape)
if ndim == 0:
(lo, hi) = val
selection = selection & (M[field] >= lo) & (M[field] < hi)
else:
(lo, hi, mode) = val
lelo = M[field] >= lo
sthi = M[field] < hi
while lelo.ndim > 1:
lelo = getattr(lelo, mode)(-1)
sthi = getattr(sthi, mode)(-1)
selection = selection & lelo & sthi
return M[selection]
def parity(v):
"""Vectorised parity-checking.
    For any ndarray with an np.integer dtype, return an equally shaped
array with the bit parity for each element.
Arguments:
v (numpy.ndarray): Array of integer dtype
Returns:
ndarray with uint8 dtype with the parity for each value in v
"""
v = v.copy() # don't ruin original
parity = np.zeros(dtype=">u1", shape=v.shape)
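    # Kernighan's trick below: `v &= v - 1` clears the lowest set bit of each
    # element, so each counter is incremented once per set bit.  The returned
    # value is therefore the per-element count of set bits; even/odd parity in
    # the strict sense is this value modulo 2.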
while v.any():
parity[v != 0] += 1
v &= v - 1
return parity
def mad_outliers(arr, cutoff=10, mad0="raise"):
"""Mask out mad outliers
Mask out any values that are more than N times the median absolute
    deviation from the median.
Although I (Gerrit Holl) came up with this myself, it's also
documented at:
http://eurekastatistics.com/using-the-median-absolute-deviation-to-find-outliers/
    except that I rolled my own approach for "what if mad==0".
Note: If all values except one are constant, it is not possible to
determine whether the remaining one is an outlier or “reasonably
close” to the rest, without additional hints. In this case, some
outliers may go unnoticed.
Arguments:
arr (numpy.ndarray): n-D array with numeric dtype
cutoff (int): Maximum tolerable normalised fractional distance
mad0 (str): What to do if mad=0. Can be 'raise', 'ignore', or
'perc'. In case of 'perc', will search for the lowest
percentile at which the percentile absolute deviation is
nonzero, increase the cutoff by the fractional approach toward
percentile 100, and use that percentile instead. So if the
first non-zero is at percentile 75%, it will use the
            75th-percentile-absolute-deviation and increase the cutoff by
a factor (100 - 50)/(100 - 75).
Returns:
ndarray with bool dtype, True for outliers
"""
if arr.ptp() == 0:
return np.zeros(shape=arr.shape, dtype="?")
ad = abs(arr - np.ma.median(arr))
mad = np.ma.median(ad)
if mad == 0:
if mad0 == "raise":
raise ValueError("Cannot filter outliers, MAD=0")
elif mad0 == "perc":
# try other percentiles
perc = np.r_[np.arange(50, 99, 1), np.linspace(99, 100, 100)]
pad = scipy.stats.scoreatpercentile(ad, perc)
if (pad == 0).all(): # all constant…?
raise ValueError("These data are weird!")
p_i = pad.nonzero()[0][0]
cutoff *= (100 - 50) / (100 - perc[p_i])
return (ad / pad[p_i]) > cutoff
elif mad is np.ma.masked:
# all are masked already…
return np.ones(shape=ad.shape, dtype="?")
else:
return (ad / mad) > cutoff
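# Illustrative example (values chosen for demonstration only):
#   mad_outliers(np.array([1., 1.1, 0.9, 1.05, 0.95, 50.]))
#   ->  array([False, False, False, False, False,  True])   # with the default cutoff=10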
def argclosest(array, value, retvalue=False):
"""Returns the index of the closest value in array.
Parameters:
array (ndarray): Input array.
value (float): Value to compare to.
retvalue (bool): If True, return the index and the closest value.
Returns:
int, float:
Index of closest value, Closest value (if ``retvalue`` is True)
"""
idx = np.abs(np.asarray(array) - value).argmin()
return (idx, array[idx]) if retvalue else idx
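# Illustrative example (values chosen for demonstration only):
#   argclosest([0.0, 0.5, 1.0], 0.4)                 ->  1
#   argclosest([0.0, 0.5, 1.0], 0.4, retvalue=True)  ->  (1, 0.5)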
| mit | -2,880,749,940,338,907,600 | 30.77551 | 85 | 0.594733 | false |
domfarolino/material-message-board | app/google.py | 1 | 3680 | """
OAuthSignIn sub-class to support Google login
"""
from oauth import *
import ast  # used by getMe() and refreshAccessToken(); a local import in __init__ alone is not enough
class GoogleSignIn(OAuthSignIn):
public_name = "Google"
provider_name = "google"
def __init__(self):
import ast
print "GoogleSignIn Object __init__()"
super(GoogleSignIn, self).__init__(self.provider_name)
print " * Creating googleinfo variable to store latest api information"
googleinfo = urllib2.urlopen('https://accounts.google.com/.well-known/openid-configuration')
print " * Creating json from googleinfo"
google_params = json.load(googleinfo)
self.service = OAuth2Service(name='google', client_id=self.consumer_id, client_secret=self.consumer_secret, authorize_url=google_params.get('authorization_endpoint'), base_url=google_params.get('userinfo_endpoint'), access_token_url=google_params.get('token_endpoint'))
self.oauth_session = None
print " * GoogleSignIn Object __init__() complete"
def authorize(self):
print " * Google Authorize"
print " * AUTHORIZE"
return redirect(self.service.get_authorize_url(scope='email', response_type='code', redirect_uri=self.get_callback_url(), access_type='offline'))
def callback(self):
print " * Google Callback"
print " * CALLBACK"
if 'code' not in request.args: return None
print " * Google creating self.oauth_session"
self.oauth_session = self.service.get_auth_session(data={'code': request.args['code'], 'grant_type': 'authorization_code', 'redirect_uri': self.get_callback_url() }, decoder=json.loads)
print vars(self.oauth_session)
session['access_token'] = self.oauth_session.access_token
return self.getMe()
@OAuthSignIn.auth_request
def getMe(self):
print " * Google getMe()"
me = self.oauth_session.get('').json()
social_id = me['sub']
name = me['name']
username = me['email'].split('@')[0]
email = me['email']
profile_picture = me['picture']
access_token = session['access_token']
access_token_info = urllib2.urlopen('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=' + access_token)
access_token_info = json.load(access_token_info)
access_token_exp = access_token_info['expires_in']+time.time()
access_token_secret = None # Google does not use access_token_secret
refresh_token = None
#print self.oauth_session.access_token_response._content.json()
try:
refresh_token = str(ast.literal_eval(self.oauth_session.access_token_response._content)['refresh_token'])
print " * This is user's first time giving access to this site"
except:
print " * User must've already granted access to this site"
# id, name, username, email, picture, access_token, access_token_exp, access_token,_secret refresh_token
return social_id, name, username, email, profile_picture, access_token, access_token_exp, access_token_secret, refresh_token
def refreshAccessToken(self, refresh_token):
data = {'client_id':self.consumer_id,
'client_secret': self.consumer_secret,
'grant_type': 'refresh_token',
'refresh_token': refresh_token
}
self.oauth_session = self.service.get_auth_session(data=data, decoder=ast.literal_eval)
session['access_token'] = self.oauth_session.access_token
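    # Illustrative note: callers are expected to invoke refreshAccessToken() with the
    # stored refresh_token once the saved access_token_exp timestamp has passed; per
    # the logic in getMe(), Google only returns a refresh_token the first time the
    # user grants access to this site.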
def getAccessTokenAndExpire(self):
        # Assume the session-stored token is the one to validate here (it is what the
        # rest of this class uses); the original line referenced an undefined name.
        access_token = session['access_token']
        access_token_info = urllib2.urlopen('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=' + access_token)
access_token_info = json.load(access_token_info)
return access_token_info['access_token'], access_token_info['expires_in']+time.time()
@OAuthSignIn.auth_request
def getGeneralData(self):
return self.oauth_session.get('').json() | gpl-3.0 | 1,862,180,900,680,866,300 | 45.0125 | 273 | 0.692391 | false |
captify-nzavgorodnii/twitter_data | mongodb_handler.py | 1 | 5213 | import logging
from pymongo import MongoClient
class MongoDBHandler:
def __init__(self, config_name):
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='>>> %(asctime)s %(levelname)s %(message)s')
from configparser import ConfigParser
from urllib.parse import quote_plus
config = ConfigParser()
config.read(config_name)
mongodb_uri = "mongodb://%s:%s@%s:%s" % (quote_plus(config['mongodb']['username']),
quote_plus(config['mongodb']['password']),
config['mongodb']['host'],
config['mongodb']['port'])
# print(mongodb_uri)
# MongoDB connection
self.client = MongoClient(mongodb_uri)
self.db_name = config['mongodb']['db_name']
self.db = self.client[self.db_name]
from pymongo.errors import ConnectionFailure
try:
# The ismaster command is cheap and does not require auth.
self.client.admin.command('ismaster')
except ConnectionFailure:
print("Server not available")
print(self.client.database_names())
def save_user_timeline(self, item):
db = self.db
        print(item['user_id'])
        print(type(item['user_id']))  # the bare `type(...)` expression was a no-op; print it for debugging
# TODO: de-hardcode DB name
twt = db.tweets.find({'user_id': item['user_id']})
if twt.count() == 0:
# save user timeline
print("New account:", item['screen_name'], item['user_id'], item['n_tweets'], item['lang'])
db.tweets.insert_one(item)
else:
# update the existing account record
res = db.tweets.replace_one(
{'user_id': item['user_id']}, item
)
# result of the update
if res.matched_count == 0:
print("no match for user_id: ", item['user_id'])
elif res.modified_count == 0:
print("no modification for user_id: ", item['user_id'])
else:
print("replaced ", item['screen_name'], item['user_id'], item['n_tweets'], item['lang'])
def aggregate_tweets(self, timeline, lang=None):
"""
Get the user's timeline with the list of tweets in the following format and aggregate into one document.
{'lang': 'en',
'n_tweets': 100,
'parent_account': 'Honda',
'screen_name': 'Kevinloveslife',
'user_id': 701100381380546561,
'tweets': [{'country': None,
'country_code': None,
'created_at': 'Sun May 14 23:38:58 +0000 2017',
'favorite_count': 0,
'id': 863901480285241346,
'lang': 'en',
'retweet_count': 0,
'text': 'Last time flying @united. pilot Joe is a complete ass '
'hole. United flight 3556. Yells at us for using the '
'bathroom when we are delayed 1hr.'},
{'country': None,
'country_code': None,
'created_at': 'Fri May 12 00:16:08 +0000 2017',
'favorite_count': 1,
'id': 862823672054243328,
'lang': 'en',
'retweet_count': 0,
'text': "@DMC_Ryan I'm literally sobbing in the airport while "
'listening to podcast unlocked and looking at pictures of '
'Maggie. Dogs are the best.'}]
}
:param lang: str
:param timeline: dict
:return: dict('user_id': account_id, 'all_tweets': str(concatenated_tweets))
"""
if lang is None:
twt_doc = ' '.join([t['text'] for t in timeline['tweets']])
else:
twt_doc = ' '.join([t['text'] for t in timeline['tweets'] if t['lang'] == lang])
return {'user_id': timeline['user_id'], 'all_tweets': twt_doc}
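    # Illustrative result: for the sample timeline shown in the docstring above,
    # aggregate_tweets(timeline) returns
    #   {'user_id': 701100381380546561,
    #    'all_tweets': 'Last time flying @united. ... Dogs are the best.'}
    # i.e. the tweet texts joined by single spaces.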
def get_timelines_for_parent(self, parent_name):
"""
Get timelines for all friends (following) for this twitter account and return tweets aggregated for each user.
:param parent_name:
:return: [{'user_id': 110, 'all_tweets': 'Tweet 11. Tweet 12. Tweet 13'},
{'user_id': 220, 'all_tweets': 'Tweet 21. Tweet 22. Tweet 23'}]
"""
db = self.db
cursor = db.tweets.find({'parent_account': parent_name})
friends_tweets = []
for tl in range(cursor.count()):
friends_tweets.append(self.aggregate_tweets(cursor.next()))
return friends_tweets
def get_user_timeline(self, account_name):
"""
Get timeline for specified user.
:param account_name: str
        :return: the stored timeline document for the account (not aggregated),
            e.g. {'user_id': 110, 'screen_name': '...', 'tweets': [...]}
"""
db = self.db
cursor = db.tweets.find({'screen_name': account_name})
if cursor.count() > 0:
return cursor.next()
else:
logging.error("There are {} entries in DB for user {}".format(cursor.count(), account_name))
raise BaseException("Tweet for specified account not found")
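# Illustrative usage sketch (assumes a config file such as 'config.ini' with a
# [mongodb] section providing username, password, host, port and db_name, and that
# timelines are stored in the 'tweets' collection):
#   handler = MongoDBHandler('config.ini')
#   handler.save_user_timeline(timeline_dict)
#   friends_docs = handler.get_timelines_for_parent('Honda')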
| bsd-3-clause | -273,705,593,580,747,550 | 39.726563 | 118 | 0.530213 | false |
adykstra/mne-python | mne/viz/__init__.py | 1 | 1666 | """Visualization routines."""
from .topomap import (plot_evoked_topomap, plot_projs_topomap, plot_arrowmap,
plot_ica_components, plot_tfr_topomap, plot_topomap,
plot_epochs_psd_topomap, plot_layout)
from .topo import plot_topo_image_epochs, iter_topography
from .utils import (tight_layout, mne_analyze_colormap, compare_fiff,
ClickableImage, add_background_image, plot_sensors)
from ._3d import (plot_sparse_source_estimates, plot_source_estimates,
plot_vector_source_estimates, plot_evoked_field,
plot_dipole_locations, snapshot_brain_montage,
plot_head_positions, plot_alignment, plot_volume_source_estimates)
from .misc import (plot_cov, plot_csd, plot_bem, plot_events,
plot_source_spectrogram, _get_presser,
plot_dipole_amplitudes, plot_ideal_filter, plot_filter,
adjust_axes)
from .evoked import (plot_evoked, plot_evoked_image, plot_evoked_white,
plot_snr_estimate, plot_evoked_topo,
plot_evoked_joint, plot_compare_evokeds)
from .circle import plot_connectivity_circle, circular_layout
from .epochs import (plot_drop_log, plot_epochs, plot_epochs_psd,
plot_epochs_image)
from .raw import plot_raw, plot_raw_psd, plot_raw_psd_topo
from .ica import (plot_ica_scores, plot_ica_sources, plot_ica_overlay,
_plot_sources_raw, _plot_sources_epochs, plot_ica_properties)
from .montage import plot_montage
from .backends.renderer import (set_3d_backend, get_3d_backend, use_3d_backend)
from . import backends
| bsd-3-clause | -1,444,097,416,788,562,700 | 58.5 | 84 | 0.666267 | false |
iris-edu/ispaq | ispaq/orientationCheck_metrics.py | 1 | 17555 | """
ISPAQ Business Logic for Orientation Check Metrics.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import pandas as pd
from .concierge import NoAvailableDataError
from . import utils
from . import irisseismic
from . import irismustangmetrics
from obspy import UTCDateTime
from rpy2.robjects import pandas2ri
import rpy2.robjects as ro
from rpy2.robjects import numpy2ri
def orientationCheck_metrics(concierge):
"""
Generate *orientationCheck* metrics.
:type concierge: :class:`~ispaq.concierge.Concierge`
    :param concierge: Data access expediter.
:rtype: pandas dataframe
:return: Dataframe of simple metrics.
.. rubric:: Example
TODO: doctest examples
"""
# Get the logger from the concierge
logger = concierge.logger
# Default parameters from IRISMustangUtils::generateMetrics_orientationCheck
channelFilter = "[BCHLM][HX]."
logger.debug("channelFilter %s" % channelFilter)
minmag = 7.0
maxdepth = 100
eventMinradius = 0
eventMaxradius = 180
windowSecsBefore = 20
windowSecsAfter = 600
taper = 0.05
filterArgs = [2,0.02,0.04]
degreeIncrement = 1
# Sanity check for metadata
if concierge.station_url is None:
logger.warning('No station metadata found for orientationCheck metrics')
return None
# Get the seismic events in this time period
events = concierge.get_event(minmag=minmag)
# Sanity check
if events is None or events.shape[0] == 0:
logger.info('No events found for orientationCheck metrics.')
return None
# Container for all of the metrics dataframes generated
dataframes = []
#############################################################
## Loop through each event.
#############################################################
logger.info('Calculating orientationCheck metrics for %d events' % events.shape[0])
for (index, event) in events.iterrows():
logger.info('Magnitude %3.1f event: %s %sT%s:%s:%sZ' % (event.magnitude, event.eventLocationName, event.time.date, str(event.time.hour).zfill(2), str(event.time.minute).zfill(2), str(event.time.second).zfill(2)))
# Sanity check
if pd.isnull(event.latitude) or pd.isnull(event.longitude):
logger.info('Skipping event because of missing longitude or latitude')
continue
# Sanity check
if pd.isnull(event.depth):
logger.info('Skipping event because of missing depth')
continue
# Get the data availability around this event
# NOTE: Get availability from 2 minutes before event until 28 minutes after
# Get the data availability using spatial search parameters
halfHourStart = event.time - 60 * 2
halfHourEnd = event.time + 60 * 28
logger.debug("Looking for metadata from %s to %s" % (halfHourStart.strftime("%Y-%m-%dT%H:%M:%S"),halfHourEnd.strftime("%Y-%m-%dT%H:%M:%S")))
# crossCorrelation requires 3 channels, look for all 3 even if input SNCL pattern is for one (i.e., TA.109..BHZ will look for TA.109C..BH?)
original_sncl_patterns = concierge.sncl_patterns
new_sncl_patterns = []
UR=["temp0","temp1","temp2","temp3"]
for sncl_pattern in concierge.sncl_patterns:
UR[concierge.netOrder] = sncl_pattern.split('.')[concierge.netOrder]
UR[concierge.staOrder] = sncl_pattern.split('.')[concierge.staOrder]
UR[concierge.locOrder] = sncl_pattern.split('.')[concierge.locOrder]
UR[concierge.chanOrder] = sncl_pattern.split('.')[concierge.chanOrder]
if len(UR[concierge.chanOrder]) == 3:
UR[concierge.chanOrder]=UR[concierge.chanOrder][:-1] + '?'
new_sncl_pattern = ".".join(UR)
new_sncl_patterns.append(new_sncl_pattern)
concierge.sncl_patterns = new_sncl_patterns
try:
availability = concierge.get_availability(starttime=halfHourStart, endtime=halfHourEnd,
longitude=event.longitude, latitude=event.latitude,
minradius=eventMinradius, maxradius=eventMaxradius)
except NoAvailableDataError as e:
logger.info('Skipping event with no available data')
concierge.sncl_patterns = original_sncl_patterns
continue
except Exception as e:
logger.warning('Skipping event because concierge.get_availability failed: %s' % (e))
concierge.sncl_patterns = original_sncl_patterns
continue
if availability is None:
logger.info("Skipping event with no available data")
concierge.sncl_patterns = original_sncl_patterns
continue
concierge.sncl_patterns = original_sncl_patterns
# Apply the channelFilter
availability = availability[availability.channel.str.contains(channelFilter)]
#logger.debug(availability)
# Sanity check that some SNCLs exist
if availability.shape[0] == 0:
logger.info('Skipping event because no stations are available')
continue
############################################################
# Loop through the SN.L combinations
############################################################
# Channel types (as opposed to orientation) will contain only the first two characters
channelType = availability.channel.apply(lambda x: x[0:2])
# Find unique network-station-location combinations
sn_lIds = availability.network + '.' + availability.station + '.' + availability.location + '.' + channelType
# Add sn_lId to the availability dataframe for easy detection
availability.insert(availability.shape[1],'sn_lId',sn_lIds)
# ----- All available SNCLs -------------------------------------------------
for idx, sn_lId in enumerate(sorted(list(set(sn_lIds)))):
logger.info('%03d Calculating orientationCheck metrics for %s' % (idx, sn_lId))
sn_lAvailability = availability[availability.sn_lId == sn_lId]
if sn_lAvailability.shape[0] != 3:
logger.info('Skipping %s because only %d channels were found at this SN.L (3 required)' % (sn_lId, sn_lAvailability.shape[0]))
continue
# If any of the channels don't have metadata, skip this sn.l
if any(sn_lAvailability.samplerate.isnull().values):
logger.info('Skipping %s because at least one channel is missing metadata' % sn_lId)
continue
# Determine N, E and Z channels
N_or_1_mask = sn_lAvailability.channel.str.contains('..N') | sn_lAvailability.channel.str.contains('..1')
E_or_2_mask = sn_lAvailability.channel.str.contains('..E') | sn_lAvailability.channel.str.contains('..2')
Z_mask = sn_lAvailability.channel.str.contains('..Z')
Channel_1 = sn_lAvailability[N_or_1_mask].iloc[0]
Channel_2 = sn_lAvailability[E_or_2_mask].iloc[0]
ZChannel = sn_lAvailability[Z_mask].iloc[0]
# Calculate various distances and surface travel time
numpy2ri.activate()
# pandas2ri.activate()
distaz = irisseismic.getDistaz(event.latitude,event.longitude,ZChannel.latitude,ZChannel.longitude)
surfaceDistance = irisseismic.surfaceDistance(event.latitude,event.longitude,ZChannel.latitude,ZChannel.longitude)[0]
surfaceTravelTime = surfaceDistance / 4.0 # km / (km/sec)
# pandas2ri.deactivate()
numpy2ri.deactivate()
# Get the data -----------------------------------------
# request 3-channel instrument-corrected (or scaled) data for one station/loc/sample_rate
# starting 20s before the predicted Rayleigh wave and ending 600s after.
# Apply a 10% cosine taper to each channel
# Bandpass filter from 0.02 to 0.04 Hz (50-25s)
# Take the Hilbert transform of the Z channel
windowStart = event.time + surfaceTravelTime - windowSecsBefore
windowEnd = event.time + surfaceTravelTime + windowSecsAfter
logger.debug("Looking for data for %s, %s, %s from %s to %s" % (Channel_1.snclId, Channel_2.snclId, ZChannel.snclId, windowStart.strftime("%Y-%m-%dT%H:%M:%S"), windowEnd.strftime("%Y-%m-%dT%H:%M:%S")))
try:
stN = concierge.get_dataselect(Channel_1.network, Channel_1.station, Channel_1.location, Channel_1.channel,
windowStart, windowEnd, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.info('No data available for %s' % (Channel_1.snclId[:-1]))
                elif str(e).lower().find('multiple epochs') > -1:
                    logger.info('Skipping %s because multiple metadata epochs found' % (Channel_1.snclId))
else:
logger.warning('No data available for %s from %s: %s' % (Channel_1.snclId, concierge.dataselect_url, e))
continue
try:
stE = concierge.get_dataselect(Channel_2.network, Channel_2.station, Channel_2.location, Channel_2.channel,
windowStart, windowEnd, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.info('No data available for %s' % (Channel_2.snclId[:-1]))
                elif str(e).lower().find('multiple epochs') > -1:
                    logger.info('Skipping %s because multiple metadata epochs found' % (Channel_2.snclId))
else:
logger.info('No data available for %s from %s: %s' % (Channel_2.snclId, concierge.dataselect_url, e))
continue
try:
stZ = concierge.get_dataselect(ZChannel.network, ZChannel.station, ZChannel.location, ZChannel.channel,
windowStart, windowEnd, inclusiveEnd=False)
except Exception as e:
if str(e).lower().find('no data') > -1:
logger.info('No data available for %s' % (ZChannel.snclId[:-1]))
                elif str(e).lower().find('multiple epochs') > -1:
                    logger.info('Skipping %s because multiple metadata epochs found' % (ZChannel.snclId))
else:
logger.info('No data available for %s from %s: %s' % (ZChannel.snclId, concierge.dataselect_url, e))
continue
# If metadata indicates reversed polarity (dip>0), invert the amplitudes
if (ZChannel.dip > 0):
stZ = irisseismic.multiplyBy(stZ,-1)
if len(utils.get_slot(stN,'traces')) > 1 or len(utils.get_slot(stE,'traces')) > 1 or len(utils.get_slot(stZ,'traces')) > 1:
logger.info('Skipping %s because it has gaps' % (sn_lId))
continue
# complain if sample lengths differ by more than 1 sample
l1 = utils.get_slot(stN,'npts')
l2 = utils.get_slot(stE,'npts')
l3 = utils.get_slot(stZ,'npts')
if( abs(l1 - l2) > 1 or abs(l1 - l3) > 1 ):
logger.info('Skipping %s because the number of data samples differs between channels. Incompatible lengths stN=%d, stE=%d, stZ=%d' % (sn_lId,l1,l2,l3))
continue
else:
max_length = min(l1, l2, l3)
# NOTE: This next function captures a chunk of functionality that involves direct
# NOTE: manipulation of individual slots in Stream objects.
# NOTE: This requires some R knowledge and rpy2 trickery that doesn't belong in
# NOTE: the business logic python code.
(stN, stE, stZ, HZ) = irisseismic.trim_taper_filter(stN, stE, stZ, max_length, taper, filterArgs)
# For trial empirical BHN/BH1 channel azimuths X = 0 to 360 in degrees (X is bearing from N):
# Rotate the two horizontal channels to find the radial component R
# (R is the vector sum of the 2 horizontal channels in the back azimuth direction)
# Assume BHE/BH2 is 90 degrees clockwise from BHN/BH1.
# Calculate the cross-correlation of R and H{Z} at zero lag:
# Szr = sum(i): [R[t(i)] * H{Z[t(i)]}] where i = 1,...,N samples
# Calculate the auto-correlations of R and H{Z} at zero lag:
# Szz = sum(i): [H{Z[t(i)]}^2] where i = 1,...,N samples
# Srr = sum(i): [R[t(i)]^2] where i = 1,...,N samples
# Calculate and save 2 separate normalized correlation coefficients:
# a) Czr = Szr / sqrt(Szz*Srr)
# b) C*zr = Szr / Srr
# Prefill Szz as it doesn't depend on the angle
HZ_data = pd.Series(utils.get_slot(HZ,'data'))
SzzValue = sum(HZ_data * HZ_data)
Szz = pd.Series([SzzValue] * 360)
Szr = pd.Series([np.NaN] * 360)
Srr = pd.Series([np.NaN] * 360)
# Calculate correlations as a function of angle
rotateOK = True
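            # Note: the range below starts at 1, so angle 0 is never evaluated and
            # Szr[0]/Srr[0] stay NaN; the subsequent max(skipna=True) calls ignore them.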
for angle in range(1,360,degreeIncrement):
#if angle % 10 == 0:
# logger.debug('rotate2D angle = %d' % angle)
try:
stR = irisseismic.rotate2D(stN, stE, angle)[0]
except Exception as e:
                    logger.warning('skipping %s: irisseismic.rotate2D failed: %s' % (sn_lId, e))
rotateOK = False
break
R_data = pd.Series(utils.get_slot(stR,'data'))
Srr[angle] = sum(R_data * R_data)
Szr[angle] = sum(HZ_data * R_data)
# an error in the loop means we skip this SNL, go to next in loop
if not rotateOK:
continue
# Normalized correlation coefficients
Czr = Szr / (Szz*Srr).pow(.5)
C_zr = Szr / Szz
maxCzr = Czr.max(skipna=True)
maxC_zr = C_zr.max(skipna=True)
angleAtMaxCzr = int( list(Czr[Czr == maxCzr].index)[0] )
angleAtMaxC_zr = int( list(C_zr[C_zr == maxC_zr].index)[0] )
azimuth_R = angleAtMaxC_zr % 360
azimuth_T = (azimuth_R + 90) % 360
# Find the orientation X with the maximum C*zr and:
# report empirical X, X+90,
# report metadata azimuths for horizontal channels
# report Czr & C*zr
# report start and end of data window
#
#
# REC Feb 2014 -- change the attribute names based on Mary Templeton's recommendations
# -- also add an event magnitude attribute
# azimuth_R
# backAzimuth
# azimuth_Y_obs (= backAzimuth - azimuth_R)
# azimuth_X_obs (= azimuth_Y_obs + 90)
# azimuth_Y_meta (azimuth_N renamed)
# azimuth_X_meta (azimuth_E renamed)
# max_Czr
# max_C_zr
# magnitude
azimuth_Y_obs = (float(distaz.backAzimuth) - azimuth_R) % 360
azimuth_X_obs = (azimuth_Y_obs + 90.0) % 360
elementNames = ["azimuth_R","backAzimuth","azimuth_Y_obs","azimuth_X_obs","azimuth_Y_meta","azimuth_X_meta","max_Czr","max_C_zr","magnitude"]
elementValues = [azimuth_R, float(distaz.backAzimuth), azimuth_Y_obs, azimuth_X_obs,
float(Channel_1.azimuth), float(Channel_2.azimuth), maxCzr, maxC_zr, float(event.magnitude)]
# Create metric
df = irisseismic.generalValueMetric(utils.get_slot(stZ, 'id'), windowStart, windowEnd,
'orientation_check', elementNames, elementValues)
dataframes.append(df)
# END of sn_lId loop
# END of event loop
# Concatenate dataframes before returning ----------------------------------
if len(dataframes) == 0:
logger.warning('"orientation_check" metric calculation generated zero metrics')
return None
else:
result = pd.concat(dataframes, ignore_index=True)
result.reset_index(drop=True, inplace=True)
return(result)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| lgpl-3.0 | -514,858,001,789,757,000 | 45.197368 | 220 | 0.554543 | false |
jirikuncar/invenio-oaiserver | tests/test_verbs.py | 1 | 18989 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test OAI verbs."""
from __future__ import absolute_import
import uuid
from copy import deepcopy
from time import sleep
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.minters import recid_minter
from invenio_records.api import Record
from lxml import etree
from invenio_oaiserver.minters import oaiid_minter
from invenio_oaiserver.models import OAISet
from invenio_oaiserver.response import NS_DC, NS_OAIDC, NS_OAIPMH, \
datetime_to_datestamp
def _xpath_errors(body):
"""Find errors in body."""
return list(body.iter('{*}error'))
def test_no_verb(app):
"""Test response when no verb is specified."""
with app.test_client() as c:
result = c.get('/oai2d')
tree = etree.fromstring(result.data)
assert 'Missing data for required field.' in _xpath_errors(
tree)[0].text
def test_wrong_verb(app):
with app.test_client() as c:
result = c.get('/oai2d?verb=Aaa')
tree = etree.fromstring(result.data)
assert 'This is not a valid OAI-PMH verb:Aaa' in _xpath_errors(
tree)[0].text
def test_identify(app):
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify')
assert 200 == result.status_code
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:Identify',
namespaces=namespaces)) == 1
repository_name = tree.xpath('/x:OAI-PMH/x:Identify/x:repositoryName',
namespaces=namespaces)
assert len(repository_name) == 1
assert repository_name[0].text == 'Invenio-OAIServer'
base_url = tree.xpath('/x:OAI-PMH/x:Identify/x:baseURL',
namespaces=namespaces)
assert len(base_url) == 1
assert base_url[0].text == 'http://app/oai2d'
protocolVersion = tree.xpath('/x:OAI-PMH/x:Identify/x:protocolVersion',
namespaces=namespaces)
assert len(protocolVersion) == 1
assert protocolVersion[0].text == '2.0'
adminEmail = tree.xpath('/x:OAI-PMH/x:Identify/x:adminEmail',
namespaces=namespaces)
assert len(adminEmail) == 1
assert adminEmail[0].text == '[email protected]'
earliestDatestamp = tree.xpath(
'/x:OAI-PMH/x:Identify/x:earliestDatestamp',
namespaces=namespaces)
assert len(earliestDatestamp) == 1
deletedRecord = tree.xpath('/x:OAI-PMH/x:Identify/x:deletedRecord',
namespaces=namespaces)
assert len(deletedRecord) == 1
assert deletedRecord[0].text == 'no'
granularity = tree.xpath('/x:OAI-PMH/x:Identify/x:granularity',
namespaces=namespaces)
assert len(granularity) == 1
def test_getrecord(app):
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
with db.session.begin_nested():
record = Record.create({'title': 'Test0', '$schema': schema}).model
recid_minter(record.id, record.json)
pid = oaiid_minter(record.id, record.json)
db.session.commit()
pid_value = pid.pid_value
pid_updated = pid.updated
with app.test_client() as c:
result = c.get(
"/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc"
.format(pid_value))
assert 200 == result.status_code
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:header',
namespaces=namespaces)) == 1
assert len(tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:header/x:identifier',
namespaces=namespaces)) == 1
identifier = tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:header/x:identifier/text()',
namespaces=namespaces)
assert identifier == [str(record.id)]
datestamp = tree.xpath(
'/x:OAI-PMH/x:GetRecord/x:header/x:datestamp/text()',
namespaces=namespaces)
assert datestamp == [datetime_to_datestamp(pid_updated)]
assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:metadata',
namespaces=namespaces)) == 1
def test_getrecord_fail(app):
"""Test GetRecord if record doesn't exist."""
with app.test_request_context():
with app.test_client() as c:
result = c.get(
"/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc"
.format('not-exist-pid'))
assert 422 == result.status_code
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='idDoesNotExist')
def _check_xml_error(tree, code):
"""Text xml for a error idDoesNotExist."""
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
error = tree.xpath('/x:OAI-PMH/x:error', namespaces=namespaces)
assert len(error) == 1
assert error[0].attrib['code'] == code
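# Illustrative shape of the response the helper above expects (NS_OAIPMH is the
# standard OAI-PMH 2.0 namespace):
#   <OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/">
#     ...
#     <error code="idDoesNotExist">No matching identifier</error>
#   </OAI-PMH>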
def test_identify_with_additional_args(app):
with app.test_client() as c:
result = c.get('/oai2d?verb=Identify¬AValidArg=True')
tree = etree.fromstring(result.data)
assert 'You have passed too many arguments.' == _xpath_errors(
tree)[0].text
def test_listmetadataformats(app):
"""Test ListMetadataFormats."""
_listmetadataformats(app=app, query='/oai2d?verb=ListMetadataFormats')
def test_listmetadataformats_record(app):
"""Test ListMetadataFormats for a record."""
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title': 'Test0', '$schema': schema}
recid_minter(record_id, data)
pid = oaiid_minter(record_id, data)
Record.create(data, id_=record_id)
pid_value = pid.pid_value
db.session.commit()
_listmetadataformats(
app=app,
query='/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
pid_value))
def test_listmetadataformats_record_fail(app):
"""Test ListMetadataFormats for a record that doesn't exist."""
query = '/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
"pid-not-exixts")
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='idDoesNotExist')
def _listmetadataformats(app, query):
"""Try ListMetadataFormats."""
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListMetadataFormats',
namespaces=namespaces)) == 1
metadataFormats = tree.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat',
namespaces=namespaces)
cfg_metadataFormats = deepcopy(
app.config.get('OAISERVER_METADATA_FORMATS', {}))
assert len(metadataFormats) == len(cfg_metadataFormats)
for metadataFormat in metadataFormats:
# prefix
prefix = metadataFormat.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:metadataPrefix', namespaces=namespaces)
assert len(prefix) == 1
assert prefix[0].text in cfg_metadataFormats
# schema
schema = metadataFormat.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:schema', namespaces=namespaces)
assert len(schema) == 1
assert schema[0].text in cfg_metadataFormats[
prefix[0].text]['schema']
# metadataNamespace
metadataNamespace = metadataFormat.xpath(
'/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
'x:metadataNamespace', namespaces=namespaces)
assert len(metadataNamespace) == 1
assert metadataNamespace[0].text in cfg_metadataFormats[
prefix[0].text]['namespace']
# remove tested format
del cfg_metadataFormats[prefix[0].text]
def test_listsets(app):
"""Test ListSets."""
with app.test_request_context():
with db.session.begin_nested():
a = OAISet(spec='test', name='Test', description="test desc")
db.session.add(a)
with app.test_client() as c:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setSpec',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setName',
namespaces=namespaces)) == 1
assert len(tree.xpath(
'/x:OAI-PMH/x:ListSets/x:set/x:setDescription',
namespaces=namespaces
)) == 1
namespaces['y'] = NS_OAIDC
assert len(
tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc',
namespaces=namespaces)
) == 1
namespaces['z'] = NS_DC
assert len(
tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
'z:description', namespaces=namespaces)
) == 1
text = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
'z:description/text()', namespaces=namespaces)
assert len(text) == 1
assert text[0] == 'test desc'
def test_fail_missing_metadataPrefix(app):
"""Test ListRecords fail missing metadataPrefix."""
queries = [
'/oai2d?verb=ListRecords',
'/oai2d?verb=GetRecord&identifier=123',
'/oai2d?verb=ListIdentifiers'
]
for query in queries:
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_fail_not_exist_metadataPrefix(app):
"""Test ListRecords fail not exist metadataPrefix."""
queries = [
'/oai2d?verb=ListRecords&metadataPrefix=not-exist',
'/oai2d?verb=GetRecord&identifier=123&metadataPrefix=not-exist',
'/oai2d?verb=ListIdentifiers&metadataPrefix=not-exist'
]
for query in queries:
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_listrecords_fail_missing_metadataPrefix(app):
"""Test ListRecords fail missing metadataPrefix."""
query = '/oai2d?verb=ListRecords&'
with app.test_request_context():
with app.test_client() as c:
result = c.get(query)
tree = etree.fromstring(result.data)
_check_xml_error(tree, code='badArgument')
def test_listrecords(app):
"""Test ListRecords."""
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
indexer = RecordIndexer()
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title': 'Test0', '$schema': schema}
recid_minter(record_id, data)
oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
db.session.commit()
indexer.index_by_id(record_id)
sleep(1)
with app.test_client() as c:
result = c.get('/oai2d?verb=ListRecords&metadataPrefix=oai_dc')
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:identifier', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:header'
'/x:datestamp', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record/x:metadata',
namespaces=namespaces)) == 1
def test_listidentifiers(app):
"""Test verb ListIdentifiers."""
schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'field': {'type': 'boolean'},
},
'required': ['title'],
}
with app.test_request_context():
indexer = RecordIndexer()
with db.session.begin_nested():
record_id = uuid.uuid4()
data = {'title': 'Test0', '$schema': schema}
recid_minter(record_id, data)
pid = oaiid_minter(record_id, data)
record = Record.create(data, id_=record_id)
db.session.commit()
indexer.index_by_id(record_id)
sleep(1)
pid_value = pid.pid_value
with app.test_client() as c:
result = c.get(
"/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc"
)
tree = etree.fromstring(result.data)
namespaces = {'x': NS_OAIPMH}
assert len(tree.xpath('/x:OAI-PMH', namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers',
namespaces=namespaces)) == 1
assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers/x:header',
namespaces=namespaces)) == 1
identifier = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
namespaces=namespaces
)
assert len(identifier) == 1
assert identifier[0].text == str(pid_value)
datestamp = tree.xpath(
'/x:OAI-PMH/x:ListIdentifiers/x:header/x:datestamp',
namespaces=namespaces
)
assert len(datestamp) == 1
assert datestamp[0].text == datetime_to_datestamp(record.updated)
def test_list_sets_long(app):
"""Test listing of sets."""
from invenio_db import db
from invenio_oaiserver.models import OAISet
with app.app_context():
with db.session.begin_nested():
for i in range(27):
db.session.add(OAISet(
spec='test{0}'.format(i),
name='Test{0}'.format(i),
description='test desc {0}'.format(i),
search_pattern='title:Test{0}'.format(i),
))
db.session.commit()
namespaces = {'x': NS_OAIPMH}
with app.test_client() as c:
# First page:
result = c.get('/oai2d?verb=ListSets')
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=namespaces
)[0]
assert resumption_token.text
# Second page:
result = c.get('/oai2d?verb=ListSets&resumptionToken={0}'.format(
resumption_token.text
))
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 10
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=namespaces
)[0]
assert resumption_token.text
# Third page:
result = c.get('/oai2d?verb=ListSets&resumptionToken={0}'.format(
resumption_token.text
))
tree = etree.fromstring(result.data)
assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
namespaces=namespaces)) == 7
resumption_token = tree.xpath(
'/x:OAI-PMH/x:ListSets/x:resumptionToken', namespaces=namespaces
)[0]
assert not resumption_token.text
def test_list_sets_with_resumption_token_and_other_args(app):
pass
| gpl-2.0 | 6,154,629,541,478,651,000 | 34.964015 | 79 | 0.577229 | false |
tiborsimko/analysis-preservation.cern.ch | cap/modules/schemas/models.py | 1 | 7772 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Models for JSONschemas."""
import re
from flask import current_app
from invenio_access.models import ActionSystemRoles
from invenio_access.permissions import authenticated_user
from invenio_jsonschemas.errors import JSONSchemaNotFound
from invenio_db import db
from invenio_search import current_search
from invenio_search import current_search_client as es
from sqlalchemy import UniqueConstraint, event
from sqlalchemy.dialects import postgresql
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy_utils.types import JSONType
from .permissions import ReadSchemaPermission, SchemaReadAction
class Schema(db.Model):
"""Model defining analysis JSON schemas."""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=False, nullable=False)
fullname = db.Column(db.String(128), unique=False, nullable=True)
# version
major = db.Column(db.Integer, unique=False, nullable=False, default=0)
minor = db.Column(db.Integer, unique=False, nullable=False, default=0)
patch = db.Column(db.Integer, unique=False, nullable=False, default=0)
experiment = db.Column(db.String(128), unique=False, nullable=True)
is_deposit = db.Column(db.Boolean(create_constraint=False),
unique=False,
default=False)
json = db.Column(
JSONType().with_variant(
postgresql.JSONB(none_as_null=True),
'postgresql',
).with_variant(
JSONType(),
'sqlite',
),
default=lambda: dict(),
nullable=True
)
__tablename__ = 'schema'
__table_args__ = (UniqueConstraint('name', 'major', 'minor', 'patch',
name='unique_schema_version'),)
def __init__(self, fullpath=None, **kwargs):
"""."""
if fullpath:
self.name, self.major, self.minor, self.patch = \
self._parse_fullpath(fullpath)
super(Schema, self).__init__(**kwargs)
@property
def version(self):
"""Return stringified version."""
return "{}.{}.{}".format(self.major, self.minor, self.patch)
@property
def is_record(self):
"""Return stringified version."""
return self.name.startswith('records')
@property
def fullpath(self):
"""Return full path eg. https://host.com/schemas/schema-v0.0.1.json."""
host = current_app.config['JSONSCHEMAS_HOST']
return "https://{}/schemas/{}-v{}.json".format(
host, self.name, self.version)
@property
def index_name(self):
"""Get index name."""
return "{}-v{}".format(self.name.replace('/', '-'), self.version)
@property
def aliases(self):
"""Get ES aliases names."""
aliases = []
if self.is_deposit:
aliases = ['deposits', 'deposits-records']
elif self.is_record:
aliases = ['records']
return aliases
def get_matching_record_schema(self):
"""For given deposit schema, get record one."""
name = self.name.replace('deposits/', '')
try:
return Schema.query \
.filter_by(name=name,
major=self.major,
minor=self.minor,
patch=self.patch)\
.one()
except NoResultFound:
raise JSONSchemaNotFound(schema=name)
def add_read_access_to_all(self):
"""Give read access to all authenticated users."""
try:
db.session.add(
ActionSystemRoles.allow(
SchemaReadAction(self.id),
role=authenticated_user
)
)
db.session.flush()
except IntegrityError:
db.session.rollback()
@classmethod
def get_latest(cls, name):
"""Get the latest version of schema with given name."""
latest = cls.query \
.filter_by(name=name) \
.order_by(cls.major.desc(),
cls.minor.desc(),
cls.patch.desc())\
.first()
if latest:
return latest
else:
raise JSONSchemaNotFound(schema=name)
@classmethod
def get_by_fullpath(cls, string):
"""Get schema by full path, e.g. record/schema-v0.0.1.json."""
name, major, minor, patch = cls._parse_fullpath(string)
try:
return cls.query \
.filter_by(name=name,
major=major,
minor=minor,
patch=patch)\
.one()
except NoResultFound:
raise JSONSchemaNotFound(schema=name)
@classmethod
def get_user_deposit_schemas(cls):
"""Return all deposit schemas user has read access to."""
schemas = cls.query.filter_by(is_deposit=True).all()
return [x for x in schemas if ReadSchemaPermission(x).can()]
@staticmethod
def _parse_fullpath(string):
try:
            regex = re.compile(r'(?:.*/schemas/)?'
                               r'/?(?P<name>\S+)'
                               r'-v(?P<major>\d+)\.'
                               r'(?P<minor>\d+)\.'
                               r'(?P<patch>\d+)'
                               r'(?:\.json)?')
return re.search(regex, string).groups()
except AttributeError:
raise JSONSchemaNotFound(schema=string)
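# Illustrative examples (hypothetical schema names): Schema._parse_fullpath accepts
# either a bare path or a full schema URL, e.g.
#   Schema._parse_fullpath('records/ana-v0.0.1.json')
#       -> ('records/ana', '0', '0', '1')
#   Schema._parse_fullpath('https://host/schemas/deposits/records/ana-v2.1.0.json')
#       -> ('deposits/records/ana', '2', '1', '0')
# (the version components are returned as strings).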
@event.listens_for(Schema, 'after_insert')
def after_insert_schema(target, value, schema):
"""On schema insert, create corresponding indexes and aliases in ES."""
if (schema.is_deposit or schema.is_record) and \
not es.indices.exists(schema.index_name):
# invenio search needs it
current_search.mappings[schema.index_name] = {}
es.indices.create(
index=schema.index_name,
body={},
ignore=False
)
for alias in schema.aliases:
es.indices.update_aliases({
"actions": [
{"add": {
"index": schema.index_name,
"alias": alias
}}
]
})
@event.listens_for(Schema, 'after_delete')
def before_delete_schema(mapper, connect, target):
"""On schema delete, delete corresponding indexes and aliases in ES."""
if es.indices.exists(target.index_name):
es.indices.delete(target.index_name)
# invenio search needs it
current_search.mappings.pop(target.index_name)
| gpl-2.0 | 4,545,314,367,023,224,000 | 32.938865 | 79 | 0.585821 | false |
asceth/devsyn | devsyn/procedural/trees/simple.py | 1 | 6441 | import math
import __builtin__
from pandac.PandaModules import Vec3, GeomVertexData, GeomVertexFormat
from pandac.PandaModules import Geom, GeomVertexWriter, GeomVertexRewriter
from pandac.PandaModules import GeomVertexReader, GeomTristrips, CullFaceAttrib
from pandac.PandaModules import GeomNode, NodePath, Mat4, Vec4, TransformState
from pandac.PandaModules import GeomVertexArrayFormat, InternalName
from devsyn.entities import Entity
from devsyn.procedural.utility import ProceduralUtility
APP_PATH = __builtin__.APP_PATH
base = __builtin__.base
class SimpleTree(Entity):
geom_vertex_format = None
def get_format():
if SimpleTree.geom_vertex_format is None:
format_array = GeomVertexArrayFormat()
format_array.addColumn(InternalName.make("drawFlag"), 1, Geom.NTUint8,
Geom.COther)
format = GeomVertexFormat(GeomVertexFormat.getV3n3cpt2())
format.addArray(format_array)
SimpleTree.geom_vertex_format = GeomVertexFormat.registerFormat(format)
return SimpleTree.geom_vertex_format
get_format = staticmethod(get_format)
def __init__(self, scale = 0.125, length = Vec3(4, 4, 7), position = Vec3(0, 0, 0),
iterations = 10, num_branches = 4, vector_list = [Vec3(0, 0, 1),
Vec3(1, 0, 0),
Vec3(0, -1, 0)]):
self.set_model(NodePath("SimpleTree"))
self.format = SimpleTree.get_format()
self.vdata = GeomVertexData("body vertices", self.format, Geom.UHStatic)
self.position = position
self.length = length
self.vector_list = vector_list
self.iterations = iterations
self.num_branches = num_branches
self.scale = scale
self.bark_texture = base.loader.loadTexture(APP_PATH +
'media/textures/trees/bark.jpg')
def generate(self):
self.recurse(self.length, self.position, self.iterations, self.vector_list)
self.get_model().setTexture(self.bark_texture, 1)
self.reparentTo(base.render)
def recurse(self, length, position, iterations, vector_list):
if iterations > 0:
self.draw_body(position, vector_list, length.getX())
# move forward along the right axis
new_position = position + vector_list[0] * length.length()
# Only branch every third level
if iterations % 3 == 0:
# decrease dimensions when we branch
length = Vec3(length.getX() / 2, length.getY() / 2,
length.getZ() / 1.1)
for i in range(self.num_branches):
self.recurse(length, new_position, iterations - 1,
ProceduralUtility.random_axis(vector_list))
else:
# just make another branch connected to this one with a small
# variation in direction
self.recurse(length, new_position, iterations - 1,
ProceduralUtility.small_random_axis(vector_list))
else:
self.draw_body(position, vector_list, length.getX(), False)
self.draw_leaf(position, vector_list, self.scale)
def draw_body(self, position, vector_list, radius = 1, keep_drawing = True, num_vertices = 8):
circle_geom = Geom(self.vdata)
vertex_writer = GeomVertexWriter(self.vdata, "vertex")
color_writer = GeomVertexWriter(self.vdata, "color")
normal_writer = GeomVertexWriter(self.vdata, "normal")
draw_rewriter = GeomVertexRewriter(self.vdata, "drawFlag")
tex_rewriter = GeomVertexRewriter(self.vdata, "texcoord")
start_row = self.vdata.getNumRows()
vertex_writer.setRow(start_row)
color_writer.setRow(start_row)
normal_writer.setRow(start_row)
sCoord = 0
if start_row != 0:
tex_rewriter.setRow(start_row - num_vertices)
sCoord = tex_rewriter.getData2f().getX() + 1
draw_rewriter.setRow(start_row - num_vertices)
if draw_rewriter.getData1f() == False:
sCoord -= 1
draw_rewriter.setRow(start_row)
tex_rewriter.setRow(start_row)
angle_slice = 2 * math.pi / num_vertices
current_angle = 0
perp1 = vector_list[1]
perp2 = vector_list[2]
# write vertex information
for i in range(num_vertices):
adjacent_circle = position + (perp1 * math.cos(current_angle) + perp2 * math.sin(current_angle)) * radius
normal = perp1 * math.cos(current_angle) + perp2 * math.sin(current_angle)
normal_writer.addData3f(normal)
vertex_writer.addData3f(adjacent_circle)
tex_rewriter.addData2f(sCoord, (i + 0.001) / (num_vertices - 1))
color_writer.addData4f(0.5, 0.5, 0.5, 1.0)
draw_rewriter.addData1f(keep_drawing)
current_angle += angle_slice
draw_reader = GeomVertexReader(self.vdata, "drawFlag")
draw_reader.setRow(start_row - num_vertices)
# we can't draw quads directly so use Tristrips
if start_row != 0 and draw_reader.getData1f() != False:
lines = GeomTristrips(Geom.UHStatic)
half = int(num_vertices * 0.5)
for i in range(num_vertices):
lines.addVertex(i + start_row)
if i < half:
lines.addVertex(i + start_row - half)
else:
lines.addVertex(i + start_row - half - num_vertices)
lines.addVertex(start_row)
lines.addVertex(start_row - half)
lines.closePrimitive()
lines.decompose()
circle_geom.addPrimitive(lines)
circle_geom_node = GeomNode("Debug")
circle_geom_node.addGeom(circle_geom)
circle_geom_node.setAttrib(CullFaceAttrib.makeReverse(), 1)
self.get_model().attachNewNode(circle_geom_node)
    def draw_leaf(self, position, vector_list, scale=0.125):
# use the vectors that describe the direction the branch grows
# to make the right rotation matrix
new_cs = Mat4(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
new_cs.setRow(0, vector_list[2]) # right
new_cs.setRow(1, vector_list[1]) # up
new_cs.setRow(2, vector_list[0]) # forward
new_cs.setRow(3, Vec3(0, 0, 0))
new_cs.setCol(3, Vec4(0, 0, 0, 1))
axis_adjustment = Mat4.scaleMat(scale) * new_cs * Mat4.translateMat(position)
leaf_model = base.loader.loadModelCopy(APP_PATH + 'media/models/shrubbery')
leaf_texture = base.loader.loadTexture(APP_PATH + 'media/models/material-10-cl.png')
leaf_model.reparentTo(self.get_model())
leaf_model.setTexture(leaf_texture, 1)
leaf_model.setTransform(TransformState.makeMat(axis_adjustment))
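# Hedged usage sketch (not part of the original file): how SimpleTree might be
# driven once a Panda3D ShowBase instance named `base` exists, as the methods
# above already assume. The keyword names mirror the attributes set in
# __init__ and are illustrative only; the real constructor signature may differ.
def _demo_simple_tree():
    tree = SimpleTree(position=Vec3(0, 0, 0),
                      length=Vec3(4, 4, 7),
                      iterations=9,
                      num_branches=4)
    tree.generate()  # builds the branch geometry, textures it, parents to render
    return tree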
| mit | 3,595,269,286,669,190,000 | 37.112426 | 111 | 0.656575 | false |
PatrickKennedy/pygab | common/argparse.py | 1 | 75204 | # -*- coding: utf-8 -*-
# Copyright © 2006 Steven J. Bethard <[email protected]>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the 3-clause BSD
# license. No warranty expressed or implied.
# For details, see the accompanying file LICENSE.txt.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
* handles both optional and positional arguments
* produces highly informative usage messages
* supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file:
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
HelpFormatter, RawDescriptionHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default, while
RawDescriptionHelpFormatter tells the parser not to perform any
line-wrapping on description text.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '0.9.0'
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = '==PARSER=='
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format:
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join(func(*args) for func, args in self.items)
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max(len(s) for s in invocations)
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help() % dict(prog=self._prog)
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join(part
for part in part_strings
if part and part is not SUPPRESS)
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if no optionals or positionals are available, usage is just prog
if usage is None and not actions:
usage = '%(prog)s'
# if optionals and positionals are available, calculate usage
elif usage is None:
usage = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# determine width of "usage: PROG" and width of text
prefix_width = len(prefix) + len(usage) + 1
prefix_indent = self._current_indent + prefix_width
text_width = self._width - self._current_indent
# put them on one line if they're short enough
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
if prefix_width + len(action_usage) + 1 < text_width:
usage = '%s %s' % (usage, action_usage)
# if they're long, wrap optionals and positionals individually
else:
optional_usage = format(optionals, groups)
positional_usage = format(positionals, groups)
indent = ' ' * prefix_indent
# usage is made of PROG, optionals and positionals
parts = [usage, ' ']
# options always get added right after PROG
if optional_usage:
parts.append(_textwrap.fill(
optional_usage, text_width,
initial_indent=indent,
subsequent_indent=indent).lstrip())
# if there were options, put arguments on the next line
# otherwise, start them right after PROG
if positional_usage:
part = _textwrap.fill(
positional_usage, text_width,
initial_indent=indent,
subsequent_indent=indent).lstrip()
if optional_usage:
part = '\n' + indent + part
parts.append(part)
usage = ''.join(parts)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
start = actions.index(group._group_actions[0])
if start != -1:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
inserts[start] = '['
inserts[end] = ']'
else:
inserts[start] = '('
inserts[end] = ')'
for i in xrange(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join(item for item in parts if item is not None)
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
return self._format_metavar(action, action.dest)
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _format_metavar(self, action, default_metavar):
if action.metavar is not None:
name = action.metavar
elif action.choices is not None:
choice_strs = (str(choice) for choice in action.choices)
name = '{%s}' % ','.join(choice_strs)
else:
name = default_metavar
return name
def _format_args(self, action, default_metavar):
name = self._format_metavar(action, default_metavar)
if action.nargs is None:
result = name
elif action.nargs == OPTIONAL:
result = '[%s]' % name
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % (name, name)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % (name, name)
elif action.nargs is PARSER:
result = '%s ...' % name
else:
result = ' '.join([name] * action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name, value in params.items():
if value is SUPPRESS:
del params[name]
if params.get('choices') is not None:
choices_str = ', '.join(str(c) for c in params['choices'])
params['choices'] = choices_str
return action.help % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
class RawDescriptionHelpFormatter(HelpFormatter):
def _fill_text(self, text, width, indent):
return ''.join(indent + line for line in text.splitlines(True))
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
def _split_lines(self, text, width):
return text.splitlines()
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""ArgumentError(message, argument)
Raised whenever there was an error creating or using an argument
(optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Action(*strings, **options)
Action objects hold the information necessary to convert a
set of command-line arguments (possibly including an initial option
string) into the desired Python object(s).
Keyword Arguments:
option_strings -- A list of command-line option strings which
should be associated with this action.
dest -- The name of the attribute to hold the created object(s)
nargs -- The number of command-line arguments that should be consumed.
By default, one argument will be consumed and a single value will
be produced. Other values include:
* N (an integer) consumes N arguments (and produces a list)
* '?' consumes zero or one arguments
* '*' consumes zero or more arguments (and produces a list)
* '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
default -- The value to be produced if the option is not specified.
type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
required -- True if the action must always be specified at the command
line. This is only meaningful for optional command-line arguments.
help -- The help string describing the argument.
metavar -- The name to be used for the option's argument with the help
string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar'
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
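# Hedged illustration of the keyword arguments documented above: a minimal
# custom Action subclass. It assumes the default single-value nargs, is not
# part of the original module, and is only used if passed explicitly via the
# action= argument of add_argument().
class _ExampleStoreUpperAction(Action):
    """Store the argument value upper-cased, e.g. '--level debug' -> 'DEBUG'."""
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values.upper())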
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs must be > 0')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs must be > 0')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
_ensure_value(namespace, self.dest, []).append(values)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
_ensure_value(namespace, self.dest, []).append(self.const)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_version()
parser.exit()
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
            msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
parser.parse_args(arg_strings, namespace)
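# Hedged usage sketch of the sub-parser dispatch this action implements. The
# command names and arguments below are invented for illustration and the
# helper is never invoked by the module itself.
def _example_subparsers(argv=None):
    parser = ArgumentParser(prog='example-vcs')
    subparsers = parser.add_subparsers(dest='command')
    fetch = subparsers.add_parser('fetch', help='download objects')
    fetch.add_argument('remote')
    push = subparsers.add_parser('push', help='upload objects')
    push.add_argument('remote')
    push.add_argument('--force', action='store_true')
    return parser.parse_args(argv)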
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
                msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join(repr(arg) for arg in args if arg is not None)
return '%s(%s)' % (type(self).__name__, args_str)
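# Hedged example of using the FileType factory described above as a type=
# argument; the file names come from the command line and the helper is not
# used elsewhere in the module.
def _example_filetype(argv=None):
    parser = ArgumentParser(prog='example-copy')
    parser.add_argument('infile', type=FileType('r'))
    parser.add_argument('outfile', type=FileType('w'))
    args = parser.parse_args(argv)
    args.outfile.write(args.infile.read())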
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
def __init__(self, **kwargs):
for name, value in kwargs.iteritems():
setattr(self, name, value)
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default settings methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
action = action_class(**kwargs)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on one-or-fewer-character option strings
if len(option_string) < 2:
msg = _('invalid option string %r: '
'must be at least two characters long')
raise ValueError(msg % option_string)
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# error on strings that are all prefix characters
if not (set(option_string) - set(self.prefix_chars)):
msg = _('invalid option string %r: '
'must contain characters other than %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join(option_string
for option_string, action
in conflicting_actions)
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
argument_default=None,
conflict_handler='error',
add_help=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.add_help = add_help
self._has_subparsers = False
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if self.add_help:
self.add_argument(
'-h', '--help', action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
'-v', '--version', action='version', default=SUPPRESS,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._has_subparsers:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._positionals._add_action(action)
self._has_subparsers = True
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest, value in self._defaults.iteritems():
if not hasattr(namespace, dest):
setattr(namespace, dest, value)
# parse the arguments and exit if there are any errors
try:
return self._parse_args(args, namespace)
except ArgumentError, err:
self.error(str(err))
def _parse_args(self, arg_strings, namespace):
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate values and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, raise an error
if action is None:
self.error(_('no such option: %s') % option_string)
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
for char in self.prefix_chars:
option_string = char + explicit_arg[0]
explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min(
index
for index in option_string_indices
if index >= start_index)
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were unparseable
# arguments
if start_index not in option_string_indices:
msg = _('extra arguments found: %s')
extras = arg_strings[start_index:next_option_string_index]
self.error(msg % ' '.join(extras))
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were too
# many supplied
if stop_index != len(arg_strings):
extras = arg_strings[stop_index:]
self.error(_('extra arguments found: %s') % ' '.join(extras))
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace
return namespace
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None:_('expected one argument'),
OPTIONAL:_('expected at most one argument'),
ONE_OR_MORE:_('expected at least one argument')
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in xrange(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join(self._get_nargs_pattern(action)
for action in actions_slice)
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend(len(string) for string in match.groups())
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if it's just dashes, it was meant to be positional
if not arg_string.strip('-'):
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join(opt_str for _, opt_str, _ in option_tuples)
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow one argument followed by any number of options or arguments
elif nargs is PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs is not PARSER:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# PARSER arguments convert all values, but check only the first
elif action.nargs is PARSER:
value = list(self._get_value(action, v) for v in arg_strings)
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = list(self._get_value(action, v) for v in arg_strings)
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# TypeErrors or ValueErrors indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
self._print_message(self.format_help(), file)
def print_version(self, file=None):
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
_sys.stderr.write(message)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
| bsd-2-clause | -6,090,538,348,669,467,000 | 35.955283 | 86 | 0.560143 | false |
jntkym/rappers | preprocess.py | 1 | 6424 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import csv
import time
import sys
import subprocess
import codecs
from utils import *
import unicodedata
import argparse
import cPickle as pickle
reload(sys)
sys.setdefaultencoding('utf8')
# pattern for removing redundunt spaces
space_pat = re.compile(u'\s\s+', re.U)
# # pattern for removing English - not used now
# eng_words_pat = re.compile(u'[A-Za-z]*',re.U)
# aux 1.1+
def translate_non_alphanumerics(to_translate, translate_to=None):
"""
    Delete symbols that are not needed from the input string.
"""
not_letters_or_digits = u'[!&#%\"\'()_`{※+,』\|}~?...…「〜>r()<`!」?_%・@@”’":;+ー!。。。、_・_ _『 □***-\.\/:;<=>△?@\[\]\^'
translate_table = dict((ord(char), translate_to) for char in not_letters_or_digits)
return to_translate.translate(translate_table)
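# Editorial usage sketch (illustrative input, not from the corpus): every character
# listed in not_letters_or_digits is mapped to None by unicode.translate, e.g.
#   translate_non_alphanumerics(u'歌詞!!(テスト)') == u'歌詞テスト'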
# aux 1+
def clean_lyrics(lyrics_file):
"""
Take crawled data and do some simple cleaning on it
:param lyrics_file: crawled data
:return: cleaned data, which will be fed to Kytea
"""
data_corpus = []
with open(lyrics_file) as csvfile:
reader = csv.reader(csvfile, delimiter="\t")
for row in reader:
sentences = row[2].strip().split(u"<BR>")
for sentence in sentences:
sentence = unicode(sentence)
sentence = translate_non_alphanumerics(sentence)
sentence = space_pat.sub(u' ', sentence)
if len(sentence) > 1:
data_corpus.append(sentence)
data_corpus.append(u"\n")
print(" Done cleaning crawled data! ")
# saving the corpus
with codecs.open("data/cleaned_lyrics.txt", "w", 'UTF-8') as f:
f.write("\n".join(data_corpus))
# aux 2
def create_corpus(crawled_lyrics_file, save=False):
"""
Load cleaned crawled corpus from local folder, feed to Kytea, get the output.
    Then load the output and do post-processing.
:param crawled_lyrics_file:
:param save:
    :return: cleaned text to be used as JUMAN input
"""
# generating cleaned lyrics corpus from crawled data
clean_lyrics(crawled_lyrics_file) # the corpus is one sequence of characters per line
subprocess.call('kytea < ./data/cleaned_lyrics.txt > ./data/kytea_out.txt', shell=True) # processing with kytea
print(" Done kytea processing! ")
pron = []
unk_pat = re.compile(u"/補助記号/UNK")
slash_pat = re.compile(ur"\\")
with codecs.open("data/kytea_out.txt", 'UTF-8') as f:
for line in f:
line = line.decode(encoding="utf-8")
if line[0] == "\n":
pron.append(u"\n")
line = line.strip()
line = unk_pat.sub(u"", line)
line = slash_pat.sub(u"", line)
triplets = line.split(u" ") # take a look at Kytea output: https://github.com/chezou/Mykytea-python
seq = []
for item in triplets:
try:
hir = item.split(u"/")[0]
if hir != "\\":
seq.append(hir)
except IndexError:
continue
candidate_line = unicodedata.normalize("NFKC", u" ".join(seq))
candidate_line = re.sub(u"[A-Za-z]", u"", candidate_line)
candidate_line = re.sub(u"\s+", u"", candidate_line)
candidate_line = re.sub(u"\d+", u"5", candidate_line)
if len(candidate_line) > 2:
pron.append(candidate_line)
juman_input = u"\n".join(pron)
juman_input = re.sub(u"\n{4}",u"\n\n",juman_input)
return juman_input
# main function - creates input for the NN
def clean_corpus(crawled_lyrics_file, savepath=None):
print("Preparing data ...")
text = create_corpus(crawled_lyrics_file, save=False).lower()
if savepath != None:
with open(savepath, "w") as f:
f.write(text)
print(" Clean data saved into ----->%s " % (savepath))
def process_juman_output(juman_outfile):
print(" Processing juman output ...")
corpus = []
hiragana_corpus = []
daihyou_vocab = {}
with open(juman_outfile) as csvfile:
reader = csv.reader(csvfile, delimiter=str(u" "))
sent = []
hirag_sent = []
for line in reader:
if line[0] == u"@":
continue
if line[0] == u"EOS":
corpus.append(u" ".join(sent))
hiragana_corpus.append(u" ".join(hirag_sent))
hirag_sent = []
sent=[]
continue
if line[11] != "NIL":
value = line[11]
value = re.sub("代表表記:", u"", value,re.U)
value = value.split(u"/")[0]
else:
value = line[0]
hiragana = line[1]
hirag_sent.append(hiragana)
key = line[0]
daihyou_vocab[key] = value
sent.append(key)
corpus = u"\n".join(corpus)
hiragana_corpus = u"\n".join(hiragana_corpus)
    corpus = re.sub(u"\n\n", u"\n", corpus)
    hiragana_corpus = re.sub(u"\n\n", u"\n", hiragana_corpus)
print " All in all unique lemmas: %d" %(len(daihyou_vocab.values()))
# save a txt corpus file
with open("data/string_corpus.txt","w") as f:
for line in corpus.split(u"\n"):
print >> f, line
# save hiragana corpus
with open("data/hiragana_corpus.txt","w") as fo:
for line in hiragana_corpus.split(u"\n"):
print >> fo, line
# save a vocabulary
with open("data/daihyou_vocab.p", "w") as vocabfile:
pickle.dump(daihyou_vocab, vocabfile)
print "cleaning datadir ..."
subprocess.call('rm -f ./data/juman_input.txt ./data/kytea_out.txt ./data/cleaned_lyrics.txt',
shell=True)
def main():
parser = argparse.ArgumentParser(description="An LSTM language model")
parser.add_argument('-juman', help='Preprocess juman file', nargs=1)
parser.add_argument('-crawl', help='Preprocess crawled data', nargs=1)
opts = parser.parse_args()
if opts.crawl:
clean_corpus(opts.crawl[0], savepath="data/juman_input.txt")
print " Done cleaning crawled data"
if opts.juman:
process_juman_output(opts.juman[0])
print "Done preparing the corpus"
if __name__ == '__main__':
main()
| mit | -3,816,924,360,779,933,700 | 30.068627 | 116 | 0.563427 | false |
richard-willowit/odoo | addons/website_slides/models/slides.py | 2 | 30970 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import requests
from PIL import Image
import base64
import datetime
import io
import json
import re
from werkzeug import urls
from odoo import api, fields, models, SUPERUSER_ID, _
from odoo.addons.http_routing.models.ir_http import slug
from odoo.tools import image
from odoo.tools.translate import html_translate
from odoo.exceptions import Warning
class Channel(models.Model):
""" A channel is a container of slides. It has group-based access configuration
allowing to configure slide upload and access. Slides can be promoted in
channels. """
_name = 'slide.channel'
_description = 'Channel for Slides'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_order = 'sequence, id'
_order_by_strategy = {
'most_viewed': 'total_views desc',
'most_voted': 'likes desc',
'latest': 'date_published desc',
}
name = fields.Char('Name', translate=True, required=True)
active = fields.Boolean(default=True)
description = fields.Html('Description', translate=html_translate, sanitize_attributes=False)
sequence = fields.Integer(default=10, help='Display order')
category_ids = fields.One2many('slide.category', 'channel_id', string="Categories")
slide_ids = fields.One2many('slide.slide', 'channel_id', string="Slides")
promote_strategy = fields.Selection([
('none', 'No Featured Presentation'),
('latest', 'Latest Published'),
('most_voted', 'Most Voted'),
('most_viewed', 'Most Viewed'),
('custom', 'Featured Presentation')],
string="Featuring Policy", default='most_voted', required=True)
custom_slide_id = fields.Many2one('slide.slide', string='Slide to Promote')
promoted_slide_id = fields.Many2one('slide.slide', string='Featured Slide', compute='_compute_promoted_slide_id', store=True)
@api.depends('custom_slide_id', 'promote_strategy', 'slide_ids.likes',
'slide_ids.total_views', "slide_ids.date_published")
def _compute_promoted_slide_id(self):
for record in self:
if record.promote_strategy == 'none':
record.promoted_slide_id = False
elif record.promote_strategy == 'custom':
record.promoted_slide_id = record.custom_slide_id
elif record.promote_strategy:
slides = self.env['slide.slide'].search(
[('website_published', '=', True), ('channel_id', '=', record.id)],
limit=1, order=self._order_by_strategy[record.promote_strategy])
record.promoted_slide_id = slides and slides[0] or False
nbr_presentations = fields.Integer('Number of Presentations', compute='_count_presentations', store=True)
nbr_documents = fields.Integer('Number of Documents', compute='_count_presentations', store=True)
nbr_videos = fields.Integer('Number of Videos', compute='_count_presentations', store=True)
nbr_infographics = fields.Integer('Number of Infographics', compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
result = dict.fromkeys(self.ids, dict())
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('channel_id', 'in', self.ids)],
['channel_id', 'slide_type'], ['channel_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['channel_id'][0]][res_group['slide_type']] = result[res_group['channel_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
publish_template_id = fields.Many2one(
'mail.template', string='Published Template',
help="Email template to send slide publication through email",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_published'))
share_template_id = fields.Many2one(
'mail.template', string='Shared Template',
help="Email template used when sharing a slide",
default=lambda self: self.env['ir.model.data'].xmlid_to_res_id('website_slides.slide_template_shared'))
visibility = fields.Selection([
('public', 'Public'),
('private', 'Private'),
('partial', 'Show channel but restrict presentations')],
default='public', required=True)
group_ids = fields.Many2many(
'res.groups', 'rel_channel_groups', 'channel_id', 'group_id',
string='Channel Groups', help="Groups allowed to see presentations in this channel")
access_error_msg = fields.Html(
'Error Message', help="Message to display when not accessible due to access rights",
default="<p>This channel is private and its content is restricted to some users.</p>", translate=html_translate, sanitize_attributes=False)
upload_group_ids = fields.Many2many(
'res.groups', 'rel_upload_groups', 'channel_id', 'group_id',
string='Upload Groups', help="Groups allowed to upload presentations in this channel. If void, every user can upload.")
# not stored access fields, depending on each user
can_see = fields.Boolean('Can See', compute='_compute_access', search='_search_can_see')
can_see_full = fields.Boolean('Full Access', compute='_compute_access')
can_upload = fields.Boolean('Can Upload', compute='_compute_access')
def _search_can_see(self, operator, value):
if operator not in ('=', '!=', '<>'):
raise ValueError('Invalid operator: %s' % (operator,))
if not value:
operator = operator == "=" and '!=' or '='
if self._uid == SUPERUSER_ID:
return [(1, '=', 1)]
        # Better performance to split the request and use an inner join rather than a left join
req = """
SELECT id FROM slide_channel WHERE visibility='public'
UNION
SELECT c.id
FROM slide_channel c
INNER JOIN rel_channel_groups rg on c.id = rg.channel_id
INNER JOIN res_groups g on g.id = rg.group_id
INNER JOIN res_groups_users_rel u on g.id = u.gid and uid = %s
"""
op = operator == "=" and "inselect" or "not inselect"
        # don't use named parameters because the ORM will add other parameters (test_active, ...)
return [('id', op, (req, (self._uid)))]
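    # Editorial usage sketch (not part of the original module): because can_see is
    # searchable, website controllers can filter channels with an ordinary domain, e.g.
    #   self.env['slide.channel'].search([('can_see', '=', True)])
    # which expands to the UNION query above for non-superusers.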
@api.one
@api.depends('visibility', 'group_ids', 'upload_group_ids')
def _compute_access(self):
self.can_see = self.visibility in ['public', 'private'] or bool(self.group_ids & self.env.user.groups_id)
self.can_see_full = self.visibility == 'public' or bool(self.group_ids & self.env.user.groups_id)
self.can_upload = self.can_see and (not self.upload_group_ids or bool(self.upload_group_ids & self.env.user.groups_id))
@api.multi
@api.depends('name')
def _compute_website_url(self):
super(Channel, self)._compute_website_url()
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for channel in self:
            if channel.id:  # avoid performing a slug on a not-yet-saved record in case of an onchange.
channel.website_url = '%s/slides/%s' % (base_url, slug(channel))
@api.onchange('visibility')
def change_visibility(self):
if self.visibility == 'public':
self.group_ids = False
@api.multi
def write(self, vals):
res = super(Channel, self).write(vals)
if 'active' in vals:
# archiving/unarchiving a channel does it on its slides, too
self.with_context(active_test=False).mapped('slide_ids').write({'active': vals['active']})
return res
@api.multi
@api.returns('self', lambda value: value.id)
def message_post(self, parent_id=False, subtype=None, **kwargs):
""" Temporary workaround to avoid spam. If someone replies on a channel
through the 'Presentation Published' email, it should be considered as a
note as we don't want all channel followers to be notified of this answer. """
self.ensure_one()
if parent_id:
parent_message = self.env['mail.message'].sudo().browse(parent_id)
if parent_message.subtype_id and parent_message.subtype_id == self.env.ref('website_slides.mt_channel_slide_published'):
if kwargs.get('subtype_id'):
kwargs['subtype_id'] = False
subtype = 'mail.mt_note'
return super(Channel, self).message_post(parent_id=parent_id, subtype=subtype, **kwargs)
class Category(models.Model):
""" Channel contain various categories to manage its slides """
_name = 'slide.category'
_description = "Slides Category"
_order = "sequence, id"
name = fields.Char('Name', translate=True, required=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True, ondelete='cascade')
sequence = fields.Integer(default=10, help='Display order')
slide_ids = fields.One2many('slide.slide', 'category_id', string="Slides")
nbr_presentations = fields.Integer("Number of Presentations", compute='_count_presentations', store=True)
nbr_documents = fields.Integer("Number of Documents", compute='_count_presentations', store=True)
nbr_videos = fields.Integer("Number of Videos", compute='_count_presentations', store=True)
nbr_infographics = fields.Integer("Number of Infographics", compute='_count_presentations', store=True)
total = fields.Integer(compute='_count_presentations', store=True)
@api.depends('slide_ids.slide_type', 'slide_ids.website_published')
def _count_presentations(self):
result = dict.fromkeys(self.ids, dict())
res = self.env['slide.slide'].read_group(
[('website_published', '=', True), ('category_id', 'in', self.ids)],
['category_id', 'slide_type'], ['category_id', 'slide_type'],
lazy=False)
for res_group in res:
result[res_group['category_id'][0]][res_group['slide_type']] = result[res_group['category_id'][0]].get(res_group['slide_type'], 0) + res_group['__count']
for record in self:
record.nbr_presentations = result[record.id].get('presentation', 0)
record.nbr_documents = result[record.id].get('document', 0)
record.nbr_videos = result[record.id].get('video', 0)
record.nbr_infographics = result[record.id].get('infographic', 0)
record.total = record.nbr_presentations + record.nbr_documents + record.nbr_videos + record.nbr_infographics
class EmbeddedSlide(models.Model):
""" Embedding in third party websites. Track view count, generate statistics. """
_name = 'slide.embed'
_description = 'Embedded Slides View Counter'
_rec_name = 'slide_id'
slide_id = fields.Many2one('slide.slide', string="Presentation", required=True, index=True)
url = fields.Char('Third Party Website URL', required=True)
count_views = fields.Integer('# Views', default=1)
def add_embed_url(self, slide_id, url):
baseurl = urls.url_parse(url).netloc
embeds = self.search([('url', '=', baseurl), ('slide_id', '=', int(slide_id))], limit=1)
if embeds:
embeds.count_views += 1
else:
embeds = self.create({
'slide_id': slide_id,
'url': baseurl,
})
return embeds.count_views
class SlideTag(models.Model):
""" Tag to search slides accross channels. """
_name = 'slide.tag'
_description = 'Slide Tag'
name = fields.Char('Name', required=True, translate=True)
_sql_constraints = [
('slide_tag_unique', 'UNIQUE(name)', 'A tag must be unique!'),
]
class Slide(models.Model):
""" This model represents actual presentations. Those must be one of four
types:
- Presentation
- Document
- Infographic
- Video
Slide has various statistics like view count, embed count, like, dislikes """
_name = 'slide.slide'
_inherit = ['mail.thread', 'website.seo.metadata', 'website.published.mixin']
_description = 'Slides'
_mail_post_access = 'read'
_PROMOTIONAL_FIELDS = [
'__last_update', 'name', 'image_thumb', 'image_medium', 'slide_type', 'total_views', 'category_id',
'channel_id', 'description', 'tag_ids', 'write_date', 'create_date',
'website_published', 'website_url', 'website_meta_title', 'website_meta_description', 'website_meta_keywords']
_sql_constraints = [
('name_uniq', 'UNIQUE(channel_id, name)', 'The slide name must be unique within a channel')
]
# description
name = fields.Char('Title', required=True, translate=True)
active = fields.Boolean(default=True)
description = fields.Text('Description', translate=True)
channel_id = fields.Many2one('slide.channel', string="Channel", required=True)
category_id = fields.Many2one('slide.category', string="Category", domain="[('channel_id', '=', channel_id)]")
tag_ids = fields.Many2many('slide.tag', 'rel_slide_tag', 'slide_id', 'tag_id', string='Tags')
download_security = fields.Selection(
        [('none', 'No One'), ('user', 'Authenticated Users Only'), ('public', 'Everyone')],
string='Download Security',
required=True, default='user')
image = fields.Binary('Image', attachment=True)
image_medium = fields.Binary('Medium', compute="_get_image", store=True, attachment=True)
image_thumb = fields.Binary('Thumbnail', compute="_get_image", store=True, attachment=True)
@api.depends('image')
def _get_image(self):
for record in self:
if record.image:
record.image_medium = image.crop_image(record.image, type='top', ratio=(4, 3), size=(500, 400))
record.image_thumb = image.crop_image(record.image, type='top', ratio=(4, 3), size=(200, 200))
else:
record.image_medium = False
                record.image_thumb = False
# content
slide_type = fields.Selection([
('infographic', 'Infographic'),
('presentation', 'Presentation'),
('document', 'Document'),
('video', 'Video')],
string='Type', required=True,
default='document',
help="The document type will be set automatically based on the document URL and properties (e.g. height and width for presentation and document).")
index_content = fields.Text('Transcript')
datas = fields.Binary('Content', attachment=True)
url = fields.Char('Document URL', help="Youtube or Google Document URL")
document_id = fields.Char('Document ID', help="Youtube or Google Document ID")
mime_type = fields.Char('Mime-type')
@api.onchange('url')
def on_change_url(self):
self.ensure_one()
if self.url:
res = self._parse_document_url(self.url)
if res.get('error'):
raise Warning(_('Could not fetch data from url. Document or access right not available:\n%s') % res['error'])
values = res['values']
if not values.get('document_id'):
raise Warning(_('Please enter valid Youtube or Google Doc URL'))
for key, value in values.items():
self[key] = value
# website
date_published = fields.Datetime('Publish Date')
likes = fields.Integer('Likes')
dislikes = fields.Integer('Dislikes')
# views
embedcount_ids = fields.One2many('slide.embed', 'slide_id', string="Embed Count")
slide_views = fields.Integer('# of Website Views')
embed_views = fields.Integer('# of Embedded Views')
total_views = fields.Integer("Total # Views", default="0", compute='_compute_total', store=True)
@api.depends('slide_views', 'embed_views')
def _compute_total(self):
for record in self:
record.total_views = record.slide_views + record.embed_views
embed_code = fields.Text('Embed Code', readonly=True, compute='_get_embed_code')
def _get_embed_code(self):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for record in self:
if record.datas and (not record.document_id or record.slide_type in ['document', 'presentation']):
record.embed_code = '<iframe src="%s/slides/embed/%s?page=1" allowFullScreen="true" height="%s" width="%s" frameborder="0"></iframe>' % (base_url, record.id, 315, 420)
elif record.slide_type == 'video' and record.document_id:
if not record.mime_type:
# embed youtube video
record.embed_code = '<iframe src="//www.youtube.com/embed/%s?theme=light" allowFullScreen="true" frameborder="0"></iframe>' % (record.document_id)
else:
# embed google doc video
record.embed_code = '<embed src="https://video.google.com/get_player?ps=docs&partnerid=30&docid=%s" type="application/x-shockwave-flash"></embed>' % (record.document_id)
else:
record.embed_code = False
@api.multi
@api.depends('name')
def _compute_website_url(self):
super(Slide, self)._compute_website_url()
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for slide in self:
            if slide.id:  # avoid performing a slug on a not-yet-saved record in case of an onchange.
# link_tracker is not in dependencies, so use it to shorten url only if installed.
if self.env.registry.get('link.tracker'):
url = self.env['link.tracker'].sudo().create({
'url': '%s/slides/slide/%s' % (base_url, slug(slide)),
'title': slide.name,
}).short_url
else:
url = '%s/slides/slide/%s' % (base_url, slug(slide))
slide.website_url = url
@api.model
def create(self, values):
if not values.get('index_content'):
values['index_content'] = values.get('description')
if values.get('slide_type') == 'infographic' and not values.get('image'):
values['image'] = values['datas']
if values.get('website_published') and not values.get('date_published'):
values['date_published'] = datetime.datetime.now()
if values.get('url') and not values.get('document_id'):
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.items():
values.setdefault(key, value)
# Do not publish slide if user has not publisher rights
if not self.user_has_groups('website.group_website_publisher'):
values['website_published'] = False
slide = super(Slide, self).create(values)
slide.channel_id.message_subscribe_users()
slide._post_publication()
return slide
@api.multi
def write(self, values):
if values.get('url') and values['url'] != self.url:
doc_data = self._parse_document_url(values['url']).get('values', dict())
for key, value in doc_data.items():
values.setdefault(key, value)
if values.get('channel_id'):
custom_channels = self.env['slide.channel'].search([('custom_slide_id', '=', self.id), ('id', '!=', values.get('channel_id'))])
custom_channels.write({'custom_slide_id': False})
res = super(Slide, self).write(values)
if values.get('website_published'):
self.date_published = datetime.datetime.now()
self._post_publication()
return res
@api.model
def check_field_access_rights(self, operation, fields):
""" As per channel access configuration (visibility)
- public ==> no restriction on slides access
- private ==> restrict all slides of channel based on access group defined on channel group_ids field
- partial ==> show channel, but presentations based on groups means any user can see channel but not slide's content.
For private: implement using record rule
For partial: user can see channel, but channel gridview have slide detail so we have to implement
partial field access mechanism for public user so he can have access of promotional field (name, view_count) of slides,
but not all fields like data (actual pdf content)
all fields should be accessible only for user group defined on channel group_ids
"""
if self.env.uid == SUPERUSER_ID:
return fields or list(self._fields)
fields = super(Slide, self).check_field_access_rights(operation, fields)
        # the read has not been performed yet, so we cannot access self.channel_id directly
if self.ids:
self.env.cr.execute('SELECT DISTINCT channel_id FROM ' + self._table + ' WHERE id IN %s', (tuple(self.ids),))
channel_ids = [x[0] for x in self.env.cr.fetchall()]
channels = self.env['slide.channel'].sudo().browse(channel_ids)
limited_access = all(channel.visibility == 'partial' and
not len(channel.group_ids & self.env.user.groups_id)
for channel in channels)
if limited_access:
fields = [field for field in fields if field in self._PROMOTIONAL_FIELDS]
return fields
@api.multi
def get_access_action(self, access_uid=None):
""" Instead of the classic form view, redirect to website if it is published. """
self.ensure_one()
if self.website_published:
return {
'type': 'ir.actions.act_url',
'url': '%s' % self.website_url,
'target': 'self',
'target_type': 'public',
'res_id': self.id,
}
return super(Slide, self).get_access_action(access_uid)
@api.multi
def _notification_recipients(self, message, groups):
groups = super(Slide, self)._notification_recipients(message, groups)
self.ensure_one()
if self.website_published:
for group_name, group_method, group_data in groups:
group_data['has_button_access'] = True
return groups
def get_related_slides(self, limit=20):
domain = [('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)]
if self.category_id:
domain += [('category_id', '=', self.category_id.id)]
for record in self.search(domain, limit=limit):
yield record
def get_most_viewed_slides(self, limit=20):
for record in self.search([('website_published', '=', True), ('channel_id.visibility', '!=', 'private'), ('id', '!=', self.id)], limit=limit, order='total_views desc'):
yield record
def _post_publication(self):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for slide in self.filtered(lambda slide: slide.website_published and slide.channel_id.publish_template_id):
publish_template = slide.channel_id.publish_template_id
html_body = publish_template.with_context(base_url=base_url).render_template(publish_template.body_html, 'slide.slide', slide.id)
subject = publish_template.render_template(publish_template.subject, 'slide.slide', slide.id)
slide.channel_id.message_post(
subject=subject,
body=html_body,
subtype='website_slides.mt_channel_slide_published')
return True
@api.one
def send_share_email(self, email):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
return self.channel_id.share_template_id.with_context(email=email, base_url=base_url).send_mail(self.id)
# --------------------------------------------------
# Parsing methods
# --------------------------------------------------
@api.model
def _fetch_data(self, base_url, data, content_type=False, extra_params=False):
result = {'values': dict()}
try:
response = requests.get(base_url, params=data)
response.raise_for_status()
if content_type == 'json':
result['values'] = response.json()
elif content_type in ('image', 'pdf'):
result['values'] = base64.b64encode(response.content)
else:
result['values'] = response.content
except requests.exceptions.HTTPError as e:
result['error'] = e.response.content
except requests.exceptions.ConnectionError as e:
result['error'] = str(e)
return result
def _find_document_data_from_url(self, url):
expr = re.compile(r'^.*((youtu.be/)|(v/)|(\/u\/\w\/)|(embed\/)|(watch\?))\??v?=?([^#\&\?]*).*')
arg = expr.match(url)
document_id = arg and arg.group(7) or False
if document_id:
return ('youtube', document_id)
expr = re.compile(r'(^https:\/\/docs.google.com|^https:\/\/drive.google.com).*\/d\/([^\/]*)')
arg = expr.match(url)
document_id = arg and arg.group(2) or False
if document_id:
return ('google', document_id)
return (None, False)
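    # Editorial sketch of the expected mapping (example URLs, not from the source):
    #   _find_document_data_from_url('https://youtu.be/dQw4w9WgXcQ')
    #       -> ('youtube', 'dQw4w9WgXcQ')
    #   _find_document_data_from_url('https://docs.google.com/presentation/d/1AbC/edit')
    #       -> ('google', '1AbC')
    #   _find_document_data_from_url('https://example.com/page')
    #       -> (None, False)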
def _parse_document_url(self, url, only_preview_fields=False):
document_source, document_id = self._find_document_data_from_url(url)
if document_source and hasattr(self, '_parse_%s_document' % document_source):
return getattr(self, '_parse_%s_document' % document_source)(document_id, only_preview_fields)
return {'error': _('Unknown document')}
def _parse_youtube_document(self, document_id, only_preview_fields):
key = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/youtube/v3/videos', {'id': document_id, 'key': key, 'part': 'snippet', 'fields': 'items(id,snippet)'}, 'json')
if fetch_res.get('error'):
return fetch_res
values = {'slide_type': 'video', 'document_id': document_id}
items = fetch_res['values'].get('items')
if not items:
return {'error': _('Please enter valid Youtube or Google Doc URL')}
youtube_values = items[0]
if youtube_values.get('snippet'):
snippet = youtube_values['snippet']
if only_preview_fields:
values.update({
'url_src': snippet['thumbnails']['high']['url'],
'title': snippet['title'],
'description': snippet['description']
})
return values
values.update({
'name': snippet['title'],
'image': self._fetch_data(snippet['thumbnails']['high']['url'], {}, 'image')['values'],
'description': snippet['description'],
})
return {'values': values}
@api.model
def _parse_google_document(self, document_id, only_preview_fields):
def get_slide_type(vals):
# TDE FIXME: WTF ??
slide_type = 'presentation'
if vals.get('image'):
image = Image.open(io.BytesIO(base64.b64decode(vals['image'])))
width, height = image.size
if height > width:
return 'document'
return slide_type
# Google drive doesn't use a simple API key to access the data, but requires an access
# token. However, this token is generated in module google_drive, which is not in the
# dependencies of website_slides. We still keep the 'key' parameter just in case, but that
# is probably useless.
params = {}
params['projection'] = 'BASIC'
if 'google.drive.config' in self.env:
access_token = self.env['google.drive.config'].get_access_token()
if access_token:
params['access_token'] = access_token
if not params.get('access_token'):
params['key'] = self.env['ir.config_parameter'].sudo().get_param('website_slides.google_app_key')
fetch_res = self._fetch_data('https://www.googleapis.com/drive/v2/files/%s' % document_id, params, "json")
if fetch_res.get('error'):
return fetch_res
google_values = fetch_res['values']
if only_preview_fields:
return {
'url_src': google_values['thumbnailLink'],
'title': google_values['title'],
}
values = {
'name': google_values['title'],
'image': self._fetch_data(google_values['thumbnailLink'].replace('=s220', ''), {}, 'image')['values'],
'mime_type': google_values['mimeType'],
'document_id': document_id,
}
if google_values['mimeType'].startswith('video/'):
values['slide_type'] = 'video'
elif google_values['mimeType'].startswith('image/'):
values['datas'] = values['image']
values['slide_type'] = 'infographic'
elif google_values['mimeType'].startswith('application/vnd.google-apps'):
values['slide_type'] = get_slide_type(values)
if 'exportLinks' in google_values:
values['datas'] = self._fetch_data(google_values['exportLinks']['application/pdf'], params, 'pdf', extra_params=True)['values']
# Content indexing
if google_values['exportLinks'].get('text/plain'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/plain'], params, extra_params=True)['values']
elif google_values['exportLinks'].get('text/csv'):
values['index_content'] = self._fetch_data(google_values['exportLinks']['text/csv'], params, extra_params=True)['values']
elif google_values['mimeType'] == 'application/pdf':
# TODO: Google Drive PDF document doesn't provide plain text transcript
values['datas'] = self._fetch_data(google_values['webContentLink'], {}, 'pdf')['values']
values['slide_type'] = get_slide_type(values)
return {'values': values}
| gpl-3.0 | -3,198,032,734,659,106,000 | 48.63141 | 189 | 0.607653 | false |
bugsnag/bugsnag-python | tests/integrations/test_flask.py | 1 | 11037 | import json
import re
from flask import Flask
from bugsnag.flask import handle_exceptions
import bugsnag.event
from bugsnag.breadcrumbs import BreadcrumbType
from tests.utils import IntegrationTest
class SentinelError(RuntimeError):
pass
class TestFlask(IntegrationTest):
def setUp(self):
super(TestFlask, self).setUp()
bugsnag.configure(endpoint=self.server.url,
api_key='3874876376238728937',
notify_release_stages=['dev'],
release_stage='dev',
asynchronous=False,
max_breadcrumbs=25)
def test_bugsnag_middleware_working(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
return "OK"
handle_exceptions(app)
resp = app.test_client().get('/hello')
self.assertEqual(resp.data, b'OK')
self.assertEqual(0, len(self.server.received))
def test_bugsnag_crash(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/hello')
assert 'environment' not in event['metaData']
breadcrumbs = payload['events'][0]['breadcrumbs']
assert len(breadcrumbs) == 1
assert breadcrumbs[0]['name'] == 'http request'
assert breadcrumbs[0]['metaData'] == {'to': '/hello'}
assert breadcrumbs[0]['type'] == BreadcrumbType.NAVIGATION.value
def test_enable_environment(self):
bugsnag.configure(send_environment=True)
app = Flask("bugsnag")
@app.route("/hello")
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData']['environment']['REMOTE_ADDR'],
'127.0.0.1')
def test_bugsnag_notify(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
bugsnag.notify(SentinelError("oops"))
return "OK"
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
self.assertEqual(payload['events'][0]['metaData']['request']['url'],
'http://localhost/hello')
def test_bugsnag_custom_data(self):
metadata = [{"hello": {"world": "once"}},
{"again": {"hello": "world"}}]
app = Flask("bugsnag")
@app.route("/hello")
def hello():
bugsnag.configure_request(metadata=metadata.pop())
raise SentinelError("oops")
handle_exceptions(app)
with app.test_client() as client:
client.get('/hello')
client.get('/hello')
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData'].get('hello'), None)
self.assertEqual(event['metaData']['again']['hello'], 'world')
payload = self.server.received[1]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData']['hello']['world'], 'once')
self.assertEqual(event['metaData'].get('again'), None)
self.assertEqual(2, len(self.server.received))
def test_bugsnag_includes_posted_json_data(self):
app = Flask("bugsnag")
@app.route("/ajax", methods=["POST"])
def hello():
raise SentinelError("oops")
handle_exceptions(app)
body = {
'_links': {
'self': {
'href': 'http://example.com/api/resource/a'
}
},
'id': 'res-a',
'name': 'Resource A'
}
app.test_client().post(
'/ajax', data=json.dumps(body),
content_type='application/hal+json')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/ajax')
self.assertEqual(event['metaData']['request']['data'], body)
def test_bugsnag_includes_request_when_json_malformed(self):
app = Flask("bugsnag")
@app.route("/ajax", methods=["POST"])
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().post(
'/ajax', data='{"key": "value"', content_type='application/json')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/ajax')
self.assertEqual(event['metaData']['request']['data']['body'],
'{"key": "value"')
def test_bugsnag_add_metadata_tab(self):
app = Flask("bugsnag")
@app.route("/form", methods=["PUT"])
def hello():
bugsnag.add_metadata_tab("account", {"id": 1, "premium": True})
bugsnag.add_metadata_tab("account", {"premium": False})
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().put(
'/form', data='_data', content_type='application/octet-stream')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['metaData']['account']['premium'], False)
self.assertEqual(event['metaData']['account']['id'], 1)
def test_bugsnag_includes_unknown_content_type_posted_data(self):
app = Flask("bugsnag")
@app.route("/form", methods=["PUT"])
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().put(
'/form', data='_data', content_type='application/octet-stream')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/form')
body = event['metaData']['request']['data']['body']
self.assertTrue('_data' in body)
def test_bugsnag_notify_with_custom_context(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
bugsnag.notify(SentinelError("oops"),
context="custom_context_event_testing")
return "OK"
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
self.assertEqual(payload['events'][0]['context'],
'custom_context_event_testing')
    def test_flask_integration_includes_middleware_severity(self):
app = Flask("bugsnag")
@app.route("/test")
def test():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().get("/test")
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertTrue(event['unhandled'])
self.assertEqual(event['severityReason'], {
"type": "unhandledExceptionMiddleware",
"attributes": {
"framework": "Flask"
}
})
def test_appends_framework_version(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
raise SentinelError("oops")
handle_exceptions(app)
app.test_client().get('/hello')
self.assertEqual(len(self.server.received), 1)
payload = self.server.received[0]['json_body']
device_data = payload['events'][0]['device']
        self.assertEqual(len(device_data['runtimeVersions']), 2)
self.assertTrue(re.match(r'\d+\.\d+\.\d+',
device_data['runtimeVersions']['python']))
self.assertTrue(re.match(r'\d+\.\d+\.\d+',
device_data['runtimeVersions']['flask']))
def test_read_request_in_callback(self):
def callback(event):
event.set_user(id=event.request.args['id'])
return True
app = Flask("bugsnag")
@app.route("/hello")
def hello():
raise SentinelError("oops")
bugsnag.before_notify(callback)
handle_exceptions(app)
app.test_client().get('/hello?id=foo')
assert len(self.server.received) == 1
payload = self.server.received[0]['json_body']
assert payload['events'][0]['user']['id'] == 'foo'
def test_bugsnag_middleware_leaves_breadcrumb_with_referer(self):
app = Flask("bugsnag")
@app.route("/hello")
def hello():
raise SentinelError("oops")
handle_exceptions(app)
headers = {'referer': 'http://localhost/hi?password=hunter2'}
app.test_client().get('/hello', headers=headers)
self.assertEqual(1, len(self.server.received))
payload = self.server.received[0]['json_body']
event = payload['events'][0]
self.assertEqual(event['exceptions'][0]['errorClass'],
'test_flask.SentinelError')
self.assertEqual(event['metaData']['request']['url'],
'http://localhost/hello')
assert 'environment' not in event['metaData']
breadcrumbs = payload['events'][0]['breadcrumbs']
assert len(breadcrumbs) == 1
assert breadcrumbs[0]['name'] == 'http request'
assert breadcrumbs[0]['metaData'] == {
'to': '/hello',
'from': 'http://localhost/hi'
}
assert breadcrumbs[0]['type'] == BreadcrumbType.NAVIGATION.value
| mit | -6,688,906,742,728,496,000 | 33.276398 | 77 | 0.556129 | false |
olhoneles/politicos | politicos_api/handlers/marital_status.py | 1 | 1220 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Marcelo Jorge Vieira <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from politicos_api.cache import cache
from politicos_api.handlers.base import BaseHandler
class MaritalStatusHandler(BaseHandler):
@cache(5)
async def get(self):
response = await self.agg_query(['ds_estado_civil', 'cd_estado_civil'])
await self.json_response(response)
class MaritalStatusSuggestHandler(BaseHandler):
@cache(5)
async def get(self):
await self.suggest_response('ds_estado_civil', ['cd_estado_civil'])
| agpl-3.0 | -4,413,909,736,209,988,000 | 34.882353 | 79 | 0.731148 | false |
pyhmsa/pyhmsa | pyhmsa/fileformat/datafile.py | 1 | 13644 | """
Reader and writer of data file
"""
# Standard library modules.
import os
import io
import logging
logger = logging.getLogger(__name__)
import binascii
import xml.etree.ElementTree as etree
import xml.dom.minidom as minidom
# Third party modules.
from pkg_resources import iter_entry_points
# Local modules.
from pyhmsa.datafile import DataFile
from pyhmsa.fileformat.xmlhandler.header import HeaderXMLHandler
from pyhmsa.type.checksum import calculate_checksum, calculate_checksum_sha1
from pyhmsa.type.uid import generate_uid
from pyhmsa.util.monitorable import _Monitorable, _MonitorableThread
# Globals and constants variables.
def _extract_filepath(filepath):
base, ext = os.path.splitext(filepath)
if ext not in ['.xml', '.hmsa']:
        raise IOError('File must be either an XML or HMSA file')
filepath_xml = base + '.xml'
filepath_hmsa = base + '.hmsa'
return filepath_xml, filepath_hmsa
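# Editorial usage sketch: either companion path yields the same pair, e.g.
#   _extract_filepath('run1.hmsa') -> ('run1.xml', 'run1.hmsa')
#   _extract_filepath('run1.xml')  -> ('run1.xml', 'run1.hmsa')
# and any other extension (e.g. 'run1.txt') raises IOError.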
class _DataFileReaderMixin:
def _read(self, xml_file, hmsa_file, *args, **kwargs):
self._update_status(0.0, 'Running')
if self.is_cancelled(): return
# Read XML
root = etree.ElementTree(file=xml_file).getroot()
# Create object
self._update_status(0.1, 'Creating data file')
if self.is_cancelled(): return
datafile = DataFile(version=root.attrib['Version'])
# Read
self._update_status(0.13, 'Reading XML')
if self.is_cancelled(): return
self._read_root(datafile, root)
self._update_status(0.16, 'Reading header')
if self.is_cancelled(): return
self._read_header(datafile, root)
self._update_status(0.2, 'Reading conditions')
if self.is_cancelled(): return
self._read_conditions(datafile, root)
self._update_status(0.6, 'Reading data')
if self.is_cancelled(): return
self._read_data(datafile, root, hmsa_file)
self._update_status(1.0, 'Completed')
return datafile
def _read_root(self, datafile, root):
datafile.language = \
root.get('{http://www.w3.org/XML/1998/namespace}lang', 'en-US')
def _read_header(self, datafile, root):
handler = HeaderXMLHandler(datafile.version)
datafile.header.update(handler.parse(root.find('Header')))
def _read_conditions(self, datafile, root):
# Load handlers
handlers = set()
for entry_point in iter_entry_points('pyhmsa.fileformat.xmlhandler.condition'):
handler_class = entry_point.resolve()
handler = handler_class(datafile.version)
handlers.add(handler)
# Parse conditions
elements = root.findall('Conditions/*')
count = len(elements)
for i, element in enumerate(elements):
key = element.get('ID', 'Inst%i' % len(datafile.conditions))
self._update_status(0.2 + i / count * 0.4,
'Reading condition %s' % key)
if self.is_cancelled(): return
for handler in handlers:
if handler.can_parse(element):
datafile.conditions[key] = handler.parse(element)
break
def _read_data(self, datafile, root, hmsa_file):
# Check UID
xml_uid = root.attrib['UID'].encode('ascii')
hmsa_uid = binascii.hexlify(hmsa_file.read(8))
if xml_uid.upper() != hmsa_uid.upper():
raise ValueError('UID in XML (%s) does not match UID in HMSA (%s)' % \
(xml_uid, hmsa_uid))
logger.debug('Check UID: %s == %s', xml_uid, hmsa_uid)
# Check checksum
xml_checksum = getattr(datafile.header, 'checksum', None)
if xml_checksum is not None:
xml_checksum = datafile.header.checksum
hmsa_file.seek(0)
buffer = hmsa_file.read()
hmsa_checksum = calculate_checksum(xml_checksum.algorithm, buffer)
if xml_checksum.value.upper() != hmsa_checksum.value.upper():
raise ValueError('Checksum in XML (%s) does not match checksum in HMSA (%s)' % \
(xml_checksum.value, hmsa_checksum.value))
logger.debug('Check sum: %s == %s', xml_checksum.value, hmsa_checksum.value)
# Load handlers
handlers = set()
for entry_point in iter_entry_points('pyhmsa.fileformat.xmlhandler.datum'):
handler_class = entry_point.resolve()
handler = handler_class(datafile.version, hmsa_file,
datafile.conditions)
handlers.add(handler)
# Parse data
elements = root.findall('Data/*')
count = len(elements)
for i, element in enumerate(elements):
key = element.get('Name', 'Inst%i' % len(datafile.data))
self._update_status(0.6 + i / count * 0.4, 'Reading datum %s' % key)
if self.is_cancelled(): return
for handler in handlers:
if handler.can_parse(element):
datafile.data[key] = handler.parse(element)
break
class _BufferedDataFileReaderThread(_MonitorableThread, _DataFileReaderMixin):
def __init__(self, xml_file, hmsa_file):
super().__init__(args=(xml_file, hmsa_file,))
def _run(self, xml_file, hmsa_file, *args, **kwargs):
return self._read(xml_file, hmsa_file)
class _DataFileReaderThread(_MonitorableThread, _DataFileReaderMixin):
def __init__(self, filepath):
filepath_xml, filepath_hmsa = _extract_filepath(filepath)
if not os.path.exists(filepath_xml):
raise IOError('XML file is missing')
if not os.path.exists(filepath_hmsa):
raise IOError('HMSA file is missing')
super().__init__(args=(filepath,))
def _run(self, filepath, *args, **kwargs):
self._update_status(0.0, 'Running')
if self.is_cancelled(): return
filepath_xml, filepath_hmsa = _extract_filepath(filepath)
xml_file = open(filepath_xml, 'rb')
hmsa_file = open(filepath_hmsa, 'rb')
try:
datafile = self._read(xml_file, hmsa_file)
if self.is_cancelled(): return
datafile._filepath = filepath_hmsa
finally:
hmsa_file.close()
xml_file.close()
self._update_status(1.0, 'Completed')
return datafile
class DataFileReader(_Monitorable):
def _create_thread(self, filepath=None, xml_file=None, hmsa_file=None, *args, **kwargs):
if xml_file is not None and hmsa_file is not None:
return _BufferedDataFileReaderThread(xml_file, hmsa_file)
else:
return _DataFileReaderThread(filepath)
def read(self, filepath=None, xml_file=None, hmsa_file=None):
"""
Reads an existing MSA hyper dimensional data file.
:arg filepath: either the location of the XML or HMSA file.
Note that both have to be present.
"""
self._start(filepath, xml_file, hmsa_file)
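    # Editorial usage sketch (assumes the _Monitorable base class exposes a blocking
    # get(), as in the pyhmsa documentation; the exact API may differ between versions):
    #   reader = DataFileReader()
    #   reader.read('map.hmsa')    # expects map.xml next to map.hmsa
    #   datafile = reader.get()    # waits for the reader thread and returns the DataFile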
class _DataFileWriterMixin:
def _write(self, datafile, xml_file, hmsa_file, *args, **kwargs):
self._update_status(0.0, 'Running')
if self.is_cancelled(): return
# Generate UID
self._update_status(0.025, 'Generating UID')
if self.is_cancelled(): return
uid = generate_uid()
hmsa_file.write(uid)
# Create XML
self._update_status(0.05, 'Writing XML')
if self.is_cancelled(): return
root = etree.Element('MSAHyperDimensionalDataFile')
self._write_root(datafile, root, uid)
self._update_status(0.075, 'Writing header')
if self.is_cancelled(): return
self._write_header(datafile, root)
self._update_status(0.1, 'Writing conditions')
if self.is_cancelled(): return
self._write_conditions(datafile, root)
self._update_status(0.5, 'Writing data')
if self.is_cancelled(): return
self._write_data(datafile, root, hmsa_file)
# Calculate and add checksum
self._update_status(0.93, 'Calculating checksum')
if self.is_cancelled(): return
checksum = calculate_checksum_sha1(hmsa_file.getvalue())
element = root.find('Header')
subelement = etree.Element('Checksum')
subelement.text = checksum.value
subelement.set('Algorithm', checksum.algorithm)
element.append(subelement)
# Write XML file
self._update_status(0.96, 'Writing XML to file')
if self.is_cancelled(): return
output = etree.tostring(root, encoding='UTF-8')
document = minidom.parseString(output)
output = document.toprettyxml(encoding='UTF-8')
        # Add the standalone attribute to the XML declaration manually (minidom does not emit it)
output = output.replace(b'<?xml version="1.0" encoding="UTF-8"?>',
b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
xml_file.write(output)
self._update_status(1.0, 'Completed')
return datafile
def _write_root(self, datafile, root, uid):
root.set('Version', datafile.version)
root.set('UID', binascii.hexlify(uid).decode('utf-8').upper())
root.set('{http://www.w3.org/XML/1998/namespace}lang', datafile.language)
def _write_header(self, datafile, root):
handler = HeaderXMLHandler(datafile.version)
element = handler.convert(datafile.header)
root.append(element)
def _write_conditions(self, datafile, root):
# Load handlers
handlers = set()
for entry_point in iter_entry_points('pyhmsa.fileformat.xmlhandler.condition'):
handler_class = entry_point.resolve()
handler = handler_class(datafile.version)
handlers.add(handler)
# Convert conditions
element = etree.Element('Conditions')
count = len(datafile.conditions)
for i, item in enumerate(datafile.conditions.items()):
identifier, condition = item
self._update_status(0.1 + i / count * 0.4,
'Writing condition %s' % identifier)
if self.is_cancelled(): return
for handler in handlers:
if handler.can_convert(condition):
subelement = handler.convert(condition)
subelement.set('ID', identifier)
element.append(subelement)
break
root.append(element)
def _write_data(self, datafile, root, hmsa_file):
# Load handlers
handlers = set()
for entry_point in iter_entry_points('pyhmsa.fileformat.xmlhandler.datum'):
handler_class = entry_point.resolve()
handler = handler_class(datafile.version, hmsa_file,
datafile.conditions)
handlers.add(handler)
# Parse data
element = etree.Element('Data')
count = len(datafile.data)
for i, item in enumerate(datafile.data.items()):
identifier, datum = item
self._update_status(0.5 + i / count * 0.4,
'Writing datum %s' % identifier)
if self.is_cancelled(): return
for handler in handlers:
if handler.can_convert(datum):
subelement = handler.convert(datum)
subelement.set('Name', identifier)
element.append(subelement)
break
root.append(element)
class _BufferedDataFileWriterThread(_MonitorableThread, _DataFileWriterMixin):
def __init__(self, datafile, xml_file, hmsa_file):
super().__init__(args=(datafile, xml_file, hmsa_file))
def _run(self, datafile, xml_file, hmsa_file, *args, **kwargs):
return self._write(datafile, xml_file, hmsa_file)
class _DataFileWriterThread(_MonitorableThread, _DataFileWriterMixin):
def __init__(self, datafile, filepath=None):
if filepath is None:
filepath = datafile.filepath
if filepath is None:
raise ValueError('No filepath given and none defined in datafile')
super().__init__(args=(datafile, filepath))
def _run(self, datafile, filepath, *args, **kwargs):
self._update_status(0.0, 'Running')
if self.is_cancelled(): return
xml_file = io.BytesIO()
hmsa_file = io.BytesIO()
try:
self._write(datafile, xml_file, hmsa_file)
filepath_xml, filepath_hmsa = _extract_filepath(filepath)
with open(filepath_xml, 'wb') as fp:
fp.write(xml_file.getvalue())
with open(filepath_hmsa, 'wb') as fp:
fp.write(hmsa_file.getvalue())
finally:
hmsa_file.close()
xml_file.close()
self._update_status(1.0, 'Completed')
datafile._filepath = filepath_hmsa
return datafile
class DataFileWriter(_Monitorable):
def _create_thread(self, datafile, filepath=None, xml_file=None, hmsa_file=None,
*args, **kwargs):
if xml_file is not None and hmsa_file is not None:
return _BufferedDataFileWriterThread(datafile, xml_file, hmsa_file)
else:
return _DataFileWriterThread(datafile, filepath)
def write(self, datafile, filepath=None, xml_file=None, hmsa_file=None):
"""
Writes a data file to disk.
:arg datafile: data file
:arg filepath: either the location of the XML or HMSA file
"""
self._start(datafile, filepath, xml_file, hmsa_file)
| mit | -8,320,272,475,336,594,000 | 34.34715 | 96 | 0.597699 | false |
dormouse/read | make_mobi.py | 1 | 9766 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import logging
import os
import platform
import re
import subprocess
import shutil
from functools import reduce
from urllib.parse import urlparse, urljoin
import httplib2
from bs4 import BeautifulSoup
from jinja2 import Template
from yttools import YtTools
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)s %(levelname)s %(message)s')
class MakeMobi():
""" make mobi ebook """
def __init__(self, **book_info):
self.logger = logging.getLogger(__name__)
timeout = 20 # second
self.http = httplib2.Http('.cache', timeout=timeout)
self.url = ''
self.template_path = 'template'
self.build_path = 'build'
self.output_path = ''
self.output_img_path = ''
self.all_h2 = []
self.name = ''
self.title = ''
if not os.path.exists(self.build_path):
os.mkdir(self.build_path)
self.creator = book_info.get('creator', 'default creator')
self.copyrights = book_info.get('copyrights', 'default copyrights')
self.publisher = book_info.get('publisher', 'default publisher')
def set_url(self, url):
self.url = url
def make_book(self):
""" make the whole book """
html = YtTools().download(self.url)
if html:
self.make_content(html)
self.make_style_css()
self.make_toc_html()
self.make_toc_ncx()
self.make_opf()
self.make_cover()
# start gen
filename = os.path.join(
os.getcwd(), self.output_path, '%s.opf' % self.name)
if platform.system() == 'Darwin':
# osx
return_code = subprocess.call(['./kindlegen_mac', filename])
elif platform.system() == 'Linux':
# Linux
return_code = subprocess.call(['./kindlegen_linux', filename])
else:
# other
return_code = 999
else:
return_code = 110
return return_code
def guess_charset(self, html):
charset = 'utf-8'
soup = BeautifulSoup(html, 'html.parser')
if 'gb2312' in soup.meta['content'].lower():
charset = 'gb2312'
return charset
def make_content(self, html):
""" the content of book """
charset = self.guess_charset(html)
html = html.decode(charset, 'ignore')
soup = BeautifulSoup(html, 'html.parser')
self.title = soup.find_all(href=re.compile('more'))[0].string
# name
self.name = 'pttg%s' % self.title[5:13]
# output path
output_path = 'pttg%s' % self.title[5:13]
self.output_path = os.path.join(self.build_path, output_path)
if not os.path.exists(self.output_path):
os.mkdir(self.output_path)
# output img path
self.output_img_path = os.path.join(self.output_path, 'img')
if not os.path.exists(self.output_img_path):
os.mkdir(self.output_img_path)
# del bad img
tag_imgs = soup.find_all('img')
self.make_content_make_img(tag_imgs)
# html body
self.all_h2 = []
txts = soup.find_all(class_='oblog_text')[1]
body_tags = txts.find_all(['p', 'div'])
# body_tags = map(self.make_content_replace_br, body_tags)
body_tags = reduce(
lambda x, y: x + self.make_content_replace_br(y),
body_tags,
[]
)
# body_tags = map(self.make_content_make_h2, body_tags)
# body_tags = map(self.make_content_make_img, body_tags)
output = '%s.html' % self.name
template = 'index.html'
objs = {
'title': self.title,
'body': '\n'.join([tag.prettify() for tag in body_tags])
}
self.make_output(template, objs, output)
def make_content_replace_br(self, tag_p):
""" replce all br in tag p
:keyword
tag_p: tag p
:return
a list make up of tag p
"""
pattern = re.compile(r'</?br\s*/?>')
if tag_p.br:
old_html = tag_p.prettify()
new_html = pattern.sub("</p><p>", old_html)
new_soup = BeautifulSoup(new_html, 'html.parser')
value = new_soup.find_all('p')
return value
else:
return [tag_p]
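    # Illustrative note (the markup below is a made-up example, not from a real page):
    # make_content_replace_br turns "<p>line one<br/>line two</p>" into separate tags,
    # roughly [<p>line one</p>, <p>line two</p>], so each visual line becomes its own <p>.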
def make_content_make_h2(self, tag):
""" make the line that contain "【\d+】" to h2"""
h2_pattern = re.compile(r'【\d+】')
if h2_pattern.search(''.join(tag.strings)):
self.all_h2.append(''.join(tag.stripped_strings))
new_h2_tag = BeautifulSoup('', 'html.parser').new_tag('h2')
new_h2_tag.contents = tag.contents
new_h2_tag['id'] = "ch%d" % len(self.all_h2)
return new_h2_tag
else:
return tag
def make_content_make_img(self, tag_imgs):
""" make the img relative content """
for tag in tag_imgs:
if not tag['src'].strip().startswith('http'):
# delete all bad img like:
# <img src="file://C:...>
# evernotecid://A6B65A9E-9762-414E-82B3-4C06FE717BD2/
tag.decompose()
else:
try:
url = tag['src']
if urlparse(url).scheme == '':
# is relative url?
url = urljoin(self.url, url)
img_url = self.download_img(url)
if img_url:
tag['src'] = img_url
except Exception as e:
self.logger.error("download fail:%s", tag['src'])
self.logger.error(e)
def download_img(self, url):
""" download image
:keyword
url: the url of image
:return
if download success return the new url of image
else return None
"""
base_name = os.path.split(urlparse(url).path)[1]
ext_name = os.path.splitext(base_name)[1]
m = hashlib.md5()
m.update(url.encode())
target_base_name = m.hexdigest() + ext_name
target_filename = os.path.join(self.output_img_path, target_base_name)
new_url = os.path.join('img', target_base_name)
# check image exists
if os.path.exists(target_filename):
return new_url
# download now
content = YtTools().download(url)
if content:
with open(target_filename, 'wb') as f:
f.write(content)
return new_url
else:
return None
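    # Sketch of the naming scheme above (URL made up for illustration): an image at
    # "http://example.com/pics/photo.jpg" ends up as img/<md5 of the full url>.jpg under
    # the book's output directory, and an existing file is reused instead of re-downloading.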
def make_style_css(self):
""" make the style.css """
template = 'style.css'
self.make_output(template)
def make_toc_html(self):
""" make the toc.html """
lis = ['<li><a href="%s.html#ch%d">%s</a></li>' % (
self.name, index + 1, h2)
for index, h2 in enumerate(self.all_h2)
]
template = 'toc.html'
objs = {'tocs': '\n'.join(lis)}
self.make_output(template, objs)
def make_toc_ncx(self):
""" navigation page """
# make navpoints
navpoint_template = """
<navPoint id="ch{{ id }}" playOrder="{{ id }}">
<navLabel>
<text>
{{ text }}
</text>
</navLabel>
<content src="{{ name }}.html#ch{{ id }}" />
</navPoint>
"""
jinja_template = Template(navpoint_template)
navpoints = [jinja_template.render(
id=index + 1, text=h2, name=self.name)
for index, h2 in enumerate(self.all_h2)
]
# make toc.ncx
template = 'toc.ncx'
objs = {
'title': self.title,
'navpoints': '\n'.join(navpoints),
}
self.make_output(template, objs)
def make_opf(self):
""" book info, ie: ISBN, title, cover """
template = 'index.opf'
output = '%s.opf' % self.name
objs = {
'name': self.name,
'title': self.title,
'creator': self.creator,
'copyrights': self.copyrights,
'publisher': self.publisher,
}
self.make_output(template, objs, output)
def make_cover(self):
template = os.path.join(self.template_path, 'cover.jpg')
output = os.path.join(self.output_img_path, 'cover.jpg')
shutil.copyfile(template, output)
def make_output(self, template, objs=None, output=None):
"""
make output file
:keyword
template: the base filename of template
objs: the render objs
output: the base filename of output, if output is None then
same as template
"""
output = output if output else template
objs = objs if objs else {}
output_file = os.path.join(self.output_path, output)
template_file = os.path.join(self.template_path, template)
with open(template_file, mode='r', encoding='utf-8') as a_file:
output_content = Template(a_file.read()).render(objs)
with open(output_file, mode='w', encoding='utf-8') as a_file:
a_file.write(output_content)
def test():
# url = 'http://www.dapenti.com/blog/more.asp?name=xilei&id=116232'
url = 'http://www.dapenti.com/blog/more.asp?name=xilei&id=116383'
# url = 'http://127.0.0.1:8000'
tea = MakeMobi()
tea.set_url(url)
tea.make_book()
if __name__ == '__main__':
test()
| lgpl-3.0 | 5,856,424,823,413,545,000 | 31.098684 | 78 | 0.521931 | false |
espressopp/espressopp | src/FixedPairListAdress.py | 1 | 3687 | # Copyright (C) 2012,2013,2015,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************
espressopp.FixedPairListAdress
******************************
The FixedPairListAdress is the Fixed Pair List to be used for AdResS or H-AdResS
simulations. When creating the FixedPairListAdress one has to provide the storage
and the tuples. Afterwards the bonds can be added. In the example "bonds" is a
python list of the form ( (pid1, pid2), (pid3, pid4), ...) where each inner pair
defines a bond between the particles with the given particle ids.
Example - creating the FixedPairListAdress and adding bonds:
>>> ftpl = espressopp.FixedTupleList(system.storage)
>>> fpl = espressopp.FixedPairListAdress(system.storage, ftpl)
>>> fpl.addBonds(bonds)
.. function:: espressopp.FixedPairListAdress(storage, fixedtupleList)
:param storage:
:param fixedtupleList:
:type storage:
:type fixedtupleList:
.. function:: espressopp.FixedPairListAdress.add(pid1, pid2)
:param pid1:
:param pid2:
:type pid1:
:type pid2:
:rtype:
.. function:: espressopp.FixedPairListAdress.addBonds(bondlist)
:param bondlist:
:type bondlist:
:rtype:
.. function:: espressopp.FixedPairListAdress.remove()
remove the FixedPairListAdress and disconnect
.. function:: espressopp.FixedPairListAdress.getBonds()
:rtype:
"""
from espressopp import pmi
import _espressopp
import espressopp
from espressopp.esutil import cxxinit
class FixedPairListAdressLocal(_espressopp.FixedPairListAdress):
def __init__(self, storage, fixedtupleList):
if pmi.workerIsActive():
cxxinit(self, _espressopp.FixedPairListAdress, storage, fixedtupleList)
def add(self, pid1, pid2):
if pmi.workerIsActive():
return self.cxxclass.add(self, pid1, pid2)
def getBonds(self):
if pmi.workerIsActive():
bonds=self.cxxclass.getBonds(self)
return bonds
def remove(self):
if pmi.workerIsActive():
self.cxxclass.remove(self)
return
def addBonds(self, bondlist):
"""
Each processor takes the broadcasted bondlist and
adds those pairs whose first particle is owned by
this processor.
"""
if pmi.workerIsActive():
for bond in bondlist:
pid1, pid2 = bond
self.cxxclass.add(self, pid1, pid2)
if pmi.isController:
class FixedPairListAdress(metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.FixedPairListAdressLocal',
localcall = [ "add" ],
pmicall = [ "addBonds","remove" ],
pmiinvoke = ['getBonds']
)
| gpl-3.0 | -5,355,600,725,343,704,000 | 31.06087 | 83 | 0.643341 | false |
Telefonica/toolium | toolium/test/pageobjects/examples/ios/login.py | 1 | 1025 | # -*- coding: utf-8 -*-
u"""
Copyright 2016 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from selenium.webdriver.common.by import By
from toolium.pageelements import InputText, Button
from toolium.test.pageobjects.examples.base.login import BaseLoginPageObject
class IosLoginPageObject(BaseLoginPageObject):
username = InputText(By.ID, 'username_id_ios')
password = InputText(By.ID, 'password_id_ios')
login_button = Button(By.ID, 'login_id_ios')
| apache-2.0 | -4,552,817,205,235,152,000 | 35.535714 | 76 | 0.772239 | false |
ArcherSys/ArcherSys | ArcherBash/lists.py | 1 | 2561 | def count(sequence, item):
    number = 0
    for i in sequence:
        if i == item:
            number += 1
    return number
def purify(x):
    l=[]
    for c in x:
        if c%2==0:
            l.append(c)
    return l
def product(x):
    res = 1
    for i in x:
        res *= i
    return res
def remove_duplicates(list):
    tidy = []
    for i in list:
        if i not in tidy:
            tidy.append(i)
    return tidy
def median(numbers):
    num = sorted(numbers)
    if len(num) % 2 != 0:
        return num[len(num)/2]
    else:
        return (num[len(num)/2] + num[len(num)/2-1]) / 2.0
def join_lists(x, y):
    return x + y
def flatten(lists):
    result = []
    for numbers in lists:
        for i in numbers:
            result.append(i)
    return result
| mit | 5,035,323,708,409,660,000 | 17.557971 | 60 | 0.5041 | false |
kinsney/sport | bikeConvert.py | 1 | 3455 | import pymysql as pm
import time
from django.contrib.auth.hashers import make_password
qike = pm.connect(host='localhost',user='root',passwd='root',db='qike',port=3306,charset='utf8')
sport = pm.connect(host='localhost',user='root',passwd='root',db='sport',port=3306,charset='utf8')
try:
qc = qike.cursor()
sc = sport.cursor()
qc.execute('select * from bike where bid>=242 ')
bike_data = qc.fetchall()
for index,bike in enumerate(bike_data):
        # current bike number + 1
id = index + 75
bikeNumber = bike[1]
bikeName = bike[2]
map = {1:"renting",4:"deleted",3:"checking"}
status = map[bike[44]]
hourRent = float(bike[29])
dayRent = float(bike[30])
weekRent = float(bike[31])
deposit = int(bike[32])
if bike[33] == '是':
studentDeposit = 1
else :
studentDeposit = 0
pledge_map = {"校园卡":'campusId',"身份证":'userId',"无需抵押":'noPledge',"学生证":'studentId'}
pledge = pledge_map[bike[34]]
suitHeight = bike[4]
howOld_map = {"九成":90,"八成":80,"七成":70,"全新":100}
howOld = howOld_map[bike[6]]
sexualFix_map = {"男女通用":'heterosexual',"男":'man',"女":'female'}
sexualFix = sexualFix_map[bike[13]]
equipment_map = {"车锁":"lock","头盔":"helmet","手套":"glove","手机支架":"phoneHolder","水壶架":"kettleHolder","梁包":"bag","后座":"backseat","码表":"stopwatch","手电":"flashlight","尾灯":"backlight","指南针":"compass"}
temp = bike[15].split(',')
for i,equipment in enumerate(temp):
temp[i] = equipment_map[equipment]
equipments = ','.join(temp)
minDuration = float(bike[37])*3600*1000000
maxDuration = float(bike[38])*3600*1000000*7*24
added = str(bike[40])
begintime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(bike[35]))
endtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(bike[36]))
        # current address id + 1
address_id = index + 75
        # the id is re-assigned here +++++++++++++++++++++++++++++++++++++++++
address_name = bike[25] or '未知'
latitude = bike[28]
longitude = bike[27]
description = bike[20]
        # current photo id + 1
photo_id = index + 75
photo_url = "user/{0}/{1}".format(bike[42],bike[16].split('/')[-1])
sql_address = "INSERT INTO bike_address VALUES ({0},'{1}','{2}','{3}')".format(address_id,address_name,longitude,latitude)
sql_photo = "INSERT INTO bike_photo VALUES ({0},'缩略图1','{1}',{2})".format(photo_id,photo_url,id)
try:
sql_find = "SELECT id FROM auth_user WHERE username = {0}".format(bike[42])
sc.execute(sql_find)
user_id = sc.fetchone()[0]
except:
pass
sql_bike = "INSERT INTO bike_bike VALUES ({0},'{1}','{2}',1,'{3}',{4},{5},{6},{7},{8},'{9}','{10}',{11},'{12}','{13}',{14},{15},'{16}','{17}','{18}','{19}',0,{20},{21},'其他',0)".format(id,bikeName,bikeNumber,status,hourRent,dayRent,weekRent,deposit,studentDeposit,pledge,suitHeight,howOld,sexualFix,equipments,maxDuration,minDuration,added,begintime,endtime,description,address_id,user_id)
sc.execute(sql_address)
sc.execute(sql_bike)
sc.execute(sql_photo)
        print(bikeName+'\n')
finally:
qike.close()
sport.commit()
sport.close()
| mit | 4,098,447,866,214,811,000 | 46.753623 | 396 | 0.566313 | false |
RedHatInsights/insights-core | insights/parsers/tests/test_aws_instance_type.py | 1 | 2273 | import pytest
import doctest
from insights.parsers import aws_instance_type
from insights.parsers.aws_instance_type import AWSInstanceType
from insights.tests import context_wrap
from insights.parsers import SkipException, ParseException
from insights.core.plugins import ContentException
AWS_TYPE = "r3.xlarge"
AWS_TYPE_CURL_STATS = """
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
100 1126 100 1126 0 0 1374k 0 --:--:-- --:--:-- --:--:-- 1099k
r3.xlarge"""
AWS_TYPE_AB_1 = """
curl: (7) Failed to connect to 169.254.169.254 port 80: Connection timed out
""".strip()
AWS_TYPE_AB_2 = """
curl: (7) couldn't connect to host
""".strip()
AWS_TYPE_AB_3 = """
curl: (28) connect() timed out!
""".strip()
AWS_TYPE_AB_4 = """
.micro
""".strip()
AWS_TYPE_AB_5 = """
No module named insights.tools
""".strip()
def test_aws_instance_type_ab_other():
with pytest.raises(SkipException):
AWSInstanceType(context_wrap(AWS_TYPE_AB_1))
with pytest.raises(SkipException):
AWSInstanceType(context_wrap(AWS_TYPE_AB_2))
with pytest.raises(SkipException):
AWSInstanceType(context_wrap(AWS_TYPE_AB_3))
with pytest.raises(ParseException) as pe:
AWSInstanceType(context_wrap(AWS_TYPE_AB_4))
assert 'Unrecognized type' in str(pe)
with pytest.raises(ContentException) as pe:
AWSInstanceType(context_wrap(AWS_TYPE_AB_5))
def test_aws_instance_type_ab_empty():
with pytest.raises(SkipException):
AWSInstanceType(context_wrap(''))
def test_aws_instance_type():
aws = AWSInstanceType(context_wrap(AWS_TYPE))
assert aws.type == "R3"
assert aws.raw == "r3.xlarge"
assert 'large' in str(aws)
def test_aws_instance_type_stats():
aws = AWSInstanceType(context_wrap(AWS_TYPE_CURL_STATS))
assert aws.type == "R3"
assert aws.raw == "r3.xlarge"
assert 'large' in str(aws)
def test_doc_examples():
env = {
'aws_inst': AWSInstanceType(context_wrap(AWS_TYPE))
}
failed, total = doctest.testmod(aws_instance_type, globs=env)
assert failed == 0
| apache-2.0 | 1,821,836,155,644,708,900 | 28.907895 | 79 | 0.639683 | false |
hortont424/phiface | phiface/capitals.py | 1 | 22542 | # -*- coding: utf-8 -*-
from glyph import *
@glyph('A')
class AGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(AGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(AGlyph, self).setupDrawing()
leftLine = Line(self.p(0.5, 1.0), self.p(0.0, 0.0),
self.weight(), serif=3)
rightLine = Line(self.p(0.5, 1.0), self.p(1.0, 0.0),
self.weight(), serif=3)
midHeight = self.p(0.0, 0.5, xHeight=True)[1]
midLeft = leftLine.atY(midHeight)
midRight = rightLine.atY(midHeight)
midLine = Line((midLeft, midHeight),
(midRight, midHeight), self.weight())
return [leftLine, rightLine, midLine]
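# Reading note for the glyph classes in this file: each getPolygon() appears to compose a
# capital letter from Line / Circle / Polygon primitives in a relative coordinate box
# addressed through self.p(x, y); the 'A' above, for instance, is two diagonal strokes
# meeting at the apex plus a crossbar whose endpoints are found with Line.atY().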
@glyph('B')
class BGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(BGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(BGlyph, self).setupDrawing()
shift = ((self.weight() / 2.0) / self.capHeight()) * 4
bottomHeight = 0.5
bottomY = bottomHeight / 2.0
bottomYY = bottomY + (bottomHeight / 2.0)
topHeight = 1.0 - bottomHeight
topY = bottomYY + (topHeight / 2.0)
topYY = bottomYY
bottomYY += shift / 2.0
bottomY += shift / 4.0
topYY -= shift / 2.0
topY -= shift / 4.0
circa = Circle(self.p(0.5, bottomY),
self.p(0.5, bottomYY),
self.weight())
circb = Circle(self.p(0.5, topY),
self.p(0.5, topYY),
self.weight())
clipPoly = Polygon((self.p(0.5, 0.0), self.p(0.5, 1.0),
self.p(1.5, 1.0), self.p(1.5, 0.0)))
threePoly = mergeSubPolys([circa, circb]).intersection(
mergeSubPolys([clipPoly]))
topLine = Line(self.p(0.0, 1.0), self.p(0.5, 1.0),
self.weight(), shift="down")
bottomLine = Line(self.p(0.0, 0.0), self.p(0.5, 0.0),
self.weight(), shift="up")
midLine = Line(self.p(0.0, 0.5), self.p(0.5, 0.5),
self.weight())
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), shift="right", serif=4)
return [threePoly, topLine, bottomLine, leftLine, midLine]
@glyph('C')
class CGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(CGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.capHeight()
def getPolygon(self):
super(CGlyph, self).setupDrawing()
circ = Circle(self.p(0.5, 0.5),
self.p(0.5, 1.0),
self.weight(),
semiA=self.p(1.0, 0.8),
semiB=self.p(1.0, 0.2))
return [circ]
@glyph('D')
class DGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(DGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.capHeight()
def getPolygon(self):
super(DGlyph, self).setupDrawing()
circ = Circle(self.p(0.5, 0.5),
self.p(0.5, 1.0),
self.weight(),
semiA=self.p(0.0, 0.8),
semiB=self.p(0.0, 0.2))
clipPoly = Polygon((self.p(0.5, 0.0), self.p(0.5, 1.0),
self.p(1.5, 1.0), self.p(1.5, 0.0)))
circ = mergeSubPolys([circ]).intersection(
mergeSubPolys([clipPoly]))
dWidth = 0.2
leftLine = Line(self.p(dWidth, 1.0), self.p(dWidth, 0.0),
self.weight(), shift="right", serif=4)
topLine = Line(self.p(dWidth, 1.0), self.p(0.5, 1.0),
self.weight(), shift="down")
bottomLine = Line(self.p(dWidth, 0.0), self.p(0.5, 0.0),
self.weight(), shift="up")
return [circ, leftLine, topLine, bottomLine]
@glyph('E')
class EGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(EGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(EGlyph, self).setupDrawing()
leftLine = Line(self.p(0.0, 0.0), self.p(0.0, 1.0),
self.weight(), shift="right")
topLine = Line(self.p(0.0, 1.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=1)
bottomLine = Line(self.p(0.0, 0.0), self.p(1.0, 0.0),
self.weight(), shift="up", serif=1)
midHeight = self.p(0.0, 0.5, xHeight=True)[1]
midLeft = leftLine.atY(midHeight)
midLine = Line((midLeft, midHeight),
(midLeft + self.width() / PHI, midHeight),
self.weight())
return [leftLine, topLine, midLine, bottomLine]
@glyph('F')
class FGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(FGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(FGlyph, self).setupDrawing()
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), shift="right", serif=3)
topLine = Line(self.p(0.0, 1.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=1)
midHeight = self.p(0.0, 0.5, xHeight=True)[1]
midLeft = leftLine.atY(midHeight)
midLine = Line((midLeft, midHeight),
(midLeft + self.width() / PHI, midHeight),
self.weight())
return [leftLine, topLine, midLine]
@glyph('G')
class GGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(GGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.capHeight()
def getPolygon(self):
super(GGlyph, self).setupDrawing()
leftShift = self.weight() / self.capHeight()
circ = Circle(self.p(0.5, 0.5),
self.p(0.5, 1.0),
self.weight())
clipPoly = Polygon((self.p(0.5, 0.5), self.p(1.0, 0.8),
self.p(1.0, 0.5, xHeight=True),
self.p(0.5, 0.5, xHeight=True)))
circ = mergeSubPolys([circ]).difference(
mergeSubPolys([clipPoly]))
midLine = Line(self.p(1.0, 0.5, xHeight=True),
self.p(0.5, 0.5, xHeight=True),
self.weight())
lineClip = Circle(self.p(0.5, 0.5),
self.p(0.5, 1.0),
-1.0)
midLine = mergeSubPolys([midLine]).intersection(
mergeSubPolys([lineClip]))
return [circ, midLine]
@glyph('H')
class HGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(HGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(HGlyph, self).setupDrawing()
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), serif=4)
rightLine = Line(self.p(1.0, 1.0), self.p(1.0, 0.0),
self.weight(), serif=4)
midHeight = self.p(0.0, 0.5, xHeight=True)[1]
midLeft = leftLine.atY(midHeight)
midRight = rightLine.atY(midHeight)
midLine = Line((midLeft, midHeight),
(midRight, midHeight), self.weight())
return [leftLine, rightLine, midLine]
@glyph('I')
class IGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(IGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em() / PHI
def getPolygon(self):
super(IGlyph, self).setupDrawing()
mainLine = Line(self.p(0.5, 0.0), self.p(0.5, 1.0),
self.weight())
topLine = Line(self.p(0.0, 1.0), self.p(1.0, 1.0),
self.weight() / PHI, shift="down")
bottomLine = Line(self.p(0.0, 0.0), self.p(1.0, 0.0),
self.weight() / PHI, shift="up")
return [mainLine, topLine, bottomLine]
@glyph('J')
class JGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(JGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em() / PHI
def getPolygon(self):
super(JGlyph, self).setupDrawing()
mainLine = Line(self.p(1.0, 1.0), self.p(1.0, 0.0),
self.weight(), shift="left", serif=6)
circ = Circle(self.p(0.5, 0.0),
self.p(1.0, 0.0),
self.weight())
clipPoly = Polygon((self.p(0.5, 0.0), self.p(1.0, 0.0),
self.p(1.0, -1.0), self.p(0.5, -1.0)))
circ = mergeSubPolys([circ]).intersection(
mergeSubPolys([clipPoly]))
return [mainLine, circ]
@glyph('K')
class KGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(KGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em() * 0.8
def getPolygon(self):
super(KGlyph, self).setupDrawing()
mainLine = Line(self.p(0.0, 0.0), self.p(0.0, 1.0),
self.weight(), shift="down", serif=4)
topLine = Line(self.p(0.0, 0.5), self.p(1.0, 1.0),
self.weight(), shift="down", serif=3)
bottomLine = Line(self.p(0.0, 0.5), self.p(1.0, 0.0),
self.weight(), shift="up", serif=3)
return [topLine, bottomLine, mainLine]
@glyph('L')
class LGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(LGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em() / PHI
def getPolygon(self):
super(LGlyph, self).setupDrawing()
mainLine = Line(self.p(0.0, 0.0), self.p(0.0, 1.0),
self.weight(), shift="down", serif=3)
bottomLine = Line(self.p(0.0, 0.0), self.p(1.0, 0.0),
self.weight(), shift="up", serif=1)
return [mainLine, bottomLine]
@glyph('M')
class MGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(MGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(MGlyph, self).setupDrawing()
midHeight = (self.weight()) / self.xHeight()
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), shift="up", serif=3)
downCrossLine = Line(self.p(0.0, 1.0),
self.p(0.5, 0.5 - midHeight, xHeight=True),
self.weight())
upCrossLine = Line(self.p(0.5, 0.5 - midHeight, xHeight=True),
self.p(1.0, 1.0),
self.weight())
rightLine = Line(self.p(1.0, 1.0), self.p(1.0, 0.0),
self.weight(), shift="up", serif=3)
return [leftLine, downCrossLine, upCrossLine, rightLine]
@glyph('N')
class NGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(NGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(NGlyph, self).setupDrawing()
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), shift="up", serif=3)
crossLine = Line(self.p(0.0, 1.0), self.p(1.0, 0.0),
self.weight())
rightLine = Line(self.p(1.0, 0.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=3)
return [leftLine, crossLine, rightLine]
@glyph('O')
class OGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(OGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.capHeight()
def getPolygon(self):
super(OGlyph, self).setupDrawing()
circ = Circle(self.p(0.5, 0.5),
self.p(0.5, 1.0),
self.weight())
return [circ]
@glyph('P')
class PGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(PGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(PGlyph, self).setupDrawing()
shift = ((self.weight() / 2.0) / self.capHeight()) * 4
bottomHeight = 0.5
bottomY = bottomHeight / 2.0
bottomYY = bottomY + (bottomHeight / 2.0)
topHeight = 1.0 - bottomHeight
topY = bottomYY + (topHeight / 2.0)
topYY = bottomYY
topYY -= shift / 2.0
topY -= shift / 4.0
circa = Circle(self.p(0.5, topY),
self.p(0.5, topYY),
self.weight())
clipPoly = Polygon((self.p(0.5, 0.0), self.p(0.5, 1.0),
self.p(1.5, 1.0), self.p(1.5, 0.0)))
threePoly = mergeSubPolys([circa]).intersection(
mergeSubPolys([clipPoly]))
topLine = Line(self.p(0.0, 1.0), self.p(0.5, 1.0),
self.weight(), shift="down")
midLine = Line(self.p(0.0, 0.5), self.p(0.5, 0.5),
self.weight())
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), shift="right", serif=4)
return [threePoly, topLine, leftLine, midLine]
@glyph('Q')
class QGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(QGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.capHeight()
def getPolygon(self):
super(QGlyph, self).setupDrawing()
shift = (self.weight() / 20.0) / (self.capHeight() / 40.0)
circ = Circle(self.p(0.5, 0.5),
self.p(0.5, 1.0),
self.weight())
crossLine = Line(self.p(0.75 - shift, 0.25 + shift),
self.p(1.0, 0.0), self.weight(), noclip=True)
return [circ, crossLine]
@glyph('R')
class RGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(RGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(RGlyph, self).setupDrawing()
shift = ((self.weight() / 2.0) / self.capHeight()) * 4
bottomHeight = 0.5
bottomY = bottomHeight / 2.0
bottomYY = bottomY + (bottomHeight / 2.0)
topHeight = 1.0 - bottomHeight
topY = bottomYY + (topHeight / 2.0)
topYY = bottomYY
topYY -= shift / 2.0
topY -= shift / 4.0
dy = topY - topYY
circa = Circle(self.p(0.5, topY),
self.p(0.5, topYY),
self.weight())
clipPoly = Polygon((self.p(0.5, 0.0), self.p(0.5, 1.0),
self.p(1.5, 1.0), self.p(1.5, 0.0)))
threePoly = mergeSubPolys([circa]).intersection(
mergeSubPolys([clipPoly]))
topLine = Line(self.p(0.0, 1.0), self.p(0.5, 1.0),
self.weight(), shift="down")
midLine = Line(self.p(0.0, 0.5), self.p(0.5, 0.5),
self.weight())
leftLine = Line(self.p(0.0, 1.0), self.p(0.0, 0.0),
self.weight(), shift="right", serif=4)
downLine = Line(self.p(0.5, 0.5), self.p(1.0, 0.0),
self.weight(), shift="up", serif=3)
return [threePoly, topLine, leftLine, midLine, downLine]
@glyph('S')
class SGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(SGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(SGlyph, self).setupDrawing()
shift = ((self.weight() / 2.0) / self.capHeight()) * 4
bottomHeight = 0.5
bottomY = bottomHeight / 2.0
bottomYY = bottomY + (bottomHeight / 2.0)
topHeight = 1.0 - bottomHeight
topY = bottomYY + (topHeight / 2.0)
topYY = bottomYY
bottomYY += shift / 2.0
bottomY += shift / 4.0
topYY -= shift / 2.0
topY -= shift / 4.0
rw = self.weight() / self.capHeight() * 100
if rw >= 7.0:
topYY -= (self.weight() / self.capHeight()) * 0.0725
elif rw >= 5.0:
topYY -= (self.weight() / self.capHeight()) * 0.09
elif rw >= 3.0:
topYY -= (self.weight() / self.capHeight()) * 0.14
elif rw >= 2.0:
topYY -= (self.weight() / self.capHeight()) * 0.205
elif rw >= 0.5:
topYY -= (self.weight() / self.capHeight()) * 0.81
circa = Circle(self.p(0.45, bottomY),
self.p(0.45, bottomYY),
self.weight())
circb = Circle(self.p(0.55, topY),
self.p(0.55, topYY),
self.weight())
bclipPoly = Polygon((self.p(0.5, topY), self.p(1.2, 1.1),
self.p(1.0, -1.0), self.p(0.5, -1.0)))
circb = mergeSubPolys([circb]).difference(
mergeSubPolys([bclipPoly]))
aclipPoly = Polygon((self.p(0.5, bottomY), self.p(-1.0, -0.25),
self.p(-1.0, 1.0), self.p(0.5, 1.0)))
circa = mergeSubPolys([circa]).difference(
mergeSubPolys([aclipPoly]))
return [circa, circb]
@glyph('T')
class TGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(TGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(TGlyph, self).setupDrawing()
mainLine = Line(self.p(0.5, 0.0), self.p(0.5, 1.0), self.weight())
topLine = Line(self.p(0.0, 1.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=2)
return [mainLine, topLine]
@glyph('U')
class UGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(UGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(UGlyph, self).setupDrawing()
rad = 0.309
shift = self.p(0.5, rad)[0] - self.p(0.0, 0.0)[0]
shift -= self.p(0.5, 0.0)[1] - self.p(0.5, rad)[1]
shift /= self.capHeight()
circ = Circle(self.p(0.5, rad),
self.p(0.5, 0.0),
self.weight())
clipPoly = Polygon((self.p(0.0, rad), self.p(1.0, rad),
self.p(1.0, -1.0), self.p(0.0, -1.0)))
circ = mergeSubPolys([circ]).intersection(
mergeSubPolys([clipPoly]))
s = self.weight() * 1.25 / self.capHeight()
leftLine = Line(self.p(0.0 + shift, rad), self.p(0.0 + shift, 1.0 - s),
self.weight(), shift="right", serif=3)
rightLine = Line(self.p(1.0 - shift, rad), self.p(1.0 - shift, 1.0 - s),
self.weight(), shift="left", serif=3)
return [circ, leftLine, rightLine]
@glyph('V')
class VGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(VGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(VGlyph, self).setupDrawing()
leftLine = Line(self.p(0.5, 0.0), self.p(0.0, 1.0),
self.weight(), shift="down", serif=3)
rightLine = Line(self.p(0.5, 0.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=3)
return [leftLine, rightLine]
@glyph('W')
class WGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(WGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(WGlyph, self).setupDrawing()
midHeight = (self.weight()) / self.capHeight()
leftLine = Line(self.p(0.0, 0.0), self.p(0.0, 1.0),
self.weight(), shift="down", serif=3)
downCrossLine = Line(self.p(0.0, 0.0),
self.p(0.5, 0.6 + midHeight),
self.weight())
upCrossLine = Line(self.p(0.5, 0.6 + midHeight),
self.p(1.0, 0.0),
self.weight())
rightLine = Line(self.p(1.0, 0.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=3)
return [leftLine, downCrossLine, upCrossLine, rightLine]
@glyph('X')
class XGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(XGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(XGlyph, self).setupDrawing()
upCrossLine = Line(self.p(0.0, 0.0), self.p(1.0, 1.0),
self.weight(), shift="down", serif=4)
downCrossLine = Line(self.p(0.0, 1.0), self.p(1.0, 0.0),
self.weight(), shift="up", serif=4)
return [upCrossLine, downCrossLine]
@glyph('Y')
class YGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(YGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(YGlyph, self).setupDrawing()
# Try with xHeight off, too
# TODO: Something is wrong with how this attaches at large weights
leftLine = Line(self.p(0.5, 0.5, xHeight=True), self.p(0.0, 1.0),
self.weight(), shift="down", serif=3)
rightLine = Line(self.p(0.5, 0.5, xHeight=True), self.p(1.0, 1.0),
self.weight(), shift="down", serif=3)
downLine = Line(self.p(0.5, 0.5, xHeight=True), self.p(0.5, 0.0),
self.weight(), serif=3)
return [leftLine, rightLine, downLine]
@glyph('Z')
class ZGlyph(Glyph):
def __init__(self, x, y, capHeight):
super(ZGlyph, self).__init__(x, y, capHeight)
def width(self):
return self.em()
def getPolygon(self):
super(ZGlyph, self).setupDrawing()
topLine = Line(self.p(0.9, 1.0), self.p(0.1, 1.0),
self.weight(), shift="down", serif=1)
slashLine = Line(self.p(0.9, 1.0), self.p(0.0, 0.0),
self.weight(), shift="down")
bottomLine = Line(self.p(0.0, 0.0), self.p(1.0, 0.0),
self.weight(), shift="up", serif=1)
return [topLine, slashLine, bottomLine]
| bsd-2-clause | -2,085,489,251,625,412,600 | 31.202857 | 80 | 0.497516 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-consumption/azure/mgmt/consumption/consumption_management_client.py | 1 | 4350 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.usage_details_operations import UsageDetailsOperations
from .operations.reservations_summaries_operations import ReservationsSummariesOperations
from .operations.reservations_details_operations import ReservationsDetailsOperations
from .operations.operations import Operations
from . import models
class ConsumptionManagementClientConfiguration(AzureConfiguration):
"""Configuration for ConsumptionManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Azure Subscription ID.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(ConsumptionManagementClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-mgmt-consumption/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class ConsumptionManagementClient(object):
"""Consumption management client provides access to consumption resources for Azure Enterprise Subscriptions.
:ivar config: Configuration for client.
:vartype config: ConsumptionManagementClientConfiguration
:ivar usage_details: UsageDetails operations
:vartype usage_details: azure.mgmt.consumption.operations.UsageDetailsOperations
:ivar reservations_summaries: ReservationsSummaries operations
:vartype reservations_summaries: azure.mgmt.consumption.operations.ReservationsSummariesOperations
:ivar reservations_details: ReservationsDetails operations
:vartype reservations_details: azure.mgmt.consumption.operations.ReservationsDetailsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.consumption.operations.Operations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Azure Subscription ID.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = ConsumptionManagementClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2017-11-30'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.usage_details = UsageDetailsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.reservations_summaries = ReservationsSummariesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.reservations_details = ReservationsDetailsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self.config, self._serialize, self._deserialize)
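    # Minimal construction sketch (credential values below are placeholders, not real):
    #   from azure.common.credentials import ServicePrincipalCredentials
    #   credentials = ServicePrincipalCredentials(client_id='...', secret='...', tenant='...')
    #   client = ConsumptionManagementClient(credentials, '<subscription-id>')
    #   # client.usage_details, client.reservations_summaries, etc. are then ready to use.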
| mit | -1,939,566,008,101,267,000 | 44.3125 | 113 | 0.716322 | false |
previtus/MGR-Project-Code | DatasetHandler/DatasetObj.py | 1 | 22075 | import Downloader.DataOperations as DataOperations
import Downloader.KerasPreparation as KerasPreparation
import os
import shutil
import numpy as np
import random
import math
import Downloader.Defaults
from Omnipresent import file_exists_and_accesible
class Dataset:
'''
Common base class for a Dataset
What does dataset have?
    - source data folder with images and their scores - loaded via Segments, though Segments are no longer used anywhere outside initialization
What can it do?
- give it's data nicely out ([x],[y]) as image data and labels
- give us subsets, which are uniform in sense of Scoring
- provide us with statistics - without worrying about unsuccessful downloads or anything
- give us *views* like exporting images into folder for inspection - for example coded with score
'''
__list_of_images = []
__labels = []
__osm = []
img_width = -1
img_height = -1
num_of_images = 0
unique_id = ''
has_osm_loaded = False
flag_is_extended = False
def __init__(self):
return None
def randomize_all_list_order_deterministically_same_segment(self, local_seed):
'''
        According to a chosen seed number, shuffles the contents of the lists (of urls, of scores, of osm) together,
        so that corresponding entries are kept intact.
:return:
'''
n = len(self.__list_of_images)
indices = range(0,n)
#indices = range(0,n)*6
#indices += range(0,n)
a = self.__list_of_images
b = self.__labels
c = self.__osm
d = self.__segment_ids
#print len(a), len(b), len(c), len(d), d[0:10]
lists = list(zip(a, b, c, d))
shuffled_lists = []
already_used_data = []
random.Random(local_seed).shuffle(indices)
for index in indices:
if index in already_used_data:
continue
str_ = ''
l = lists[index]
seg_id = l[3]
#print index, seg_id, l
#for k in range(-6,6):
# print index+k, lists[index+k][3]
k = index-1
found_first_of_such_id = False
if k == -1:
found_first_of_such_id = True
while not found_first_of_such_id:
seg_id_prev = lists[k][3]
if seg_id == seg_id_prev:
k -= 1
else:
k += 1
found_first_of_such_id = True
first_index = k
last_index = k
found_last_of_such_id = False
k += 1
while not found_last_of_such_id:
if k >= n:
found_last_of_such_id = True
last_index = k-1
elif seg_id == lists[k][3]:
k += 1
else:
found_last_of_such_id = True
last_index = k-1
str_ = ''
for i in range(first_index, last_index+1):
#print i, lists[i][3]
str_ += str(i)+'('+str(lists[i][3])+'), '
shuffled_lists.append(lists[i])
already_used_data.append(i)
#print str_
#print 'pre', first_index-1, lists[first_index-1][3]
#print 'post', last_index+1, lists[last_index+1][3]
'''
for k in range(0,images_per_segment):
index = i*images_per_segment+k
str_ += str(i*images_per_segment+k)+', '
shuffled_lists.append(lists[index])
#print str_
'''
a, b, c, d = zip(*shuffled_lists)
self.__list_of_images = a
self.__labels = np.array(b)
self.__osm = c
self.__segment_ids = d
#print len(a), len(b), len(c), len(d), d[0:10]
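    # Note: the shuffle above is deterministic for a given local_seed and works at segment
    # granularity - all entries that share a segment_id stay next to each other in the
    # shuffled order instead of being scattered individually.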
def remove_dual_osms(self):
'''
        Only every third entry is actually unique (in the sense of an (osm, score) pair); this matters when we are talking about the osm_only model.
:return:
'''
indices = []
images_per_segment = 3
n = len(self.__list_of_images)
bytripples = range(0,int(n/images_per_segment))
for i in bytripples:
indices.append(i*images_per_segment)
print len(self.__list_of_images), len(self.__labels), len(self.__osm), len(self.__segment_ids)
self.__list_of_images = [self.__list_of_images[i] for i in indices]
self.__labels = [self.__labels[i] for i in indices]
self.__osm = [self.__osm[i] for i in indices]
self.__segment_ids = [self.__segment_ids[i] for i in indices]
print len(self.__list_of_images), len(self.__labels), len(self.__osm), len(self.__segment_ids)
def test_existence_of_all_images(self):
'''
        Test physical presence of the images - useful for debugging.
:return:
'''
for url in self.__list_of_images:
b = file_exists_and_accesible(url)
if not b:
print "File cannot be accessed! ", url
def init_from_lists(self, list_of_images, labels, osm, segment_ids, img_width, img_height):
'''
Initialization from lists of data
'''
self.img_width = img_width
self.img_height = img_height
self.__list_of_images = list_of_images
self.__labels = labels
self.__osm = osm
self.__segment_ids = segment_ids
self.num_of_images = len(self.__list_of_images)
self.has_osm_loaded = (len(self.__osm)>0)
def init_from_segments(self, path_to_segments_file, img_width, img_height):
'''
# Initialization from loaded Segment in path_to_segments_file
# Segments are not used apart from initialization
'''
Segments = DataOperations.LoadDataFile(path_to_segments_file)
segments_dir = os.path.dirname(path_to_segments_file) + '/'
__list_of_images, __labels, __osm, __segment_ids, flag_is_extended = KerasPreparation.LoadDataFromSegments(Segments, has_score=True, path_to_images=segments_dir)
self.flag_is_extended = flag_is_extended
self.init_from_lists(__list_of_images, __labels, __osm, __segment_ids, img_width, img_height)
# Osm data editation
def cast_osm_to_bool(self):
'''
Transforms the osm vector data to boolean values, aka i=0 -> 0, i>0 -> 1
:return:
'''
def boo(x):
if x > 0:
return 1
else:
return 0
for i in range(len(self.__osm)):
for j in range(len(self.__osm[i])):
self.__osm[i][j] = boo(self.__osm[i][j])
def cast_osm_to_one_hot_categories(self):
'''
Transforms the osm vector data to one hot categories - low,mid,high represented as 001,010,100 binary.
:return:
'''
if len(self.__osm) == 0:
return False
statistics_for_attributes = []
for attribute_id in range(len(self.__osm[0])):
attribute_values = []
for vector_id in range(len(self.__osm)):
val = self.__osm[vector_id][attribute_id]
attribute_values.append(val)
q1 = np.percentile(attribute_values, 33)
q3 = np.percentile(attribute_values, 66)
#print attribute_id, q1, q3
statistics_for_attributes.append([q1, q3])
new_osm_vector = []
for vector_id in range(len(self.__osm)):
new_osm_vector.append([])
for attribute_id in range(len(self.__osm[vector_id])):
stats = statistics_for_attributes[attribute_id]
val = self.__osm[vector_id][attribute_id]
if val <= stats[0]: # value smaller than lower percentile -> "low"
new_osm_vector[vector_id] += [0,0,1]
elif val <= stats[1]: # value in between percentiles -> "mid"
new_osm_vector[vector_id] += [0,1,0]
else: # bigger than percentiles -> "high"
new_osm_vector[vector_id] += [1,0,0]
self.__osm = new_osm_vector
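    # Worked mini-example (numbers invented for illustration): if one attribute has
    # 33rd/66th percentiles of 2 and 7 over the dataset, a value of 1 encodes as [0,0,1]
    # ("low"), 5 as [0,1,0] ("mid") and 9 as [1,0,0] ("high"), so every original attribute
    # expands into three binary columns.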
def log_the_osm(self):
'''
Apply log to values in OSM vector.
:return:
'''
for i in range(len(self.__osm)):
for j in range(len(self.__osm[i])):
val = self.__osm[i][j]
if val > 1:
val = math.log(val)
self.__osm[i][j] = val
# Data access: ---------------------------------------------------------------------------------------------
def getDataLabels(self, resize=None):
'''
# ([x,y]) as image data and labels
'''
x = KerasPreparation.LoadActualImages(self.__list_of_images, resize=resize, dim_ordering=Downloader.Defaults.KERAS_SETTING_DIMENSIONS)
y = np.array(self.__labels)
return [x, y]
def getDataLabels_split(self, resize=None, validation_split=0.2):
'''
# ([x, y, x_val, y_val]) as image data and labels after being split
'''
x = KerasPreparation.LoadActualImages(self.__list_of_images, resize=resize, dim_ordering=Downloader.Defaults.KERAS_SETTING_DIMENSIONS) # th or tf
y = np.array(self.__labels)
x, y, x_val, y_val = KerasPreparation.split_data(x, y, validation_split)
return [x, y, x_val, y_val]
def getDataLabels_split_only_y(self, resize=None, validation_split=0.2):
'''
# Get just label data, after validation split
'''
y = np.array(self.__labels)
y, y_val = KerasPreparation.split_one_array(y, validation_split)
return [y, y_val]
def getDataLabels_only_y(self):
'''
# Get just label data
'''
y = np.array(self.__labels)
return y
def getDataLabels_split_only_osm(self, validation_split=0.2):
'''
# Get just osm data, after validation split
'''
osm, osm_val = KerasPreparation.split_one_array(self.__osm, validation_split)
osm = np.asarray(osm)
osm_val = np.asarray(osm_val)
return [osm, osm_val]
def getDataLabels_only_osm(self):
'''
# Get just osm data
'''
osm = np.array(self.__osm)
return osm
def getShapeOfOsm(self):
'''
Get shape of the osm data - aka dimension of the vectors, traditionally (594,) needed for building Keras models.
:return:
'''
return np.asarray(self.__osm).shape[1:]
# For generators
def generator_images_scores(self, order, image_paths, scores, resize=None):
'''
Get generator of images
        :param order: prearranged order (1,2,3...) or (2,55,1,980, ...)
:param image_paths: paths to images, these are kept in memory while the big 640x640x3 image data is not
:param scores: score to be associated with returned image
:param resize: parameter to resize loaded images on the fly
:return: generator, which yields (image, score)
'''
while True:
for index in order:
img_path = image_paths[index]
image = KerasPreparation.LoadActualImages([img_path], resize=resize)
score = scores[index]
yield (image, score)
def getImageGenerator(self, validation_split, resize=None):
'''
# Return generators
        # take the lists of images and their labels - split these two arrays by the validation split
'''
y = np.array(self.__labels)
images_paths, scores, images_paths_val, scores_val = KerasPreparation.split_data(self.__list_of_images, y, validation_split)
size = len(scores)
size_val = len(scores_val)
order = range(size)
order_val = range(size_val)
# We can mix up the orders, but then we need to mix the future data too
image_generator = self.generator_images_scores(order, image_paths=images_paths, scores=scores, resize=resize)
image_generator_val = self.generator_images_scores(order_val, image_paths=images_paths_val, scores=scores_val, resize=resize)
return [order, order_val, image_generator, size, image_generator_val, size_val]
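    # Hedged usage note: each generator yields one (image_batch_of_size_1, score) pair per
    # step, so it can be fed to something like Keras' fit_generator together with `size` /
    # `size_val` as the number of samples; the exact fit arguments depend on the Keras
    # version and are not fixed by this class.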
# Dataset reporting: ---------------------------------------------------------------------------------------------
def statistics(self):
'''
# Report important information about the dataset.
'''
print "Dataset of", len(self.__list_of_images), " scored images of", self.img_width, "x", self.img_height, "resolution."
labels = np.array(self.__labels)
min = np.amin(labels)
max = np.amax(labels)
mean = np.mean(labels)
q1 = np.percentile(labels, 25)
q3 = np.percentile(labels, 75)
print min, "|---[", q1, "{", mean, "}", q3, "]---|", max
print "min |---[ 25perc { mean } 75perc ]---| max"
def debug_print_first(self, n):
'''
# Debug print first n values in this dataset.
'''
for i in range(0,n):
print self.__segment_ids[i], self.__labels[i], self.__list_of_images[i]
def plotHistogram(self, save_to_pdf=False, labels_override=None):
'''
Plot score of this dataset as histogram.
:param save_to_pdf: flag to save into output.pdf
:return:
'''
import DatasetVizualizators
if labels_override is not None:
labels = np.array(self.__labels)
else:
labels = labels_override
DatasetVizualizators.plotHistogram(labels, 'Score distribution histogram')
DatasetVizualizators.plotWhisker(labels, 'Score box plot')
DatasetVizualizators.plotX_sortValues(labels, 'Distribution of score (sorted)', notReverse=True)
if save_to_pdf:
DatasetVizualizators.saveAllPlotsToPDF()
DatasetVizualizators.show()
def MapScoreToImages(self, into_bins=100):
'''
        Gets a dict which gives, for an index from 0-100, the list of images with that score (score goes in range 0-1, so *100 in this case)
:return:
'''
into_bins -= 1
# Empty dict
dict = {key: [] for key in range(0,into_bins+1)}
for i in range(0, self.num_of_images):
name = self.__list_of_images[i]
score = float(self.__labels[i])
score_label = int(round(score, 2)*100)
score_label = int(score_label*into_bins/100)
dict[score_label].append(name)
return dict
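    # Hypothetical usage sketch (variable names are not part of this class):
    #   buckets = dataset.MapScoreToImages(into_bins=10)
    #   buckets[9]   # image paths whose score rounds into the highest bin
    #   buckets[0]   # image paths with the lowest scores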
def DumpFilesIntoDirectory_keepNamesOnlyScored(self, target_directory = ''):
"""
        Copy those images which are loaded into this dataset (and thus also have a score) into another folder.
This can be used to filter how much of the initial dataset is useful as labeled dataset.
:param target_directory: target folder, file names are kept the same
"""
if not os.path.exists(target_directory):
os.makedirs(target_directory)
for i in range(0, self.num_of_images):
name = self.__list_of_images[i]
score = self.__labels[i]
#head, tail = os.path.split(name)
filename = target_directory + "/" + os.path.basename(name)
print name, score, filename
shutil.copy2(name, filename)
def build_dictionary_instead_of_object(self, include_osms=False):
"""
Output all the knowledge of labels of this dataset into dictionary which can give label from id, which is the
name of the file (0123_0<?>.jpg, keep in mind that further processing can add more into <?>).
File name goes as first 4 numbers 0123, underscore and more numbers. The first set corresponds to the id of
segment, thus one unique road. It has the same score for all variants which are created when rotating around
the spot and generating other images. However these can have variable osm data (as the "center" where we were
standing was differing).
USE LIKE THIS:
np.save('id_to_score.npy', dictionary)
dict_loaded = np.load('id_to_score.npy').item()
// if we have scores only, we can look at them like histogram like this:
list1 = d1.values()
flat_list = [item for sublist in list1 for item in sublist]
// ...
:param include_osms: Flag if we also keep osm data
:return:
"""
dictionary_id_to_labels = {}
for i in range(0, self.num_of_images):
#for i in range(0, 5):
name = self.__list_of_images[i]
seq = name.split("/")
file_name_id = seq[-1][0:-4]
score = self.__labels[i]
if include_osms:
osm = self.__osm[i]
dictionary_id_to_labels[file_name_id] = [score, osm]
else:
dictionary_id_to_labels[file_name_id] = [score]
return dictionary_id_to_labels
def DumpFilesIntoDirectory_withScores(self, target_directory = ''):
'''
Simple way of visualizing which images are considered "attractive" (with high score) and which are not
:param target_directory: target directory, for example target_directory = '../debugViewOfDataset/'
:return: returns list of new names of files, with the order unchanged
'''
# Copy images from their original location to a new directory while naming them:
# Score_<OriginalName.jpg>
if not os.path.exists(target_directory):
os.makedirs(target_directory)
new_names = []
for i in range(0, self.num_of_images):
name = self.__list_of_images[i]
score = self.__labels[i]
#head, tail = os.path.split(name)
filename = target_directory + "{0:.2f}".format(score) + '_' + os.path.basename(name)
#print name, score, filename
new_names.append(filename)
shutil.copy2(name, filename)
return new_names
def sampleUniform(self, desired_number):
'''
# randomized subsample of a dataset
'''
indices = random.sample(xrange(self.num_of_images), desired_number)
return indices
def spawnUniformSubset(self, desired_number):
'''
Spawn a subset from dataset uniform distribution over the original data.
:param desired_number: size of the desired dataset
:return: the resulting new dataset
'''
indices = self.sampleUniform(desired_number)
sel_imgs = [self.__list_of_images[i] for i in indices]
sel_labels = [self.__labels[i] for i in indices]
sel_segment_ids = [self.__segment_ids[i] for i in indices]
if self.__osm == []:
sel_osm = []
else:
sel_osm = [self.__osm[i] for i in indices]
newDataset = Dataset()
newDataset.init_from_lists(sel_imgs, sel_labels, sel_osm, sel_segment_ids, self.img_width, self.img_height)
return newDataset
def getDataLabels_only_osm_raw(self):
'''
        # return list of osm without conversion to numpy format
'''
return self.__osm
def expandOsmDataWithMultipleRadii(self, model_settings):
'''
# idea is to load all the radii data we have available and add it to each of the segments
# we assume the basic experiment definition
'''
r50 = 'SegmentsData_marked_R50_4TablesN.dump'
        r100 = 'SegmentsData_marked_R100_4TablesN.dump'
r200 = 'SegmentsData_marked_R200_4TablesN.dump'
import DatasetHandler
dataset_r50 = DatasetHandler.CreateDataset.load_custom(model_settings["dataset_name"], model_settings["pixels"],
desired_number=model_settings["number_of_images"],
seed=model_settings["seed"],
filename_override=r50)
r50osm = dataset_r50.getDataLabels_only_osm_raw()
dataset_r100 = DatasetHandler.CreateDataset.load_custom(model_settings["dataset_name"], model_settings["pixels"],
desired_number=model_settings["number_of_images"],
seed=model_settings["seed"],
filename_override=r100)
r100osm = dataset_r100.getDataLabels_only_osm_raw()
dataset_r200 = DatasetHandler.CreateDataset.load_custom(model_settings["dataset_name"], model_settings["pixels"],
desired_number=model_settings["number_of_images"],
seed=model_settings["seed"],
filename_override=r200)
r200osm = dataset_r200.getDataLabels_only_osm_raw()
from Omnipresent import len_
print "osm", len(self.__osm), len_(self.__osm), self.__osm[0][0:10]
print "osm50", len(r50osm), len_(r50osm), r50osm[0][0:10]
print "osm50", len(r100osm), len_(r100osm), r100osm[0][0:10]
print "osm200", len(r200osm), len_(r200osm), r200osm[0][0:10]
new_osm = []
for i in range(0,len(r50osm)):
osm_of_i = []
if model_settings["multiple_radii_mark"] == 'I':
osm_of_i = list(r100osm[i]) + list(r50osm[i]) + list(r200osm[i])
elif model_settings["multiple_radii_mark"] == 'II':
osm_of_i = list(r100osm[i]) + list(r200osm[i])
elif model_settings["multiple_radii_mark"] == 'III':
osm_of_i = list(r100osm[i]) + list(r50osm[i])
new_osm.append(osm_of_i)
print "enhanced", len(new_osm), len_(new_osm), new_osm[0][0:10]
self.__osm = new_osm
print "enhanced", len(self.__osm), len_(self.__osm), self.__osm[0][0:10]
| mit | 866,294,932,582,097,700 | 36.799658 | 169 | 0.555062 | false |
vuolter/pyload | src/pyload/plugins/accounts/SmoozedCom.py | 1 | 2338 | # -*- coding: utf-8 -*-
import hashlib
import json
import time
from ..base.multi_account import MultiAccount
class SmoozedCom(MultiAccount):
__name__ = "SmoozedCom"
__type__ = "account"
__version__ = "0.13"
__status__ = "testing"
__config__ = [
("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12),
]
__description__ = """Smoozed.com account plugin"""
__license__ = "GPLv3"
__authors__ = [(None, None)]
def grab_hosters(self, user, password, data):
return self.get_data("hosters")
def grab_info(self, user, password, data):
status = self.get_account_status(user, password)
self.log_debug(status)
if status["state"] != "ok":
info = {"validuntil": None, "trafficleft": None, "premium": False}
else:
#: Parse account info
info = {
"validuntil": float(status["data"]["user"]["user_premium"]),
"trafficleft": max(
0, status["data"]["traffic"][1] - status["data"]["traffic"][0]
),
"session": status["data"]["session_key"],
"hosters": [hoster["name"] for hoster in status["data"]["hoster"]],
}
if info["validuntil"] < time.time():
if float(status["data"]["user"].get("user_trial", 0)) > time.time():
info["premium"] = True
else:
info["premium"] = False
else:
info["premium"] = True
return info
def signin(self, user, password, data):
#: Get user data from premiumize.me
status = self.get_account_status(user, password)
#: Check if user and password are valid
if status["state"] != "ok":
self.fail_login()
def get_account_status(self, user, password):
b_password = password.encode()
        encrypted = hashlib.pbkdf2_hmac("sha256", b_password, b_password, 1000).hex()[:32]
html = self.load(
"http://www2.smoozed.com/api/login",
get={"auth": user, "password": encrypted},
)
return json.loads(html)
| agpl-3.0 | -2,423,730,721,708,056,000 | 30.173333 | 86 | 0.517964 | false |
yuokada/pyp2rpm | pyp2rpm/package_data.py | 1 | 3746 | import subprocess
import time
import locale
import logging
from pyp2rpm import version
from pyp2rpm import utils
logger = logging.getLogger(__name__)
def get_deps_names(runtime_deps_list):
'''
data['runtime_deps'] has format:
[['Requires', 'name', '>=', 'version'], ...]
this function creates list of lowercase deps names
'''
return [x[1].lower() for x in runtime_deps_list]
class PackageData(object):
credit_line = '# Created by pyp2rpm-{0}'.format(version.version)
"""A simple object that carries data about a package."""
def __init__(self, local_file, name, pkg_name, version, md5='', url='', srcname=None):
object.__setattr__(self, 'data', {})
self.data['local_file'] = local_file
self.data['name'] = name
self.data['srcname'] = srcname
self.data['pkg_name'] = pkg_name
self.data['version'] = version
self.data['python_versions'] = []
self.data['md5'] = md5
self.data['url'] = url
self.data['sphinx_dir'] = None
def __getattr__(self, name):
if name == 'underscored_name':
return self.data['name'].replace('-', '_')
elif name == 'changelog_date_packager':
return self.get_changelog_date_packager()
elif name in ['runtime_deps', 'build_deps', 'classifiers', 'doc_files', 'doc_license']:
return self.data.get(name, [])
elif name in ['packages', 'py_modules', 'scripts']:
return self.data.get(name, set())
elif name in ['has_egg_info', 'has_test_suite', 'has_pth', 'has_extension']:
return self.data.get(name, False)
return self.data.get(name, 'TODO:')
def __setattr__(self, name, value):
if name == 'summary' and isinstance(value, utils.str_classes):
value = value.rstrip('.').replace('\n', ' ')
if value is not None:
self.data[name] = value
def update_attr(self, name, value):
if name in self.data and value:
if name in ['runtime_deps', 'build_deps']: # compare lowercase names of deps
for item in value:
if not item[1].lower() in get_deps_names(self.data[name]):
self.data[name].append(item)
elif isinstance(self.data[name], list):
for item in value:
if item not in self.data[name]:
self.data[name].append(item)
elif isinstance(self.data[name], set):
if not isinstance(value, set):
value = set(value)
self.data[name] |= value
elif not self.data[name] and self.data[name] is not False:
self.data[name] = value
elif name not in self.data and value is not None:
self.data[name] = value
def set_from(self, data_dict, update=False):
for k, v in data_dict.items():
if update:
self.update_attr(k, v)
else:
setattr(self, k, v)
def get_changelog_date_packager(self):
"""Returns part of the changelog entry, containing date and packager.
"""
try:
packager = subprocess.Popen(
'rpmdev-packager', stdout=subprocess.PIPE).communicate()[0].strip()
except OSError:
# Hi John Doe, you should install rpmdevtools
packager = "John Doe <[email protected]>"
logger.warn(
'Package rpmdevtools is missing, using default name: {0}.'.format(packager))
date_str = time.strftime('%a %b %d %Y', time.gmtime())
encoding = locale.getpreferredencoding()
return u'{0} {1}'.format(date_str, packager.decode(encoding))
| mit | -8,890,636,851,773,060,000 | 38.020833 | 95 | 0.563001 | false |
daspecster/google-cloud-python | spanner/unit_tests/test_streamed.py | 1 | 36966 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestStreamedResultSet(unittest.TestCase):
def _getTargetClass(self):
from google.cloud.spanner.streamed import StreamedResultSet
return StreamedResultSet
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_ctor_defaults(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
self.assertIs(streamed._response_iterator, iterator)
self.assertEqual(streamed.rows, [])
self.assertIsNone(streamed.metadata)
self.assertIsNone(streamed.stats)
self.assertIsNone(streamed.resume_token)
def test_fields_unset(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
with self.assertRaises(AttributeError):
_ = streamed.fields
@staticmethod
def _makeScalarField(name, type_):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
return StructType.Field(name=name, type=Type(code=type_))
@staticmethod
def _makeArrayField(name, element_type_code=None, element_type=None):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
if element_type is None:
element_type = Type(code=element_type_code)
array_type = Type(
code='ARRAY', array_element_type=element_type)
return StructType.Field(name=name, type=array_type)
@staticmethod
def _makeStructType(struct_type_fields):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
fields = [
StructType.Field(name=key, type=Type(code=value))
for key, value in struct_type_fields
]
struct_type = StructType(fields=fields)
return Type(code='STRUCT', struct_type=struct_type)
@staticmethod
def _makeValue(value):
from google.cloud.spanner._helpers import _make_value_pb
return _make_value_pb(value)
@staticmethod
def _makeListValue(values=(), value_pbs=None):
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
from google.cloud.spanner._helpers import _make_list_value_pb
if value_pbs is not None:
return Value(list_value=ListValue(values=value_pbs))
return Value(list_value=_make_list_value_pb(values))
def test_properties_set(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
]
metadata = streamed._metadata = _ResultSetMetadataPB(FIELDS)
stats = streamed._stats = _ResultSetStatsPB()
self.assertEqual(list(streamed.fields), FIELDS)
self.assertIs(streamed.metadata, metadata)
self.assertIs(streamed.stats, stats)
def test__merge_chunk_bool(self):
from google.cloud.spanner.streamed import Unmergeable
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('registered_voter', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(True)
chunk = self._makeValue(False)
with self.assertRaises(Unmergeable):
streamed._merge_chunk(chunk)
def test__merge_chunk_int64(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('age', 'INT64'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(42)
chunk = self._makeValue(13)
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, '4213')
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_float64_nan_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('weight', 'FLOAT64'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(u'Na')
chunk = self._makeValue(u'N')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, u'NaN')
def test__merge_chunk_float64_w_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('weight', 'FLOAT64'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(3.14159)
chunk = self._makeValue('')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.number_value, 3.14159)
def test__merge_chunk_float64_w_float64(self):
from google.cloud.spanner.streamed import Unmergeable
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('weight', 'FLOAT64'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(3.14159)
chunk = self._makeValue(2.71828)
with self.assertRaises(Unmergeable):
streamed._merge_chunk(chunk)
def test__merge_chunk_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('name', 'STRING'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(u'phred')
chunk = self._makeValue(u'wylma')
merged = streamed._merge_chunk(chunk)
self.assertEqual(merged.string_value, u'phredwylma')
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_bool(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeArrayField('name', element_type_code='BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue([True, True])
chunk = self._makeListValue([False, False, False])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue([True, True, False, False, False])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_int(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeArrayField('name', element_type_code='INT64'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue([0, 1, 2])
chunk = self._makeListValue([3, 4, 5])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue([0, 1, 23, 4, 5])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_float(self):
import math
PI = math.pi
EULER = math.e
SQRT_2 = math.sqrt(2.0)
LOG_10 = math.log(10)
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeArrayField('name', element_type_code='FLOAT64'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue([PI, SQRT_2])
chunk = self._makeListValue(['', EULER, LOG_10])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue([PI, SQRT_2, EULER, LOG_10])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_string(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeArrayField('name', element_type_code='STRING'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue([u'A', u'B', u'C'])
chunk = self._makeListValue([None, u'D', u'E'])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue([u'A', u'B', u'C', None, u'D', u'E'])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_string_with_null(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeArrayField('name', element_type_code='STRING'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue([u'A', u'B', u'C'])
chunk = self._makeListValue([u'D', u'E'])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue([u'A', u'B', u'CD', u'E'])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_array_of_int(self):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
subarray_type = Type(
code='ARRAY', array_element_type=Type(code='INT64'))
array_type = Type(code='ARRAY', array_element_type=subarray_type)
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
StructType.Field(name='loloi', type=array_type)
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue(value_pbs=[
self._makeListValue([0, 1]),
self._makeListValue([2]),
])
chunk = self._makeListValue(value_pbs=[
self._makeListValue([3]),
self._makeListValue([4, 5]),
])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue(value_pbs=[
self._makeListValue([0, 1]),
self._makeListValue([23]),
self._makeListValue([4, 5]),
])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_array_of_string(self):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
from google.cloud.proto.spanner.v1.type_pb2 import Type
subarray_type = Type(
code='ARRAY', array_element_type=Type(code='STRING'))
array_type = Type(code='ARRAY', array_element_type=subarray_type)
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
StructType.Field(name='lolos', type=array_type)
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeListValue(value_pbs=[
self._makeListValue([u'A', u'B']),
self._makeListValue([u'C']),
])
chunk = self._makeListValue(value_pbs=[
self._makeListValue([u'D']),
self._makeListValue([u'E', u'F']),
])
merged = streamed._merge_chunk(chunk)
expected = self._makeListValue(value_pbs=[
self._makeListValue([u'A', u'B']),
self._makeListValue([u'CD']),
self._makeListValue([u'E', u'F']),
])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_struct(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
struct_type = self._makeStructType([
('name', 'STRING'),
('age', 'INT64'),
])
FIELDS = [
self._makeArrayField('test', element_type=struct_type),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
partial = self._makeListValue([u'Phred '])
streamed._pending_chunk = self._makeListValue(value_pbs=[partial])
rest = self._makeListValue([u'Phlyntstone', 31])
chunk = self._makeListValue(value_pbs=[rest])
merged = streamed._merge_chunk(chunk)
struct = self._makeListValue([u'Phred Phlyntstone', 31])
expected = self._makeListValue(value_pbs=[struct])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test__merge_chunk_array_of_struct_unmergeable(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
struct_type = self._makeStructType([
('name', 'STRING'),
('registered', 'BOOL'),
('voted', 'BOOL'),
])
FIELDS = [
self._makeArrayField('test', element_type=struct_type),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
partial = self._makeListValue([u'Phred Phlyntstone', True])
streamed._pending_chunk = self._makeListValue(value_pbs=[partial])
rest = self._makeListValue([True])
chunk = self._makeListValue(value_pbs=[rest])
merged = streamed._merge_chunk(chunk)
struct = self._makeListValue([u'Phred Phlyntstone', True, True])
expected = self._makeListValue(value_pbs=[struct])
self.assertEqual(merged, expected)
self.assertIsNone(streamed._pending_chunk)
def test_merge_values_empty_and_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._current_row = []
streamed._merge_values([])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
def test_merge_values_empty_and_partial(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._makeValue(bare) for bare in BARE]
streamed._current_row = []
streamed._merge_values(VALUES)
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
def test_merge_values_empty_and_filled(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BARE = [u'Phred Phlyntstone', 42, True]
VALUES = [self._makeValue(bare) for bare in BARE]
streamed._current_row = []
streamed._merge_values(VALUES)
self.assertEqual(streamed.rows, [BARE])
self.assertEqual(streamed._current_row, [])
def test_merge_values_empty_and_filled_plus(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone',
]
VALUES = [self._makeValue(bare) for bare in BARE]
streamed._current_row = []
streamed._merge_values(VALUES)
self.assertEqual(streamed.rows, [BARE[0:3], BARE[3:6]])
self.assertEqual(streamed._current_row, BARE[6:])
def test_merge_values_partial_and_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BEFORE = [
u'Phred Phlyntstone'
]
streamed._current_row[:] = BEFORE
streamed._merge_values([])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BEFORE)
def test_merge_values_partial_and_partial(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BEFORE = [u'Phred Phlyntstone']
streamed._current_row[:] = BEFORE
MERGED = [42]
TO_MERGE = [self._makeValue(item) for item in MERGED]
streamed._merge_values(TO_MERGE)
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BEFORE + MERGED)
def test_merge_values_partial_and_filled(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BEFORE = [
u'Phred Phlyntstone'
]
streamed._current_row[:] = BEFORE
MERGED = [42, True]
TO_MERGE = [self._makeValue(item) for item in MERGED]
streamed._merge_values(TO_MERGE)
self.assertEqual(streamed.rows, [BEFORE + MERGED])
self.assertEqual(streamed._current_row, [])
def test_merge_values_partial_and_filled_plus(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
streamed._metadata = _ResultSetMetadataPB(FIELDS)
BEFORE = [
self._makeValue(u'Phred Phlyntstone')
]
streamed._current_row[:] = BEFORE
MERGED = [
42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone',
]
TO_MERGE = [self._makeValue(item) for item in MERGED]
VALUES = BEFORE + MERGED
streamed._merge_values(TO_MERGE)
self.assertEqual(streamed.rows, [VALUES[0:3], VALUES[3:6]])
self.assertEqual(streamed._current_row, VALUES[6:])
def test_consume_next_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
with self.assertRaises(StopIteration):
streamed.consume_next()
def test_consume_next_first_set_partial(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set = _PartialResultSetPB(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed.consume_next()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertTrue(streamed.metadata is metadata)
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_next_w_partial_result(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
VALUES = [
self._makeValue(u'Phred '),
]
result_set = _PartialResultSetPB(VALUES, chunked_value=True)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed.consume_next()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, VALUES[0])
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_next_w_pending_chunk(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
BARE = [
u'Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone',
]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set = _PartialResultSetPB(VALUES)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed._metadata = _ResultSetMetadataPB(FIELDS)
streamed._pending_chunk = self._makeValue(u'Phred ')
streamed.consume_next()
self.assertEqual(streamed.rows, [
[u'Phred Phlyntstone', BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
])
self.assertEqual(streamed._current_row, [BARE[6]])
self.assertIsNone(streamed._pending_chunk)
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_next_last_set(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
stats = _ResultSetStatsPB(
rows_returned="1",
elapsed_time="1.23 secs",
            cpu_time="0.98 secs",
)
BARE = [u'Phred Phlyntstone', 42, True]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set = _PartialResultSetPB(VALUES, stats=stats)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed._metadata = metadata
streamed.consume_next()
self.assertEqual(streamed.rows, [BARE])
self.assertEqual(streamed._current_row, [])
self.assertTrue(streamed._stats is stats)
self.assertEqual(streamed.resume_token, result_set.resume_token)
def test_consume_all_empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
streamed.consume_all()
def test_consume_all_one_result_set_partial(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set = _PartialResultSetPB(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
streamed.consume_all()
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertTrue(streamed.metadata is metadata)
def test_consume_all_multiple_result_sets_filled(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone', 41, True,
]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set1 = _PartialResultSetPB(VALUES[:4], metadata=metadata)
result_set2 = _PartialResultSetPB(VALUES[4:])
iterator = _MockCancellableIterator(result_set1, result_set2)
streamed = self._make_one(iterator)
streamed.consume_all()
self.assertEqual(streamed.rows, [
[BARE[0], BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
[BARE[6], BARE[7], BARE[8]],
])
self.assertEqual(streamed._current_row, [])
self.assertIsNone(streamed._pending_chunk)
def test___iter___empty(self):
iterator = _MockCancellableIterator()
streamed = self._make_one(iterator)
found = list(streamed)
self.assertEqual(found, [])
def test___iter___one_result_set_partial(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
BARE = [u'Phred Phlyntstone', 42]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set = _PartialResultSetPB(VALUES, metadata=metadata)
iterator = _MockCancellableIterator(result_set)
streamed = self._make_one(iterator)
found = list(streamed)
self.assertEqual(found, [])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, BARE)
self.assertTrue(streamed.metadata is metadata)
def test___iter___multiple_result_sets_filled(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone', 41, True,
]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set1 = _PartialResultSetPB(VALUES[:4], metadata=metadata)
result_set2 = _PartialResultSetPB(VALUES[4:])
iterator = _MockCancellableIterator(result_set1, result_set2)
streamed = self._make_one(iterator)
found = list(streamed)
self.assertEqual(found, [
[BARE[0], BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
[BARE[6], BARE[7], BARE[8]],
])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
self.assertIsNone(streamed._pending_chunk)
def test___iter___w_existing_rows_read(self):
FIELDS = [
self._makeScalarField('full_name', 'STRING'),
self._makeScalarField('age', 'INT64'),
self._makeScalarField('married', 'BOOL'),
]
metadata = _ResultSetMetadataPB(FIELDS)
ALREADY = [
[u'Pebbylz Phlyntstone', 4, False],
[u'Dino Rhubble', 4, False],
]
BARE = [
u'Phred Phlyntstone', 42, True,
u'Bharney Rhubble', 39, True,
u'Wylma Phlyntstone', 41, True,
]
VALUES = [self._makeValue(bare) for bare in BARE]
result_set1 = _PartialResultSetPB(VALUES[:4], metadata=metadata)
result_set2 = _PartialResultSetPB(VALUES[4:])
iterator = _MockCancellableIterator(result_set1, result_set2)
streamed = self._make_one(iterator)
streamed._rows[:] = ALREADY
found = list(streamed)
self.assertEqual(found, ALREADY + [
[BARE[0], BARE[1], BARE[2]],
[BARE[3], BARE[4], BARE[5]],
[BARE[6], BARE[7], BARE[8]],
])
self.assertEqual(streamed.rows, [])
self.assertEqual(streamed._current_row, [])
self.assertIsNone(streamed._pending_chunk)
class _MockCancellableIterator(object):
cancel_calls = 0
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
def __next__(self): # pragma: NO COVER Py3k
return self.next()
class _ResultSetMetadataPB(object):
def __init__(self, fields):
from google.cloud.proto.spanner.v1.type_pb2 import StructType
self.row_type = StructType(fields=fields)
class _ResultSetStatsPB(object):
def __init__(self, query_plan=None, **query_stats):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner._helpers import _make_value_pb
self.query_plan = query_plan
self.query_stats = Struct(fields={
key: _make_value_pb(value) for key, value in query_stats.items()})
class _PartialResultSetPB(object):
resume_token = b'DEADBEEF'
def __init__(self, values, metadata=None, stats=None, chunked_value=False):
self.values = values
self.metadata = metadata
self.stats = stats
self.chunked_value = chunked_value
def HasField(self, name):
assert name == 'stats'
return self.stats is not None
class TestStreamedResultSet_JSON_acceptance_tests(unittest.TestCase):
_json_tests = None
def _getTargetClass(self):
from google.cloud.spanner.streamed import StreamedResultSet
return StreamedResultSet
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _load_json_test(self, test_name):
import os
if self.__class__._json_tests is None:
dirname = os.path.dirname(__file__)
filename = os.path.join(
dirname, 'streaming-read-acceptance-test.json')
raw = _parse_streaming_read_acceptance_tests(filename)
tests = self.__class__._json_tests = {}
for (name, partial_result_sets, results) in raw:
tests[name] = partial_result_sets, results
return self.__class__._json_tests[test_name]
# Non-error cases
def _match_results(self, testcase_name, assert_equality=None):
partial_result_sets, expected = self._load_json_test(testcase_name)
iterator = _MockCancellableIterator(*partial_result_sets)
partial = self._make_one(iterator)
partial.consume_all()
if assert_equality is not None:
assert_equality(partial.rows, expected)
else:
self.assertEqual(partial.rows, expected)
def test_basic(self):
self._match_results('Basic Test')
def test_string_chunking(self):
self._match_results('String Chunking Test')
def test_string_array_chunking(self):
self._match_results('String Array Chunking Test')
def test_string_array_chunking_with_nulls(self):
self._match_results('String Array Chunking Test With Nulls')
def test_string_array_chunking_with_empty_strings(self):
self._match_results('String Array Chunking Test With Empty Strings')
def test_string_array_chunking_with_one_large_string(self):
self._match_results('String Array Chunking Test With One Large String')
def test_int64_array_chunking(self):
self._match_results('INT64 Array Chunking Test')
def test_float64_array_chunking(self):
import math
def assert_float_equality(lhs, rhs):
# NaN, +Inf, and -Inf can't be tested for equality
if lhs is None:
self.assertIsNone(rhs)
elif math.isnan(lhs):
self.assertTrue(math.isnan(rhs))
elif math.isinf(lhs):
self.assertTrue(math.isinf(rhs))
# but +Inf and -Inf can be tested for magnitude
self.assertTrue((lhs > 0) == (rhs > 0))
else:
self.assertEqual(lhs, rhs)
def assert_rows_equality(lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
for l_rows, r_rows in zip(lhs, rhs):
self.assertEqual(len(l_rows), len(r_rows))
for l_row, r_row in zip(l_rows, r_rows):
self.assertEqual(len(l_row), len(r_row))
for l_cell, r_cell in zip(l_row, r_row):
assert_float_equality(l_cell, r_cell)
self._match_results(
'FLOAT64 Array Chunking Test', assert_rows_equality)
def test_struct_array_chunking(self):
self._match_results('Struct Array Chunking Test')
def test_nested_struct_array(self):
self._match_results('Nested Struct Array Test')
def test_nested_struct_array_chunking(self):
self._match_results('Nested Struct Array Chunking Test')
def test_struct_array_and_string_chunking(self):
self._match_results('Struct Array And String Chunking Test')
def test_multiple_row_single_chunk(self):
self._match_results('Multiple Row Single Chunk')
def test_multiple_row_multiple_chunks(self):
self._match_results('Multiple Row Multiple Chunks')
def test_multiple_row_chunks_non_chunks_interleaved(self):
self._match_results('Multiple Row Chunks/Non Chunks Interleaved')
def _generate_partial_result_sets(prs_text_pbs):
from google.protobuf.json_format import Parse
from google.cloud.proto.spanner.v1.result_set_pb2 import PartialResultSet
partial_result_sets = []
for prs_text_pb in prs_text_pbs:
prs = PartialResultSet()
partial_result_sets.append(Parse(prs_text_pb, prs))
return partial_result_sets
def _normalize_int_array(cell):
normalized = []
for subcell in cell:
if subcell is not None:
subcell = int(subcell)
normalized.append(subcell)
return normalized
def _normalize_float(cell):
if cell == u'Infinity':
return float('inf')
if cell == u'-Infinity':
return float('-inf')
if cell == u'NaN':
return float('nan')
if cell is not None:
return float(cell)
def _normalize_results(rows_data, fields):
"""Helper for _parse_streaming_read_acceptance_tests"""
from google.cloud.proto.spanner.v1 import type_pb2
normalized = []
for row_data in rows_data:
row = []
assert len(row_data) == len(fields)
for cell, field in zip(row_data, fields):
if field.type.code == type_pb2.INT64:
cell = int(cell)
if field.type.code == type_pb2.FLOAT64:
cell = _normalize_float(cell)
elif field.type.code == type_pb2.BYTES:
cell = cell.encode('utf8')
elif field.type.code == type_pb2.ARRAY:
if field.type.array_element_type.code == type_pb2.INT64:
cell = _normalize_int_array(cell)
elif field.type.array_element_type.code == type_pb2.FLOAT64:
cell = [_normalize_float(subcell) for subcell in cell]
row.append(cell)
normalized.append(row)
return normalized
def _parse_streaming_read_acceptance_tests(filename):
"""Parse acceptance tests from JSON
See: streaming-read-acceptance-test.json
"""
import json
with open(filename) as json_file:
test_json = json.load(json_file)
for test in test_json['tests']:
name = test['name']
partial_result_sets = _generate_partial_result_sets(test['chunks'])
fields = partial_result_sets[0].metadata.row_type.fields
result = _normalize_results(test['result']['value'], fields)
yield name, partial_result_sets, result
| apache-2.0 | -401,389,291,719,153,150 | 36.643585 | 79 | 0.608722 | false |
dlebauer/plantcv | scripts/dev/vis_tv_z500_L2.py | 1 | 5084 | #!/usr/bin/python
import sys, traceback
import cv2
import numpy as np
import argparse
import string
import plantcv as pcv
### Parse command-line arguments
def options():
parser = argparse.ArgumentParser(description="Imaging processing with opencv")
parser.add_argument("-i", "--image", help="Input image file.", required=True)
parser.add_argument("-m", "--roi", help="Input region of interest file.", required=False)
parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=True)
parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action="store_true")
args = parser.parse_args()
return args
### Main pipeline
def main():
# Get options
args = options()
# Read image
img, path, filename = pcv.readimage(args.image)
brass_mask = cv2.imread(args.roi)
# Pipeline step
device = 0
# Convert RGB to HSV and extract the Saturation channel
device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
# Threshold the Saturation image
device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)
# Median Filter
device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
# Fill small objects
device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)
# Convert RGB to LAB and extract the Blue channel
device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
# Threshold the blue image
device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
# Fill small objects
device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug)
# Join the thresholded saturation and blue-yellow images
device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
# Apply Mask (for vis images, mask_color=white)
device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
# Mask pesky brass piece
device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
device, brass_inv=pcv.invert(brass_thresh, device, args.debug)
device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug)
# Further mask soil and car
device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
device, soil_car = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug)
device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug)
# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)
# Threshold the green-magenta and blue images
device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, 'dark', device, args.debug)
device, soilb_thresh = pcv.binary_threshold(soil_b, 150, 255, 'light', device, args.debug)
# Join the thresholded saturation and blue-yellow images (OR)
device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
# Fill small objects
device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 150, device, args.debug)
# Median Filter
#device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
#device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)
# Apply mask (for vis images, mask_color=white)
device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug)
# Identify objects
device, id_objects,obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)
# Define ROI
device, roi1, roi_hierarchy= pcv.define_roi(img,'rectangle', device, None, 'default', args.debug,True, 0,0,-50,-50)
# Decide which objects to keep
device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)
# Object combine kept objects
device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
############## Analysis ################
# Find shape properties, output shape image (optional)
device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename)
# Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
device, color_header,color_data,norm_slice= pcv.analyze_color(img, args.image, kept_mask, 256, device, args.debug,'all','rgb','v','img',300,args.outdir+'/'+filename)
# Output shape and color data
pcv.print_results(args.image, shape_header, shape_data)
pcv.print_results(args.image, color_header, color_data)
if __name__ == '__main__':
main()
| gpl-2.0 | 6,379,036,647,077,785,000 | 42.084746 | 167 | 0.700236 | false |
cheral/orange3 | Orange/widgets/data/tests/test_owimpute.py | 1 | 1025 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import numpy as np
from Orange.data import Table
from Orange.widgets.data.owimpute import OWImpute
from Orange.widgets.tests.base import WidgetTest
class TestOWImpute(WidgetTest):
def setUp(self):
self.widget = self.create_widget(OWImpute)
def test_empty_data(self):
"""No crash on empty data"""
data = Table("iris")
widget = self.widget
widget.default_method_index = widget.MODEL_BASED_IMPUTER
widget.default_method = widget.METHODS[widget.default_method_index]
self.send_signal("Data", data)
widget.unconditional_commit()
imp_data = self.get_output("Data")
np.testing.assert_equal(imp_data.X, data.X)
np.testing.assert_equal(imp_data.Y, data.Y)
self.send_signal("Data", Table(data.domain))
widget.unconditional_commit()
imp_data = self.get_output("Data")
self.assertEqual(len(imp_data), 0)
| bsd-2-clause | -2,602,709,922,889,076,700 | 33.166667 | 75 | 0.677073 | false |
andybrnr/QuantEcon.py | lecture scratch scripts/QuantEcon_Exercises_Ch2Sec2.py | 1 | 2123 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 08:45:53 2015
@author: ABerner
"""
from __future__ import print_function
# Ex 1
import numpy as np
import scipy as sp
import scipy.stats as stats
import quantecon as qe
import matplotlib.pyplot as plt
alpha = 0.1
beta = 0.1
P = np.array([[1-alpha,alpha],[beta,1-beta]])
p_UnEmp = beta/(alpha+beta)
n = 10000
x0s = [0,1]
fig, ax = plt.subplots()
inds = np.linspace(1,n+1,n,dtype=float)
for x0 in x0s:
s = qe.mc_sample_path(P, init=x0, sample_size=n)
tmp = np.cumsum(s==0)/inds-p_UnEmp
ax.plot(inds-1,tmp)
ax.set_xlim([0,n])
# Ex 2
import re
readpath = "C:\\Users\\andrewb\\Documents\\GitHub\\QuantEcon.py\\data\\"
filename = "web_graph_data.txt"
filepath = readpath+filename
#convert lnklst into graph matrix
def graph_from_file(filepath):
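    # each line of the data file names a source page and a target page (letters a..n);
    # build the corresponding 14x14 adjacency matrix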
f = open(filepath, 'r')
idxdict = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3, 'e' : 4, 'f' : 5, 'g' : 6,
'h' : 7, 'i' : 8, 'j' : 9, 'k' : 10, 'l' : 11, 'm' : 12,
'n' : 13}
graph_mat = np.zeros((14,14),dtype=float)
for line in f:
tmp = (re.findall('\w', line))
graph_mat[idxdict[tmp[0]],idxdict[tmp[1]]] = 1
return graph_mat
gmat = graph_from_file(filepath)
#normalize graph by number of links to form Markov matrix
n = gmat.shape[0]
gmat = gmat/np.reshape(np.repeat(gmat.sum(axis=1).T,n,axis=0),(n,n))
#solve for stationary distribution
pstat = qe.mc_compute_stationary(gmat)
# Ex 3
def approx_markov(rho, sigma_u, m=3, n=7):
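    # Tauchen-style discretization of the AR(1) process y' = rho*y + u, u ~ N(0, sigma_u^2):
    # place n evenly spaced grid points within +/- m unconditional standard deviations
    # and fill the transition matrix from differences of the normal CDF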
sigma_y = np.sqrt((sigma_u**2)/(1.-rho**2))
x_arr = np.linspace(start=-m*sigma_y,stop=m*sigma_y,num=n)
    F = stats.norm(scale=sigma_u).cdf  # scipy's 'scale' is the std dev of the shock u
s = x_arr[1]-x_arr[0]
P_arr = np.empty((n,n),dtype=float)
for i in range(0,n):
for j in range(0,n):
if j==0:
P_arr[i][j] = F(x_arr[0]-rho*x_arr[i]+s/2)
elif j==n-1:
P_arr[i][j] = 1-F(x_arr[n-1]-rho*x_arr[i]-s/2)
else:
P_arr[i][j] = F(x_arr[j]-rho*x_arr[i]+s/2)-F(x_arr[j]-rho*x_arr[i]-s/2)
return x_arr, P_arr
| bsd-3-clause | 4,790,539,041,443,056,000 | 21.585106 | 87 | 0.558172 | false |
fmfn/UnbalancedDataset | imblearn/metrics/pairwise.py | 2 | 7730 | """Metrics to perform pairwise computation."""
# Authors: Guillaume Lemaitre <[email protected]>
# License: MIT
import numpy as np
from scipy.spatial import distance_matrix
from sklearn.base import BaseEstimator
from sklearn.utils import check_consistent_length
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
class ValueDifferenceMetric(BaseEstimator):
r"""Class implementing the Value Difference Metric.
This metric computes the distance between samples containing only
categorical features. The distance between feature values of two samples is
defined as:
.. math::
\delta(x, y) = \sum_{c=1}^{C} |p(c|x_{f}) - p(c|y_{f})|^{k} \ ,
where :math:`x` and :math:`y` are two samples and :math:`f` a given
feature, :math:`C` is the number of classes, :math:`p(c|x_{f})` is the
conditional probability that the output class is :math:`c` given that
the feature value :math:`f` has the value :math:`x` and :math:`k` an
    exponent usually set to 1 or 2.
The distance for the feature vectors :math:`X` and :math:`Y` is
subsequently defined as:
.. math::
\Delta(X, Y) = \sum_{f=1}^{F} \delta(X_{f}, Y_{f})^{r} \ ,
    where :math:`F` is the number of features and :math:`r` an exponent usually
    set to 1 or 2.
    The definition of this distance was proposed in [1]_.
Read more in the :ref:`User Guide <vdm>`.
.. versionadded:: 0.8
Parameters
----------
n_categories : "auto" or array-like of shape (n_features,), default="auto"
The number of unique categories per features. If `"auto"`, the number
of categories will be computed from `X` at `fit`. Otherwise, you can
provide an array-like of such counts to avoid computation. You can use
the fitted attribute `categories_` of the
:class:`~sklearn.preprocesssing.OrdinalEncoder` to deduce these counts.
k : int, default=1
Exponent used to compute the distance between feature value.
r : int, default=2
Exponent used to compute the distance between the feature vector.
Attributes
----------
n_categories_ : ndarray of shape (n_features,)
        The number of categories per feature.
proba_per_class_ : list of ndarray of shape (n_categories, n_classes)
List of length `n_features` containing the conditional probabilities
for each category given a class.
Notes
-----
The input data `X` are expected to be encoded by an
    :class:`~sklearn.preprocessing.OrdinalEncoder` and the data type used
should be `np.int32`. If other data types are given, `X` will be converted
to `np.int32`.
References
----------
.. [1] Stanfill, Craig, and David Waltz. "Toward memory-based reasoning."
Communications of the ACM 29.12 (1986): 1213-1228.
Examples
--------
>>> import numpy as np
>>> X = np.array(["green"] * 10 + ["red"] * 10 + ["blue"] * 10).reshape(-1, 1)
>>> y = [1] * 8 + [0] * 5 + [1] * 7 + [0] * 9 + [1]
>>> from sklearn.preprocessing import OrdinalEncoder
>>> encoder = OrdinalEncoder(dtype=np.int32)
>>> X_encoded = encoder.fit_transform(X)
>>> from imblearn.metrics.pairwise import ValueDifferenceMetric
>>> vdm = ValueDifferenceMetric().fit(X_encoded, y)
>>> pairwise_distance = vdm.pairwise(X_encoded)
>>> pairwise_distance.shape
(30, 30)
>>> X_test = np.array(["green", "red", "blue"]).reshape(-1, 1)
>>> X_test_encoded = encoder.transform(X_test)
>>> vdm.pairwise(X_test_encoded)
array([[ 0. , 0.04, 1.96],
[ 0.04, 0. , 1.44],
[ 1.96, 1.44, 0. ]])
"""
def __init__(self, *, n_categories="auto", k=1, r=2):
self.n_categories = n_categories
self.k = k
self.r = r
def fit(self, X, y):
"""Compute the necessary statistics from the training set.
Parameters
----------
X : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
y : ndarray of shape (n_features,)
The target.
Returns
-------
self
"""
check_consistent_length(X, y)
X, y = self._validate_data(X, y, reset=True, dtype=np.int32)
if isinstance(self.n_categories, str) and self.n_categories == "auto":
# categories are expected to be encoded from 0 to n_categories - 1
self.n_categories_ = X.max(axis=0) + 1
else:
if len(self.n_categories) != self.n_features_in_:
raise ValueError(
f"The length of n_categories is not consistent with the "
f"number of feature in X. Got {len(self.n_categories)} "
f"elements in n_categories and {self.n_features_in_} in "
f"X."
)
self.n_categories_ = np.array(self.n_categories, copy=False)
classes = unique_labels(y)
# list of length n_features of ndarray (n_categories, n_classes)
# compute the counts
self.proba_per_class_ = [
np.empty(shape=(n_cat, len(classes)), dtype=np.float64)
for n_cat in self.n_categories_
]
for feature_idx in range(self.n_features_in_):
for klass_idx, klass in enumerate(classes):
self.proba_per_class_[feature_idx][:, klass_idx] = np.bincount(
X[y == klass, feature_idx],
minlength=self.n_categories_[feature_idx],
)
# normalize by the summing over the classes
with np.errstate(invalid="ignore"):
# silence potential warning due to in-place division by zero
for feature_idx in range(self.n_features_in_):
self.proba_per_class_[feature_idx] /= (
self.proba_per_class_[feature_idx].sum(axis=1).reshape(-1, 1)
)
np.nan_to_num(self.proba_per_class_[feature_idx], copy=False)
return self
def pairwise(self, X, Y=None):
"""Compute the VDM distance pairwise.
Parameters
----------
X : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
Y : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
Returns
-------
distance_matrix : ndarray of shape (n_samples, n_samples)
The VDM pairwise distance.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False, dtype=np.int32)
n_samples_X = X.shape[0]
if Y is not None:
Y = self._validate_data(Y, reset=False, dtype=np.int32)
n_samples_Y = Y.shape[0]
else:
n_samples_Y = n_samples_X
distance = np.zeros(shape=(n_samples_X, n_samples_Y), dtype=np.float64)
for feature_idx in range(self.n_features_in_):
proba_feature_X = self.proba_per_class_[feature_idx][X[:, feature_idx]]
if Y is not None:
proba_feature_Y = self.proba_per_class_[feature_idx][Y[:, feature_idx]]
else:
proba_feature_Y = proba_feature_X
distance += (
distance_matrix(proba_feature_X, proba_feature_Y, p=self.k) ** self.r
)
return distance
| mit | 1,577,381,145,551,678,200 | 37.267327 | 87 | 0.590944 | false |
yousseb/meld | meld/accelerators.py | 1 | 2040 |
from typing import Dict, Sequence, Union
from gi.repository import Gtk
VIEW_ACCELERATORS: Dict[str, Union[str, Sequence[str]]] = {
'app.quit': '<Primary>Q',
'view.find': '<Primary>F',
'view.find-next': '<Primary>G',
'view.find-previous': '<Primary><Shift>G',
'view.find-replace': '<Primary>H',
'view.go-to-line': '<Primary>I',
# Overridden in CSS
'view.next-change': ('<Alt>Down', '<Alt>KP_Down', '<Primary>D'),
'view.next-pane': '<Alt>Page_Down',
# Overridden in CSS
'view.previous-change': ('<Alt>Up', '<Alt>KP_Up', '<Primary>E'),
'view.previous-pane': '<Alt>Page_Up',
'view.redo': '<Primary><Shift>Z',
'view.refresh': ('<control>R', 'F5'),
'view.save': '<Primary>S',
'view.save-all': '<Primary><Shift>L',
'view.save-as': '<Primary><Shift>S',
'view.undo': '<Primary>Z',
'win.close': '<Primary>W',
'win.new-tab': '<Primary>N',
'win.stop': 'Escape',
# File comparison actions
'view.file-previous-conflict': '<Primary>I',
'view.file-next-conflict': '<Primary>K',
'view.file-push-left': '<Alt>Left',
'view.file-push-right': '<Alt>Right',
'view.file-pull-left': '<Alt><shift>Right',
'view.file-pull-right': '<Alt><shift>Left',
'view.file-copy-left-up': '<Alt>bracketleft',
'view.file-copy-right-up': '<Alt>bracketright',
'view.file-copy-left-down': '<Alt>semicolon',
'view.file-copy-right-down': '<Alt>quoteright',
'view.file-delete': ('<Alt>Delete', '<Alt>KP_Delete'),
'view.show-overview-map': 'F9',
# Folder comparison actions
'view.folder-compare': 'Return',
'view.folder-copy-left': '<Alt>Left',
'view.folder-copy-right': '<Alt>Right',
'view.folder-delete': 'Delete',
# Version control actions
'view.vc-commit': '<Primary>M',
'view.vc-console-visible': 'F9',
}
def register_accels(app: Gtk.Application):
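    # set_accels_for_action expects a list of accelerator strings, so single strings are wrapped in a 1-tuple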
for name, accel in VIEW_ACCELERATORS.items():
accel = accel if isinstance(accel, tuple) else (accel,)
app.set_accels_for_action(name, accel)
| gpl-2.0 | -7,862,199,245,909,584,000 | 36.090909 | 68 | 0.608333 | false |
ingenieroariel/geonode | geonode/layers/urls.py | 1 | 3526 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.conf import settings
from django.views.generic import TemplateView
js_info_dict = {
'packages': ('geonode.layers',),
}
urlpatterns = patterns(
'geonode.layers.views',
url(r'^$',
TemplateView.as_view(template_name='layers/layer_list.html'),
{'facet_type': 'layers', 'is_layer': True},
name='layer_browse'),
url(r'^upload$', 'layer_upload', name='layer_upload'),
url(r'^upload_metadata$', 'layer_metadata_upload', name='layer_metadata_upload'),
url(r'^upload_style$', 'layer_sld_upload', name='layer_sld_upload'),
url(r'^load_layer_data$', 'load_layer_data', name='load_layer_data'),
url(r'^(?P<layername>[^/]*)$', 'layer_detail', name="layer_detail"),
url(r'^(?P<layername>[^/]*)/metadata$', 'layer_metadata', name="layer_metadata"),
url(r'^(?P<layername>[^/]*)/metadata_advanced$', 'layer_metadata_advanced', name="layer_metadata_advanced"),
url(r'^(?P<layername>[^/]*)/remove$', 'layer_remove', name="layer_remove"),
url(r'^(?P<granule_id>[^/]*)/(?P<layername>[^/]*)/granule_remove$', 'layer_granule_remove',
name="layer_granule_remove"),
url(r'^(?P<layername>[^/]*)/replace$', 'layer_replace', name="layer_replace"),
url(r'^(?P<layername>[^/]*)/thumbnail$', 'layer_thumbnail', name='layer_thumbnail'),
url(r'^(?P<layername>[^/]*)/get$', 'get_layer', name='get_layer'),
url(r'^(?P<layername>[^/]*)/metadata_detail$', 'layer_metadata_detail', name='layer_metadata_detail'),
url(r'^(?P<layername>[^/]*)/metadata_upload$', 'layer_metadata_upload', name='layer_metadata_upload'),
url(r'^(?P<layername>[^/]*)/style_upload$', 'layer_sld_upload', name='layer_sld_upload'),
url(r'^(?P<layername>[^/]*)/feature_catalogue$', 'layer_feature_catalogue', name='layer_feature_catalogue'),
url(r'^metadata/batch/(?P<ids>[^/]*)/$', 'layer_batch_metadata', name='layer_batch_metadata'),
# url(r'^api/batch_permissions/?$', 'batch_permissions',
# name='batch_permssions'),
# url(r'^api/batch_delete/?$', 'batch_delete', name='batch_delete'),
)
# -- Deprecated url routes for Geoserver authentication -- remove after GeoNode 2.1
# -- Use /gs/acls, gs/resolve_user/, gs/download instead
if 'geonode.geoserver' in settings.INSTALLED_APPS:
urlpatterns = patterns('geonode.geoserver.views',
url(r'^acls/?$', 'layer_acls', name='layer_acls_dep'),
url(r'^resolve_user/?$', 'resolve_user', name='layer_resolve_user_dep'),
url(r'^download$', 'layer_batch_download', name='layer_batch_download_dep'),
) + urlpatterns
| gpl-3.0 | -1,857,138,730,750,675,500 | 52.424242 | 112 | 0.615712 | false |
qzane/you-get | src/you_get/extractors/qq.py | 1 | 7043 | #!/usr/bin/env python
__all__ = ['qq_download']
from ..common import *
from .qie import download as qieDownload
from urllib.parse import urlparse,parse_qs
def qq_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False):
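    # query the getinfo endpoint for the stream layout, then either fetch per-part keys
    # for multi-part HD streams or download a single file signed with fvkey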
info_api = 'http://vv.video.qq.com/getinfo?otype=json&appver=3%2E2%2E19%2E333&platform=11&defnpayver=1&vid=' + vid
info = get_html(info_api)
video_json = json.loads(match1(info, r'QZOutputJson=(.*)')[:-1])
parts_vid = video_json['vl']['vi'][0]['vid']
parts_ti = video_json['vl']['vi'][0]['ti']
parts_prefix = video_json['vl']['vi'][0]['ul']['ui'][0]['url']
parts_formats = video_json['fl']['fi']
if parts_prefix.endswith('/'):
parts_prefix = parts_prefix[:-1]
# find best quality
# only looking for fhd(1080p) and shd(720p) here.
# 480p usually come with a single file, will be downloaded as fallback.
best_quality = ''
for part_format in parts_formats:
if part_format['name'] == 'fhd':
best_quality = 'fhd'
break
if part_format['name'] == 'shd':
best_quality = 'shd'
for part_format in parts_formats:
if (not best_quality == '') and (not part_format['name'] == best_quality):
continue
part_format_id = part_format['id']
part_format_sl = part_format['sl']
if part_format_sl == 0:
part_urls= []
total_size = 0
try:
# For fhd(1080p), every part is about 100M and 6 minutes
# try 100 parts here limited download longest single video of 10 hours.
for part in range(1,100):
filename = vid + '.p' + str(part_format_id % 10000) + '.' + str(part) + '.mp4'
key_api = "http://vv.video.qq.com/getkey?otype=json&platform=11&format=%s&vid=%s&filename=%s" % (part_format_id, parts_vid, filename)
#print(filename)
#print(key_api)
part_info = get_html(key_api)
key_json = json.loads(match1(part_info, r'QZOutputJson=(.*)')[:-1])
#print(key_json)
vkey = key_json['key']
url = '%s/%s?vkey=%s' % (parts_prefix, filename, vkey)
part_urls.append(url)
_, ext, size = url_info(url, faker=True)
total_size += size
except:
pass
print_info(site_info, parts_ti, ext, total_size)
if not info_only:
download_urls(part_urls, parts_ti, ext, total_size, output_dir=output_dir, merge=merge)
else:
fvkey = video_json['vl']['vi'][0]['fvkey']
mp4 = video_json['vl']['vi'][0]['cl'].get('ci', None)
if mp4:
old_id = mp4[0]['keyid'].split('.')[1]
new_id = 'p' + str(int(old_id) % 10000)
mp4 = mp4[0]['keyid'].replace(old_id, new_id) + '.mp4'
else:
mp4 = video_json['vl']['vi'][0]['fn']
url = '%s/%s?vkey=%s' % ( parts_prefix, mp4, fvkey )
_, ext, size = url_info(url, faker=True)
print_info(site_info, title, ext, size)
if not info_only:
download_urls([url], title, ext, size, output_dir=output_dir, merge=merge)
def kg_qq_download_by_shareid(shareid, output_dir='.', info_only=False, caption=False):
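    # QQ KGe (kg.qq.com) shared song: look up the media URL and lyrics through the UGC
    # detail and lyric CGI endpoints, then download and optionally write an .lrc file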
BASE_URL = 'http://cgi.kg.qq.com/fcgi-bin/kg_ugc_getdetail'
params_str = '?dataType=jsonp&jsonp=callback&jsonpCallback=jsopgetsonginfo&v=4&outCharset=utf-8&shareid=' + shareid
url = BASE_URL + params_str
content = get_content(url)
json_str = content[len('jsonpcallback('):-1]
json_data = json.loads(json_str)
playurl = json_data['data']['playurl']
videourl = json_data['data']['playurl_video']
real_url = playurl if playurl else videourl
real_url = real_url.replace('\/', '/')
ksong_mid = json_data['data']['ksong_mid']
lyric_url = 'http://cgi.kg.qq.com/fcgi-bin/fcg_lyric?jsonpCallback=jsopgetlrcdata&outCharset=utf-8&ksongmid=' + ksong_mid
lyric_data = get_content(lyric_url)
lyric_string = lyric_data[len('jsopgetlrcdata('):-1]
lyric_json = json.loads(lyric_string)
lyric = lyric_json['data']['lyric']
title = match1(lyric, r'\[ti:([^\]]*)\]')
type, ext, size = url_info(real_url)
if not title:
title = shareid
print_info('腾讯全民K歌', title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge=False)
if caption:
caption_filename = title + '.lrc'
caption_path = output_dir + '/' + caption_filename
with open(caption_path, 'w') as f:
lrc_list = lyric.split('\r\n')
for line in lrc_list:
f.write(line)
f.write('\n')
def qq_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
""""""
if 'kg.qq.com' in url or 'kg2.qq.com' in url:
shareid = url.split('?s=')[-1]
caption = kwargs['caption']
kg_qq_download_by_shareid(shareid, output_dir=output_dir, info_only=info_only, caption=caption)
return
if 'live.qq.com' in url:
qieDownload(url, output_dir=output_dir, merge=merge, info_only=info_only)
return
if 'mp.weixin.qq.com/s?' in url:
content = get_html(url)
vids = matchall(content, [r'\bvid=(\w+)'])
for vid in vids:
qq_download_by_vid(vid, vid, output_dir, merge, info_only)
return
#do redirect
if 'v.qq.com/page' in url:
# for URLs like this:
# http://v.qq.com/page/k/9/7/k0194pwgw97.html
content = get_html(url)
url = match1(content,r'window\.location\.href="(.*?)"')
if 'kuaibao.qq.com' in url or re.match(r'http://daxue.qq.com/content/content/id/\d+', url):
content = get_html(url)
vid = match1(content, r'vid\s*=\s*"\s*([^"]+)"')
title = match1(content, r'title">([^"]+)</p>')
title = title.strip() if title else vid
elif 'iframe/player.html' in url:
vid = match1(url, r'\bvid=(\w+)')
# for embedded URLs; don't know what the title is
title = vid
else:
content = get_html(url)
vid = parse_qs(urlparse(url).query).get('vid') #for links specified vid like http://v.qq.com/cover/p/ps6mnfqyrfo7es3.html?vid=q0181hpdvo5
vid = vid[0] if vid else match1(content, r'vid"*\s*:\s*"\s*([^"]+)"') #general fallback
title = match1(content,r'<a.*?id\s*=\s*"%s".*?title\s*=\s*"(.+?)".*?>'%vid)
title = match1(content, r'title">([^"]+)</p>') if not title else title
title = match1(content, r'"title":"([^"]+)"') if not title else title
title = vid if not title else title #general fallback
qq_download_by_vid(vid, title, output_dir, merge, info_only)
site_info = "QQ.com"
download = qq_download
download_playlist = playlist_not_supported('qq')
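
# Hedged usage sketch (added for illustration, not part of the original module). It
# assumes the module's helpers (get_html, match1, print_info, download_urls, ...) are
# available exactly as in the surrounding project; the URL is just the sample link
# already mentioned in the comments above, and info_only=True only prints stream info.
if __name__ == '__main__':
    qq_download('http://v.qq.com/cover/p/ps6mnfqyrfo7es3.html?vid=q0181hpdvo5',
                output_dir='.', merge=True, info_only=True)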
| mit | -2,776,723,440,680,657,000 | 42.41358 | 153 | 0.55723 | false |
HerculesShek/hercules.python.pro | HZ_BUS/forecast/createAlphaTable2.py | 1 | 2762 | # -*- coding: utf-8 -*-
import MySQLdb, time
conn=MySQLdb.connect(host="localhost",user="root",passwd="xrt512",db="hz_bus", port=3306)
cur = conn.cursor()
# Create alphaTable2
cur.execute(u'''
create table alphaTable2 as
select *
from
(
select lineid,linedir,stopInfo,AVG(internalTime) as averageInternalTime,internalIndex,timeIndex,dayType,quarter,100.00/100 as cfftOfVar
from alphaTable1
group by lineid,linedir,stopInfo,internalIndex,timeIndex,dayType,quarter
) as t''')
conn.commit()
time.sleep(2)
# Compute the average values from alphaTable1 and update averageInternalTime in alphaTable2; the main work of this stage is filtering
cur.execute(u'''select distinct lineid, linedir, internalIndex,timeIndex,dayType, quarter from alphaTable1''')
groups = cur.fetchall()
groups = list(groups)
sql = '''SELECT lineid,linedir,tid,runIndex,fromStop,toStop,stopInfo,internalTime,internalIndex
,timeIndex,dayValue,dayType,quarter FROM hz_bus.alphaTable1'''
cur.execute(sql)
resList = cur.fetchall()
resList = list(resList)
'''
lineid, 0
linedir, 1
internalIndex, 2
timeIndex, 3
dayType, 4
quarter 5
'''
for g in groups:
t=sorted([resList.pop(resList.index(elem)) for elem in resList \
if elem[0]==g[0] and elem[1]==g[1] and elem[8]==g[2] and elem[9]==g[3] and \
elem[11]==g[4] and elem[12]==g[5]],key=lambda e:e[7])
    t = t[int(round(len(t)*0.03)):int(round(len(t)*0.97))]  # drop the extreme values in the lowest 3% and highest 3%
g=list(g)
g.insert(0,sum([e[7] for e in t])/len(t))
params=tuple(g)
sql=u'''update hz_bus.alphaTable2 set averageInternalTime = %d
where lineid=%d and linedir=%d and internalIndex=%d and
timeIndex=%d and dayType=%d and quarter=%d''' % params
cur.execute(sql)
conn.commit()
# Compute the coefficient of variation
cur.execute(u'''
UPDATE hz_bus.alphaTable2 AS t1,
(
SELECT b.lineid,b.linedir,b.stopInfo,b.internalIndex,
b.timeIndex,b.dayType,b.quarter,b.averageInternalTime*1.00/a.averageInternalTime AS factor
FROM hz_bus.alphaTable2 a,hz_bus.alphaTable2 b
WHERE a.lineid = b.lineid AND
a.linedir = b.linedir AND
a.stopInfo = b.stopInfo AND
a.internalIndex = b.internalIndex AND
a.timeIndex = b.timeIndex-1 AND
a.dayType = b.dayType AND
a.quarter = b.quarter
) AS t2
SET t1.cfftOfVar = t2.factor
WHERE t1.lineid = t2.lineid AND
t1.linedir = t2.linedir AND
t1.stopInfo = t2.stopInfo AND
t1.internalIndex = t2.internalIndex AND
t1.timeIndex = t2.timeIndex AND
t1.dayType = t2.dayType AND
t1.quarter = t2.quarter
''')
conn.commit()
conn.close()
| mit | 4,831,342,640,800,055,000 | 29.880952 | 135 | 0.650859 | false |
lvmgeo/GISPython | GISPython/AGServerHelperNTLM.py | 1 | 22192 | # -*- coding: utf-8 -*-
"""
Module for operations with ArcGIS Server services
"""
import urllib
import json
import urllib2
import ssl
import urlparse
from collections import OrderedDict
from ntlm import HTTPNtlmAuthHandler
import MyError
class AGServerHelperNTLM(object):
"""Class for operations with ArcGIS Server services"""
def __init__(self,
username,
password,
ags_admin_url,
tool=None,
basic=False,
allowunverifiedssl=False):
"""Class initialization procedure
Args:
self: The reserved object 'self'
username: ArcGIS Server administrator username
password: ArcGIS Server administrator password
ags_admin_url: ArcGIS server rest admin url
Tool: GISPython tool (optional)
basic: bool indicating that Basic autentification will be used instead of NTLM
"""
self.username = username
self.password = password
self.ags_admin_url = ags_admin_url
self.serverurl = ags_admin_url
if self.ags_admin_url.endswith("/"):
self.ags_admin_url = self.ags_admin_url[:-1]
self.Tool = tool
self.ags_admin_url = urlparse.urljoin(self.ags_admin_url, '/arcgis/admin')
if allowunverifiedssl:
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self.ags_admin_url, self.username, self.password)
if not basic:
auth_handler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(passman)
else:
auth_handler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
def __request_from_server(self, adress, params, content_type='application/json', method="POST"):
"""Function for ntlm request creation
Args:
self: The reserved object 'self'
adress: Adress of request
params: Params as dictionary
content_type: Http content type
method: Http method
Returns:
Response string
"""
data = urllib.urlencode(params)
url_address = urlparse.urljoin(self.ags_admin_url + "/", adress)
req = urllib2.Request(url=url_address, data=data)
req.add_header('Content-Type', content_type)
req.get_method = lambda: method
response = urllib2.urlopen(req)
response_string = response.read()
if not response.code == 200:
raise MyError.MyError("Error: in getting url: {0}?{1} {2} message: {3}".format(
url_address, data, method, response.msg))
return response_string
def __assert_json_success(self, data):
"""Function for aserting json request state
Args:
self: The reserved object 'self'
data: Request response string
Returns:
boolean False if request has errors
"""
obj = json.loads(data)
if 'status' in obj and obj['status'] == "error":
raise MyError.MyError("Error: JSON object returns an error. " + str(obj))
else:
return True
def __process_folder_string(self, folder):
"""Function for processing folder name string
Args:
self: The reserved object 'self'
folder: folder string
Returns:
corrected folder string
"""
if folder is None:
folder = "ROOT"
if folder.upper() == "ROOT":
folder = ""
else:
folder += "/"
return folder
def startService(self, folder, service):
"""Starts AGS Service
Args:
folder (string): AGS folder of the service. (CASE sensitive) Use ROOT for services without folder.
service (string): Service name (CASE sensitive)
"""
self.__start_stop_service(folder, service, "start")
def stopService(self, folder, service):
"""Stops AGS Service
Args:
folder (string): AGS folder of the service. (CASE sensitive) Use ROOT for services without folder.
service (string): Service name (CASE sensitive)
"""
self.__start_stop_service(folder, service, "stop")
def __start_stop_service(self, folder, service, action):
folder = self.__process_folder_string(folder)
folder_url = "services/" + folder + service + "/" + action + '?f=json'
params = {}
data = self.__request_from_server(folder_url, params)
if not self.__assert_json_success(data):
raise MyError.MyError("Error when reading folder information. " + str(data))
else:
if self.Tool != None:
self.Tool.AddMessage("Service {}{} {} done successfully ...".format(
folder, service, action))
def getServiceList(self, folder, return_running_state=True):
"""Retrieve ArcGIS server services
Args:
self: The reserved object 'self'
folder: Folder of the service (ROOT for root services)
"""
services = []
folder = self.__process_folder_string(folder)
url = "services/{}?f=json".format(folder)
params = {}
data = self.__request_from_server(url, params)
try:
service_list = json.loads(data)
except urllib2.URLError, exception:
raise MyError.MyError(exception)
for single in service_list["services"]:
services.append(folder + single['serviceName'] + '.' + single['type'])
        folder_list = service_list["folders"] if u'folders' in service_list else []
        if u'Utilities' in folder_list:
            folder_list.remove("Utilities")
        if u'System' in folder_list:
            folder_list.remove("System")
if folder_list:
for subfolder in folder_list:
url = "services/{}?f=json".format(subfolder)
data = self.__request_from_server(url, params)
subfolder_list = json.loads(data)
for single in subfolder_list["services"]:
services.append(subfolder + "//" + single['serviceName'] + '.' + single['type'])
if not services:
if self.Tool != None:
self.Tool.AddMessage("No services found")
else:
if self.Tool != None:
self.Tool.AddMessage("Services on " + self.serverurl +":")
for service in services:
if return_running_state:
status_url = "services/{}/status?f=json".format(service)
data = self.__request_from_server(status_url, params)
status = json.loads(data)
if self.Tool != None:
self.Tool.AddMessage(" " + status["realTimeState"] + " > " + service)
else:
if self.Tool != None:
self.Tool.AddMessage(" > " + service)
return services
def GetServerJson(self, server_service):
"""Retrieve service parameters
Args:
self: The reserved object 'self'
serverService: Service which parameter configuration shall be retrieved
Returns:
json data object
"""
service_url = "services/" + server_service + "?f=json"
params = {}
data = self.__request_from_server(service_url, params, method='GET')
if not self.__assert_json_success(data):
raise MyError.MyError(
u'...Couldn\'t retrieve service parameter configuration: {}\n'.format(data))
else:
if self.Tool != None:
self.Tool.AddMessage(u'...Service parameter configuration successfully retrieved\n')
data_object = json.loads(data)
return data_object
def publishServerJson(self, service, data_object):
"""Publish service parameters to server
Args:
self: The reserved object 'self'
service: Service which parameter configuration shall be renewed
data_object: Parameter configuration
"""
updated_svc_json = json.dumps(data_object)
edit_svc_url = "services/" + service + "/edit"
params = {'f': 'json', 'service': updated_svc_json}
edit_data = self.__request_from_server(edit_svc_url,
params,
'application/x-www-form-urlencoded',
method='POST')
if not self.__assert_json_success(edit_data):
if self.Tool != None:
self.Tool.AddMessage(
u'...Service configuration renewal error: {}\n'.format(edit_data))
else:
if self.Tool != None:
self.Tool.AddMessage(u'...Service configuration succesfully renewed\n')
return
def getServiceFromServer(self, services, service, serviceDir):
"""Retrieve the full service name from the server
Args:
self: The reserved object 'self'
services: List of all services on server
service: Name of the service from which to get corresponding name from the server services list
serviceDir: Name of the service directory which is shown in the configuration of services to be published on the server
"""
server_service = ''
if serviceDir is None:
config_service = service
else:
config_service = serviceDir + "/" + service
for server_service in services:
if server_service.split('.')[0].upper() == config_service.upper():
return server_service
return ''
def getServicePermisions(self, folder, service):
"""Check service permisions
Args:
self: The reserved object 'self'
folder: Service directory
service: Name of a service
Returns: Dictionary of service principals
"""
folder = self.__process_folder_string(folder)
status_url = "services/" + folder + service + "/permissions?f=pjson"
params = {}
permisions_data = self.__request_from_server(status_url, params, method='GET')
if not self.__assert_json_success(permisions_data):
raise MyError.MyError(
"Error while retrieving permisions information for {}.".format(service))
statusdata_object = json.loads(permisions_data)
return_dict = OrderedDict()
for permision in statusdata_object["permissions"]:
principal = permision["principal"]
if permision.has_key("permission"):
if permision["permission"].has_key("isAllowed"):
return_dict[principal] = permision["permission"]["isAllowed"]
else:
return_dict[principal] = True
return return_dict
def addServicePermisions(self, folder, service, principal, is_allowed='true'):
"""Add service permisions
Args:
self: The reserved object 'self'
folder: Service directory
service: Name of a service
principal: The name of the role for whom the permission is being assigned.
is_allowed: Tells if access to a resource is allowed or denied.
"""
urlparams = urllib.urlencode({'principal':principal, 'isAllowed':is_allowed, 'f':'json'})
folder = self.__process_folder_string(folder)
status_url = "services/" + folder + service + "/permissions/add?{}".format(urlparams)
params = {}
permisions_data = self.__request_from_server(status_url, params, method='POST')
if not self.__assert_json_success(permisions_data):
raise MyError.MyError(
"Error while setting permisions information for {} to {}.".format(
service, principal))
def isServiceRunning(self, folder, service):
"""Check if service is running
Args:
self: The reserved object 'self'
folder: Service directory
service: Name of a service
Returns: True if is running
"""
folder = self.__process_folder_string(folder)
status_url = "services/" + folder + service + "/status?f=json"
params = {}
status_data = self.__request_from_server(status_url, params, method='GET')
if not self.__assert_json_success(status_data):
raise MyError.MyError(
"Error while retrieving status information for {}.".format(service))
statusdata_object = json.loads(status_data)
if statusdata_object['realTimeState'] == "STOPPED":
return False
else:
return True
def GetDatasetNames(self, folder, service):
"""Retrieve the service Dataset Names from the server
Args:
self: The reserved object 'self'
folder: Service directory
service: Name of a service
Returns: list of strings
"""
folder = self.__process_folder_string(folder)
manifest_url = "services/" + folder + service + "/iteminfo/manifest/manifest.json?f=json"
params = {}
status_data = self.__request_from_server(manifest_url, params, method='GET')
rezult = list()
if not self.__assert_json_success(status_data):
raise MyError.MyError(
"Error while retrieving manifest information for {}.".format(service))
statusdata_object = json.loads(status_data)
for database in statusdata_object['databases']:
rezult.append(database['onServerName'])
return rezult
def getRoles(self, pageSize=5000):
"""Retrieve the Role Names from the server
Args:
self: The reserved object 'self'
Returns: list of strings
"""
manifest_url = "security/roles/getRoles?startIndex=0&pageSize={}&f=json".format(pageSize)
params = {}
roles_data = self.__request_from_server(manifest_url, params, method='POST')
rezult = list()
if not self.__assert_json_success(roles_data):
raise MyError.MyError("Error while retrieving role information.")
roles = json.loads(roles_data)
for role in roles['roles']:
if role.has_key("description"):
rezult.append({"rolename": role["rolename"],
"description": role["description"]})
else:
rezult.append({"rolename": role["rolename"]})
return rezult
def addRole(self, rolename, description=''):
"""Retrieve the Role Names from the server
Args:
self: The reserved object 'self'
rolename: The name of the role. The name must be unique in the role store.
"""
urlparams = urllib.urlencode({'rolename':rolename, 'description':description, 'f':'json'})
manifest_url = "security/roles/add?{}".format(urlparams)
params = {}
roles_data = self.__request_from_server(manifest_url, params, method='POST')
if not self.__assert_json_success(roles_data):
raise MyError.MyError("Error while adding role {}".format(rolename))
def removeRole(self, rolename):
"""Retrieve the Role Names from the server
Args:
self: The reserved object 'self'
rolename: The name of the role.
"""
urlparams = urllib.urlencode({'rolename':rolename, 'f':'json'})
manifest_url = "security/roles/remove?{}".format(urlparams)
params = {}
roles_data = self.__request_from_server(manifest_url, params, method='POST')
if not self.__assert_json_success(roles_data):
raise MyError.MyError("Error while removing role {}".format(rolename))
def getUsersWithinRole(self, rolename, maxCount=5000):
"""Retrieve the Role Names from the server
Args:
self: The reserved object 'self'
Returns: list of strings
"""
urlparams = urllib.urlencode({'rolename':rolename, 'maxCount':maxCount, 'f':'json'})
manifest_url = "security/roles/getUsersWithinRole?{}".format(urlparams)
params = {}
users_data = self.__request_from_server(manifest_url, params, method='POST')
rezult = list()
if not self.__assert_json_success(users_data):
raise MyError.MyError("Error while retrieving role user information.")
users = json.loads(users_data)
for user in users['users']:
rezult.append(user)
return rezult
def addUsersToRole(self, rolename, users):
"""assign a role to multiple users with a single action
Args:
self: The reserved object 'self'
rolename: The name of the role.
users: A comma-separated list of user names. Each user name must exist in the user store.
"""
urlparams = urllib.urlencode({'rolename':rolename, 'users':users, 'f':'json'})
manifest_url = "security/roles/addUsersToRole?{}".format(urlparams)
params = {}
roles_data = self.__request_from_server(manifest_url, params, method='POST')
if not self.__assert_json_success(roles_data):
raise MyError.MyError(
"Error while adding users [{1}] to a role {0}".format(rolename, users))
def removeUsersFromRole(self, rolename, users):
"""Removes a role assignment from multiple users
Args:
self: The reserved object 'self'
rolename: The name of the role.
users: A comma-separated list of user names. Each user name must exist in the user store.
"""
urlparams = urllib.urlencode({'rolename':rolename, 'users':users, 'f':'json'})
manifest_url = "security/roles/removeUsersFromRole?{}".format(urlparams)
params = {}
roles_data = self.__request_from_server(manifest_url, params, method='POST')
if not self.__assert_json_success(roles_data):
raise MyError.MyError(
"Error while removing users [{1}] from a role {0}".format(rolename, users))
def GetDatasetNamesWithObjects(self, folder, service):
"""Retrieve the service Dataset Names from the server
Args:
self: The reserved object 'self'
folder: Service directory
service: Name of a service
        Returns: list of dicts, one per database, with the database name and its dataset names
"""
folder = self.__process_folder_string(folder)
manifest_url = "services/" + folder + service + "/iteminfo/manifest/manifest.json?f=json"
params = {}
status_data = self.__request_from_server(manifest_url, params, method='GET')
rezult = list()
if not self.__assert_json_success(status_data):
raise MyError.MyError("Error while retrieving manifest information for " + service + ".")
statusdata_object = json.loads(status_data)
for database in statusdata_object['databases']:
dataset_names = [d['onServerName'] for d in database['datasets']]
item = {"database": database['onServerName'], "datasets":dataset_names}
rezult.append(item)
return rezult
def GetRightsGroupsNames(self, folder, service):
"""Retrieve the service permission role names from service
Args:
self: The reserved object 'self'
folder: Service directory
service: Name of a service
Returns: list of strings
"""
folder = self.__process_folder_string(folder)
manifest_url = "services/" + folder + service + "/permissions?f=json"
params = {}
status_data = self.__request_from_server(manifest_url, params, method='GET')
rezult = list()
if not self.__assert_json_success(status_data):
raise MyError.MyError(
"Error while retrieving permissions information for {}.".format(service))
permissions = json.loads(status_data)
for permission in permissions['permissions']:
rezult.append(permission['principal'])
return rezult
def GetServiceInfo(self, folder):
"""Retrieve the Folder List from the server
Args:
self: The reserved object 'self'
folder: Service directory
Returns: list of service objects
"""
folder = self.__process_folder_string(folder)
manifest_url = "services/" + folder + "/?f=json"
params = {}
status_data = self.__request_from_server(manifest_url, params, method='GET')
rezult = list()
if not self.__assert_json_success(status_data):
raise MyError.MyError("Error while retrieving folder information.")
statusdata_object = json.loads(status_data)
rezult.append(statusdata_object)
folderlist = list()
for folder_detail in statusdata_object['foldersDetail']:
folderlist.append(folder_detail['folderName'])
for subfolder in folderlist:
if not (subfolder.upper() == 'System'.upper()
or subfolder.upper() == 'Utilities'.upper()):
manifest_url = "services/" + subfolder + "/?f=json"
status_data = self.__request_from_server(manifest_url, params, method='GET')
statusdata_object = json.loads(status_data)
rezult.append(statusdata_object)
return rezult
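
# Hedged usage sketch (added for illustration): the admin URL, credentials and the
# service/folder names below are placeholders, not values taken from this project.
if __name__ == '__main__':
    helper = AGServerHelperNTLM(username='admin',
                                password='secret',
                                ags_admin_url='https://gisserver.example.com/',
                                basic=True,
                                allowunverifiedssl=True)
    for service_name in helper.getServiceList('ROOT'):
        print(service_name)
    helper.stopService('ROOT', 'SampleWorldCities.MapServer')
    helper.startService('ROOT', 'SampleWorldCities.MapServer')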
| gpl-3.0 | 1,872,061,421,466,058,500 | 37 | 131 | 0.586698 | false |
a-lost-shadow/shadowcon | convention/utils.py | 1 | 1782 | from django.utils import timezone
from .models import ConInfo, Registration, BlockRegistration, TimeBlock, get_choice, Game
def friendly_username(user):
name = user.first_name + " " + user.last_name
name = name.strip()
if "" == name:
name = user.username
return name
def get_con_value(parameter):
con_objects = ConInfo.objects.all()
if len(con_objects) == 0:
raise ValueError("No con object found")
elif len(con_objects) > 1:
raise ValueError("Multiple con objects found")
info = con_objects[0]
return getattr(info, parameter)
def is_registration_open():
open_date = get_con_value("registration_opens")
return open_date <= timezone.now()
def is_pre_reg_open(user):
if user and user.id is not None:
return len(Game.objects.filter(user=user)) > 0
else:
return False
def get_registration(user):
registration = []
registration_object = Registration.objects.filter(user=user)
if registration_object:
item_dict = {}
for item in BlockRegistration.objects.filter(registration=registration_object):
item_dict[item.time_block] = item
if item.attendance != BlockRegistration.ATTENDANCE_NO:
registration.append("%s: %s" % (item.time_block, get_choice(item.attendance,
BlockRegistration.ATTENDANCE_CHOICES)))
for time_block in TimeBlock.objects.exclude(text__startswith='Not').order_by('sort_id'):
if time_block.text not in item_dict:
registration.append("<b>Partially Registered: Please re-register</b>")
break
else:
registration.append("Not Registered")
return registration
| gpl-3.0 | -3,381,440,770,113,010,000 | 30.821429 | 115 | 0.627946 | false |
jbalogh/zamboni | apps/discovery/cron.py | 1 | 2467 | import httplib
import os
import shutil
import time
import urllib2
from tempfile import NamedTemporaryFile
from django.conf import settings
import commonware.log
from pyquery import PyQuery as pq
import cronjobs
from .models import BlogCacheRyf
log = commonware.log.getLogger('z.cron')
RYF_IMAGE_PATH = os.path.join(settings.NETAPP_STORAGE, 'ryf')
@cronjobs.register
def fetch_ryf_blog():
"""Currently used in the discovery pane from the API. This job queries
rockyourfirefox.com and pulls the latest entry from the RSS feed. """
url = "http://rockyourfirefox.com/feed/"
try:
p = pq(url=url)
except (urllib2.URLError, httplib.HTTPException), e:
log.error("Couldn't open (%s): %s" % (url, e))
return
item = p('item:first')
# There should only be one row in this table, ever.
try:
page = BlogCacheRyf.objects.all()[0]
except IndexError:
page = BlogCacheRyf()
page.title = item('title').text()
page.excerpt = item('description').text()
page.permalink = item('link').text()
rfc_2822_format = "%a, %d %b %Y %H:%M:%S +0000"
t = time.strptime(item('pubDate').text(), rfc_2822_format)
page.date_posted = time.strftime("%Y-%m-%d %H:%M:%S", t)
# Another request because we have to get the image URL from the page. :-/
    # A newer version of the feed includes <content:encoded>, but we'd have to use
# etree for that and I don't want to redo it right now.
try:
p = pq(url=page.permalink)
except urllib2.URLError, e:
log.error("Couldn't open (%s): %s" % (url, e))
return
# We want the first image in the post
image = p('.entry-content').find('img:first').attr('src')
if image:
offset = image.find('/uploads')
if not image or offset == -1:
log.error("Couldn't find a featured image for blog post (%s). "
"Fligtar said this would never happen." % page.permalink)
return
try:
img = urllib2.urlopen(image)
except urllib2.HTTPError, e:
log.error("Error fetching ryf image: %s" % e)
return
img_tmp = NamedTemporaryFile(delete=False)
img_tmp.write(img.read())
img_tmp.close()
image_basename = os.path.basename(image)
if not os.path.exists(RYF_IMAGE_PATH):
os.makedirs(RYF_IMAGE_PATH)
shutil.move(img_tmp.name, os.path.join(RYF_IMAGE_PATH, image_basename))
page.image = image_basename
page.save()
| bsd-3-clause | 4,913,052,559,919,793,000 | 27.686047 | 79 | 0.639643 | false |
xaxa89/mitmproxy | mitmproxy/contrib/wbxml/ASWBXMLCodePage.py | 1 | 1770 | #!/usr/bin/env python
'''
@author: David Shaw, [email protected]
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: ASWBXMLCodePage.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class ASWBXMLCodePage:
def __init__(self):
self.namespace = ""
self.xmlns = ""
self.tokenLookup = {}
self.tagLookup = {}
def addToken(self, token, tag):
self.tokenLookup[token] = tag
self.tagLookup[tag] = token
def getToken(self, tag):
if tag in self.tagLookup:
return self.tagLookup[tag]
return 0xFF
def getTag(self, token):
if token in self.tokenLookup:
return self.tokenLookup[token]
return None
def __repr__(self):
return str(self.tokenLookup)
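
# Hedged usage sketch (added for illustration; the token value and tag name below are
# made-up examples rather than definitions from a real ActiveSync code page):
if __name__ == '__main__':
	page = ASWBXMLCodePage()
	page.addToken(0x05, "Sync")
	assert page.getToken("Sync") == 0x05
	assert page.getTag(0x05) == "Sync"
	assert page.getToken("Unknown") == 0xFF
	assert page.getTag(0x06) is None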
| mit | 4,691,102,491,026,070,000 | 33.038462 | 77 | 0.757627 | false |
guillaume-philippon/aquilon | tests/broker/test_update_alias.py | 1 | 13569 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update archetype command."""
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
from eventstest import EventsTestMixin
class TestUpdateAlias(EventsTestMixin, TestBrokerCommand):
def test_100_update(self):
command = ["update", "alias",
"--fqdn", "alias2host.aqd-unittest.ms.com",
"--target", "arecord14.aqd-unittest.ms.com"]
self.noouttest(command)
def test_110_update_mscom(self):
self.event_upd_dns('alias.ms.com')
command = ["update", "alias", "--fqdn", "alias.ms.com",
"--target", "arecord14.aqd-unittest.ms.com",
"--comments", "New alias comments"]
self.dsdb_expect("update_host_alias "
"-alias alias.ms.com "
"-new_host arecord14.aqd-unittest.ms.com "
"-new_comments New alias comments")
self.noouttest(command)
self.dsdb_verify()
def test_200_missing_target(self):
command = ["update", "alias",
"--fqdn", "alias2host.aqd-unittest.ms.com",
"--target", "no-such-name.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Target FQDN no-such-name.aqd-unittest.ms.com "
"does not exist in DNS environment internal.",
command)
def test_210_not_an_alias(self):
command = ["update", "alias",
"--fqdn", "arecord13.aqd-unittest.ms.com",
"--target", "arecord14.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Alias arecord13.aqd-unittest.ms.com not found.",
command)
def test_300_verify_alias(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias2host.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Target: arecord14.aqd-unittest.ms.com", command)
def test_310_verify_mscom(self):
command = ["search", "dns", "--fullinfo", "--fqdn", "alias.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Target: arecord14.aqd-unittest.ms.com", command)
self.matchoutput(out, "Comments: New alias comments", command)
def test_320_verify_oldtarget(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "arecord13.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "alias2host.aqd-unittest.ms.com", command)
self.matchclean(out, "alias.ms.com", command)
def test_330_verify_newtarget(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "arecord14.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Aliases: alias.ms.com, "
"alias2alias.aqd-unittest.ms.com, "
"alias2host.aqd-unittest.ms.com", command)
def test_400_repoint_restrict1(self):
command = ["update", "alias", "--fqdn", "restrict1.aqd-unittest.ms.com",
"--target", "target2.restrict.aqd-unittest.ms.com"]
out = self.statustest(command)
self.matchoutput(out,
"WARNING: Will create a reference to "
"target2.restrict.aqd-unittest.ms.com, but ",
command)
def test_410_verify_target(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "target.restrict.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.searchoutput(out, r'Aliases: restrict2.aqd-unittest.ms.com$',
command)
def test_410_verify_target2(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "target2.restrict.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.searchoutput(out, r'Aliases: restrict1.aqd-unittest.ms.com$',
command)
def test_420_repoint_restrict2(self):
command = ["update", "alias", "--fqdn", "restrict2.aqd-unittest.ms.com",
"--target", "target2.restrict.aqd-unittest.ms.com"]
self.noouttest(command)
def test_430_verify_target_gone(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "target.restrict.aqd-unittest.ms.com"]
self.notfoundtest(command)
def test_430_verify_target2(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "target2.restrict.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
"Aliases: restrict1.aqd-unittest.ms.com, "
"restrict2.aqd-unittest.ms.com",
command)
def test_500_repoint2diff_environment(self):
command = ["update", "alias",
"--fqdn", "alias2host.aqd-unittest-ut-env.ms.com",
"--dns_environment", "ut-env",
"--target", "alias13.aqd-unittest.ms.com",
"--target_environment", "ut-env"]
self.noouttest(command)
def test_505_verify_alias_repoint2diff_environment(self):
command = ["show", "alias",
"--fqdn", "alias2host.aqd-unittest-ut-env.ms.com",
"--dns_environment", "ut-env"]
out = self.commandtest(command)
self.matchoutput(out, "Alias: alias2host.aqd-unittest-ut-env.ms.com", command)
self.matchoutput(out, "Target: alias13.aqd-unittest.ms.com", command)
self.matchoutput(out, "DNS Environment: ut-env", command)
def test_600_update_ttl(self):
command = ["update", "alias",
"--fqdn", "alias2alias.aqd-unittest.ms.com",
"--ttl", 120]
self.noouttest(command)
def test_620_verify_update_ttl(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias2alias.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Alias: alias2alias.aqd-unittest.ms.com", command)
self.matchoutput(out, "TTL: 120", command)
def test_700_remove_ttl(self):
command = ["update", "alias",
"--fqdn", "alias2alias.aqd-unittest.ms.com",
"--clear_ttl"]
self.noouttest(command)
def test_720_verify_remove_ttl(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias2alias.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "TTL", command)
def test_800_update_grn(self):
command = ["update", "alias",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/unittest"]
self.noouttest(command)
def test_805_verify_update_grn(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest",
command)
def test_810_update_eon_id(self):
command = ["update", "alias",
"--fqdn", "alias3alias.aqd-unittest.ms.com",
"--eon_id", "2"]
self.noouttest(command)
def test_815_verify_update_eon_id(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias3alias.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd",
command)
def test_816_grn_conflict_with_multi_level_alias(self):
command = ["update", "alias",
"--fqdn", "alias2alias.aqd-unittest.ms.com",
"--target", "unittest00.one-nyp.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2alias.aqd-unittest.ms.com "
"is assoicated with GRN grn:/ms/ei/aquilon/aqd. "
"It conflicts with target "
"DNS Record unittest00.one-nyp.ms.com: "
"DNS Record unittest00.one-nyp.ms.com is a "
"primary name. GRN should not be set but derived "
"from the device.",
command)
def test_820_clear_grn(self):
command = ["update", "alias",
"--fqdn", "alias3alias.aqd-unittest.ms.com",
"--clear_grn"]
self.noouttest(command)
def test_825_verify_clear_grn(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "alias3alias.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Owned by GRN:", command)
def test_830_update_grn_conflict(self):
command = ["add", "alias",
"--fqdn", "temp-alias.aqd-unittest.ms.com",
"--target", "unittest00.one-nyp.ms.com"]
self.noouttest(command)
command = ["update", "alias",
"--fqdn", "temp-alias.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/aqd"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias temp-alias.aqd-unittest.ms.com depends on "
"DNS Record unittest00.one-nyp.ms.com. "
"It conflicts with GRN grn:/ms/ei/aquilon/aqd: "
"DNS Record unittest00.one-nyp.ms.com is a "
"primary name. GRN should not be set but derived "
"from the device.",
command)
command = ["del", "alias",
"--fqdn", "temp-alias.aqd-unittest.ms.com"]
self.noouttest(command)
def test_835_grn_conflict_with_primary_name(self):
command = ["update", "alias",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com",
"--target", "unittest00.one-nyp.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2host-grn.aqd-unittest.ms.com "
"is assoicated with GRN grn:/ms/ei/aquilon/unittest. "
"It conflicts with target "
"DNS Record unittest00.one-nyp.ms.com: "
"DNS Record unittest00.one-nyp.ms.com is a "
"primary name. GRN should not be set but derived "
"from the device.",
command)
def test_840_grn_conflict_with_service_address(self):
command = ["update", "alias",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com",
"--target", "zebra2.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2host-grn.aqd-unittest.ms.com "
"is assoicated with GRN grn:/ms/ei/aquilon/unittest. "
"It conflicts with target "
"DNS Record zebra2.aqd-unittest.ms.com: "
"DNS Record zebra2.aqd-unittest.ms.com is a "
"service address. GRN should not be set but derived "
"from the device.",
command)
def test_850_grn_conflict_with_interface_name(self):
command = ["update", "alias",
"--fqdn", "alias2host-grn.aqd-unittest.ms.com",
"--target", "unittest20-e1.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Alias alias2host-grn.aqd-unittest.ms.com "
"is assoicated with GRN grn:/ms/ei/aquilon/unittest. "
"It conflicts with target "
"DNS Record unittest20-e1.aqd-unittest.ms.com: "
"DNS Record unittest20-e1.aqd-unittest.ms.com is "
"already be used by the interfaces "
"unittest20.aqd-unittest.ms.com/eth1. "
"GRN should not be set but derived from the device.",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateAlias)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | -5,472,717,046,711,124,000 | 43.343137 | 86 | 0.541307 | false |
decatur/ansatz27 | build.py | 1 | 1335 | # Builds the README.md
# Looks for areas between two markers of the form
# [//]: # "filename(#hash)?"
# and replaces the text of those areas with the referenced file's contents.
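#
# Illustrative example (added): a processed region in README.md looks like the sketch
# below. The quoted filename is read from the test/ directory, and everything up to the
# next '[//]: #' marker is replaced by that file's contents wrapped in a fenced block
# whose language is derived from the extension ('m' -> MATLAB, 'json' -> JSON).
# "example.m" is a made-up filename used only for this note.
#
#   [//]: # "example.m"
#   ```MATLAB
#   ...contents of test/example.m...
#   ```
#   [//]: # "next marker"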
import codecs
import logging
import re
logging.basicConfig(level=20) # info
f = codecs.open("README.md", "r", "utf-8")
readme = f.read()
f.close()
def mergeText( readme, marker, text, mime ):
beginPos = readme.find(marker)
if beginPos == -1:
logging.error("Invalid marker %s" % marker)
return readme
endPos = readme.find('[//]: #', beginPos+1)
if mime == 'm': mime = 'MATLAB'
elif mime == 'json': mime = 'JSON'
else: mime = ''
readme = readme[1:beginPos] + marker + '\n```' + mime + '\n' + text + '\n```\n' + readme[endPos:]
return readme
def process( readme, marker ):
logging.info("Processing %s" % marker)
# marker is of the form
# [//]: # "filename"
m = re.match('.*"(.*)"', marker)
filename = m.group(1)
mime = filename[filename.find('.')+1:]
f = codecs.open('test/' + filename, "r", "utf-8")
text = f.read()
f.close()
return mergeText( readme, marker, text, mime )
markers = re.findall('\[//\]: # ".*"', readme)
# print(markers)
for marker in markers:
readme = process(readme, marker)
f = codecs.open("README.md", "w", "utf-8")
f.write(readme)
f.close() | mit | 8,769,968,715,114,151,000 | 23.740741 | 101 | 0.59176 | false |
rootfoo/libctf | libctf/process.py | 1 | 1037 | from subprocess import Popen, PIPE
import fcntl
import os
import time
import signal
class Process(object):
"""
	open a process with non-blocking stdout
	unlike communicate(), this allows interactive communication without
	waiting for the process to exit
"""
def __init__(self, exe):
self.exe = exe
self.process = Popen(exe, stdout=PIPE, stdin=PIPE, stderr=PIPE)
self._set_non_blocking(self.process.stdout)
def _set_non_blocking(self, handle):
fd = handle.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
def read(self):
try:
return self.process.stdout.read()
except:
return ""
def write(self, data):
self.process.stdin.write(data)
def run(self, sleep=.5):
r = self.process.poll()
time.sleep(sleep)
if self.process.returncode == -signal.SIGSEGV:
print "SIGSEGV"
else:
print self.process.returncode
return r
def attach_gdb(self):
raw_input("attach gdb now: \n $ gdb -p {pid}\n (gdb) attach {pid}\n".format(pid=self.process.pid))
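
# Hedged usage sketch (added for illustration): '/bin/cat' is only an example target
# binary, and the short sleep gives the child time to echo before the non-blocking read.
if __name__ == '__main__':
	p = Process(['/bin/cat'])
	p.write('hello\n')
	time.sleep(0.5)
	print p.read()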
| mit | -3,288,675,890,906,295,300 | 21.06383 | 102 | 0.693346 | false |
diffeo/rejester | rejester/tests/test_queue.py | 1 | 7584 | """Tests for rejester._queue
This software is released under an MIT/X11 open source license.
Copyright 2014 Diffeo, Inc.
"""
from __future__ import absolute_import
import logging
import os
import time
import pytest
from rejester.exceptions import ItemInUseError, LostLease
logger = logging.getLogger(__name__)
pytest_plugins = 'rejester.tests.fixtures'
def all_of_queue(queue):
while True:
item = queue.check_out_item(300)
if item is None: return
queue.return_item(item, None)
yield item
def test_worker_id(rejester_queue):
"""simple worker_id test"""
# since we start with a new namespace every time, the worker IDs
# should start fresh too
worker_id = rejester_queue.worker_id
assert worker_id == 1
def test_one(rejester_queue):
"""Add 'a' with priority 1, and get it back"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
rejester_queue.return_item(item, None)
item = rejester_queue.check_out_item(300)
assert item is None
def test_one_helper(rejester_queue):
"""Add 'a' with priority 1, and get it back"""
rejester_queue.add_item('a', 1)
assert list(all_of_queue(rejester_queue)) == ['a']
def test_interesting_priorities(rejester_queue):
"""Zero, one, many"""
rejester_queue.add_item('a', 1)
rejester_queue.add_item('b', 1048576)
rejester_queue.add_item('c', -65536)
rejester_queue.add_item('d', 0)
rejester_queue.add_item('e', 0.5)
rejester_queue.add_item('f', -0.1)
rejester_queue.add_item('g', 0.2)
assert list(all_of_queue(rejester_queue)) == ['b', 'a', 'e', 'g', 'd', 'f', 'c']
def test_add_checked_out(rejester_queue):
"""Adding a checked-out item should fail"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
with pytest.raises(ItemInUseError):
rejester_queue.add_item('a', 1)
def test_two(rejester_queue):
"""Add two items, out of order, and get them back"""
rejester_queue.add_item('a', 2)
rejester_queue.add_item('b', 1)
assert list(all_of_queue(rejester_queue)) == ['a', 'b']
rejester_queue.add_item('a', 1)
rejester_queue.add_item('b', 2)
assert list(all_of_queue(rejester_queue)) == ['b', 'a']
def test_reprioritize(rejester_queue):
"""Use add_item to change the priority of something"""
rejester_queue.add_item('a', 1)
rejester_queue.add_item('b', 2)
rejester_queue.add_item('b', 0)
assert list(all_of_queue(rejester_queue)) == ['a', 'b']
def test_requeue(rejester_queue):
"""Pull items out and put them back"""
rejester_queue.add_item('a', 3)
rejester_queue.add_item('b', 2)
item = rejester_queue.check_out_item(300)
assert item == 'a'
rejester_queue.return_item(item, 3)
item = rejester_queue.check_out_item(300)
assert item == 'a'
rejester_queue.return_item(item, 1)
assert list(all_of_queue(rejester_queue)) == ['b', 'a']
def test_wrong_worker(rejester_queue, rejester_queue2):
"""If one queue instance checks out an item, another can't return it"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
# NOTE: this also implicitly tests that creating a new queue
# object gets a different worker ID
with pytest.raises(LostLease):
rejester_queue2.return_item(item, None)
def test_check_out_two(rejester_queue):
"""Nothing prevents the same worker from checking out two things"""
rejester_queue.add_item('a', 2)
rejester_queue.add_item('b', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
item = rejester_queue.check_out_item(300)
assert item == 'b'
rejester_queue.return_item('a', None)
rejester_queue.return_item('b', 1)
assert list(all_of_queue(rejester_queue)) == ['b']
def test_reserve(rejester_queue, rejester_queue2):
"""Basic reservation/priority test
1. Insert a@3, b@2
2. Get, should return 'a'
3. 'a' reserves 'b'
4. Get, should be empty
5. Return a@1
6. Get, should return 'b'
"""
rejester_queue.add_item('a', 3)
rejester_queue.add_item('b', 2)
item = rejester_queue.check_out_item(300)
assert item == 'a'
reserved = rejester_queue.reserve_items(item, 'b')
assert reserved == ['b']
assert list(all_of_queue(rejester_queue2)) == []
rejester_queue.return_item(item, 1)
assert list(all_of_queue(rejester_queue2)) == ['b', 'a']
def test_reserve_twice(rejester_queue):
"""Reserving an item that's already reserved is a no-op"""
rejester_queue.add_item('a', 2)
rejester_queue.add_item('b', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
reserved = rejester_queue.reserve_items(item, 'b')
assert reserved == ['b']
reserved = rejester_queue.reserve_items(item, 'b')
assert reserved == []
def test_reserve_nonexistent(rejester_queue):
"""Reserving an item that doesn't exist is a no-op"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
reserved = rejester_queue.reserve_items(item, 'b')
assert reserved == []
rejester_queue.return_item(item, None)
def test_reserve_self(rejester_queue):
"""Reserving yourself is a no-op"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(300)
assert item == 'a'
reserved = rejester_queue.reserve_items(item, 'a')
assert reserved == []
rejester_queue.return_item(item, None)
def test_basic_expire(rejester_queue):
"""Basic expiration test"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(1)
assert item == 'a'
time.sleep(3)
with pytest.raises(LostLease):
rejester_queue.return_item(item, None)
def test_expiration_releases_reservations(rejester_queue, rejester_queue2):
"""If a reserves b, and a expires, then b should be released"""
rejester_queue.add_item('a', 2)
rejester_queue.add_item('b', 1)
item = rejester_queue.check_out_item(1)
assert item == 'a'
reserved = rejester_queue.reserve_items(item, 'b')
assert reserved == ['b']
assert list(all_of_queue(rejester_queue2)) == []
time.sleep(3)
assert list(all_of_queue(rejester_queue2)) == ['a', 'b']
def test_renew(rejester_queue):
"""Basic renew test"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(2)
assert item == 'a'
time.sleep(1)
rejester_queue.renew_item(item, 3)
time.sleep(2)
# at this point we have slept(3), which is after the original
# expiration, so we should *not* get LostLease because we renewed
rejester_queue.return_item(item, None)
def test_renew_overdue(rejester_queue):
"""Can't renew something that's already expired"""
rejester_queue.add_item('a', 1)
item = rejester_queue.check_out_item(1)
assert item == 'a'
time.sleep(3)
with pytest.raises(LostLease):
rejester_queue.renew_item(item, 3)
def test_dump(rejester_queue):
"""dump_queue() shouldn't crash; doesn't validate output"""
rejester_queue.add_item('a', 1)
rejester_queue.add_item('b', 0)
item = rejester_queue.check_out_item(300)
assert item == 'a'
reserved = rejester_queue.reserve_items(item, 'b')
assert reserved == ['b']
rejester_queue.dump_queue('worker', 'available', 'priorities', 'expiration',
'workers', 'reservations_a', 'reservations_b',
'foo')
| mit | -3,833,877,877,336,585,700 | 33.162162 | 84 | 0.646493 | false |
pinac0099/dynamic-bus-scheduling | src/common/functions.py | 1 | 7323 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
- LICENCE
The MIT License (MIT)
Copyright (c) 2016 Eleftherios Anagnostopoulos for Ericsson AB (EU FP7 CityPulse Project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
- DESCRIPTION OF DOCUMENTS
-- MongoDB Database Documents:
address_document: {
'_id', 'name', 'node_id', 'point': {'longitude', 'latitude'}
}
bus_line_document: {
'_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}]
}
bus_stop_document: {
'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}
}
bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_object_id]]
}
bus_vehicle_document: {
'_id', 'bus_vehicle_id', 'maximum_capacity',
'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
}
detailed_bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_document]]
}
edge_document: {
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}
node_document: {
'_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'}
}
point_document: {
'_id', 'osm_id', 'point': {'longitude', 'latitude'}
}
timetable_document: {
'_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id',
'timetable_entries': [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers',
'number_of_deboarding_passengers', 'number_of_current_passengers',
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}],
'travel_requests': [{
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}]
}
traffic_event_document: {
'_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 'latitude'}, 'datetime'
}
travel_request_document: {
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}
way_document: {
'_id', 'osm_id', 'tags', 'references'
}
-- Route Generator Responses:
get_route_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}
get_route_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}]
get_waypoints_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}
get_waypoints_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}]
"""
__author__ = 'Eleftherios Anagnostopoulos'
__email__ = '[email protected]'
__credits__ = [
'Azadeh Bararsani (Senior Researcher at Ericsson AB) - email: [email protected]'
'Aneta Vulgarakis Feljan (Senior Researcher at Ericsson AB) - email: [email protected]'
]
def quicksort(list_to_be_sorted, comparison_list, low, high):
if low < high:
p = partition(list_to_be_sorted=list_to_be_sorted, comparison_list=comparison_list, low=low, high=high)
quicksort(list_to_be_sorted=list_to_be_sorted, comparison_list=comparison_list, low=low, high=p-1)
quicksort(list_to_be_sorted=list_to_be_sorted, comparison_list=comparison_list, low=p+1, high=high)
def partition(list_to_be_sorted, comparison_list, low, high):
pivot = comparison_list[high]
i = low
for j in range(low, high):
if comparison_list[j] <= pivot:
swap(l=list_to_be_sorted, first=i, second=j)
swap(l=comparison_list, first=i, second=j)
i += 1
swap(l=list_to_be_sorted, first=i, second=high)
swap(l=comparison_list, first=i, second=high)
return i
def swap(l, first, second):
temp = l[first]
l[first] = l[second]
l[second] = temp
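
# Hedged usage sketch (added for illustration): quicksort() reorders list_to_be_sorted
# in place according to the values in comparison_list, keeping the two lists aligned.
# The sample data below is made up.
if __name__ == '__main__':
    labels = ['c', 'a', 'b']
    keys = [3, 1, 2]
    quicksort(list_to_be_sorted=labels, comparison_list=keys, low=0, high=len(keys) - 1)
    print(labels)  # expected: ['a', 'b', 'c']
    print(keys)    # expected: [1, 2, 3]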
| mit | 5,143,557,972,820,507,000 | 41.086207 | 111 | 0.62529 | false |
slackapi/python-slackclient | slack_sdk/web/legacy_base_client.py | 1 | 22083 | """A Python module for interacting with Slack's Web API."""
import asyncio
import copy
import hashlib
import hmac
import io
import json
import logging
import mimetypes
import urllib
import uuid
import warnings
from http.client import HTTPResponse
from ssl import SSLContext
from typing import BinaryIO, Dict, List
from typing import Optional, Union
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen, OpenerDirector, ProxyHandler, HTTPSHandler
import aiohttp
from aiohttp import FormData, BasicAuth
import slack_sdk.errors as err
from slack_sdk.errors import SlackRequestError
from .async_internal_utils import _files_to_data, _get_event_loop, _request_with_session
from .deprecation import show_2020_01_deprecation
from .internal_utils import (
convert_bool_to_0_or_1,
get_user_agent,
_get_url,
_build_req_args,
)
from .legacy_slack_response import LegacySlackResponse as SlackResponse
class LegacyBaseClient:
BASE_URL = "https://www.slack.com/api/"
def __init__(
self,
token: Optional[str] = None,
base_url: str = BASE_URL,
timeout: int = 30,
loop: Optional[asyncio.AbstractEventLoop] = None,
ssl: Optional[SSLContext] = None,
proxy: Optional[str] = None,
run_async: bool = False,
use_sync_aiohttp: bool = False,
session: Optional[aiohttp.ClientSession] = None,
headers: Optional[dict] = None,
user_agent_prefix: Optional[str] = None,
user_agent_suffix: Optional[str] = None,
):
self.token = None if token is None else token.strip()
self.base_url = base_url
self.timeout = timeout
self.ssl = ssl
self.proxy = proxy
self.run_async = run_async
self.use_sync_aiohttp = use_sync_aiohttp
self.session = session
self.headers = headers or {}
self.headers["User-Agent"] = get_user_agent(
user_agent_prefix, user_agent_suffix
)
self._logger = logging.getLogger(__name__)
self._event_loop = loop
def api_call( # skipcq: PYL-R1710
self,
api_method: str,
*,
http_verb: str = "POST",
files: dict = None,
data: Union[dict, FormData] = None,
params: dict = None,
json: dict = None, # skipcq: PYL-W0621
headers: dict = None,
auth: dict = None,
) -> Union[asyncio.Future, SlackResponse]:
"""Create a request and execute the API call to Slack.
Args:
api_method (str): The target Slack API method.
e.g. 'chat.postMessage'
http_verb (str): HTTP Verb. e.g. 'POST'
files (dict): Files to multipart upload.
e.g. {image OR file: file_object OR file_path}
data: The body to attach to the request. If a dictionary is
provided, form-encoding will take place.
e.g. {'key1': 'value1', 'key2': 'value2'}
params (dict): The URL parameters to append to the URL.
e.g. {'key1': 'value1', 'key2': 'value2'}
json (dict): JSON for the body to attach to the request
(if files or data is not specified).
e.g. {'key1': 'value1', 'key2': 'value2'}
headers (dict): Additional request headers
auth (dict): A dictionary that consists of client_id and client_secret
Returns:
(SlackResponse)
The server's response to an HTTP request. Data
from the response can be accessed like a dict.
If the response included 'next_cursor' it can
be iterated on to execute subsequent requests.
Raises:
SlackApiError: The following Slack API call failed:
'chat.postMessage'.
SlackRequestError: Json data can only be submitted as
POST requests.
"""
api_url = _get_url(self.base_url, api_method)
        headers = headers or {}  # normalize before it may be written to below
        if isinstance(auth, dict):
            auth = BasicAuth(auth["client_id"], auth["client_secret"])
        elif isinstance(auth, BasicAuth):
            headers["Authorization"] = auth.encode()
        headers.update(self.headers)
req_args = _build_req_args(
token=self.token,
http_verb=http_verb,
files=files,
data=data,
params=params,
json=json, # skipcq: PYL-W0621
headers=headers,
auth=auth,
ssl=self.ssl,
proxy=self.proxy,
)
show_2020_01_deprecation(api_method)
if self.run_async or self.use_sync_aiohttp:
if self._event_loop is None:
self._event_loop = _get_event_loop()
future = asyncio.ensure_future(
self._send(http_verb=http_verb, api_url=api_url, req_args=req_args),
loop=self._event_loop,
)
if self.run_async:
return future
if self.use_sync_aiohttp:
# Using this is no longer recommended - just keep this for backward-compatibility
return self._event_loop.run_until_complete(future)
else:
return self._sync_send(api_url=api_url, req_args=req_args)
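    # Illustrative sketch (not executed; the token, channel and text below are
    # placeholders rather than values taken from this module):
    #
    #     client = LegacyBaseClient(token="xoxb-...")
    #     response = client.api_call(
    #         "chat.postMessage",
    #         json={"channel": "#random", "text": "Hello world!"},
    #     )
    #     assert response["ok"]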
# =================================================================
# aiohttp based async WebClient
# =================================================================
async def _send(
self, http_verb: str, api_url: str, req_args: dict
) -> SlackResponse:
"""Sends the request out for transmission.
Args:
http_verb (str): The HTTP verb. e.g. 'GET' or 'POST'.
api_url (str): The Slack API url. e.g. 'https://slack.com/api/chat.postMessage'
req_args (dict): The request arguments to be attached to the request.
e.g.
{
json: {
'attachments': [{"pretext": "pre-hello", "text": "text-world"}],
'channel': '#random'
}
}
Returns:
The response parsed into a SlackResponse object.
"""
open_files = _files_to_data(req_args)
try:
if "params" in req_args:
# True/False -> "1"/"0"
req_args["params"] = convert_bool_to_0_or_1(req_args["params"])
res = await self._request(
http_verb=http_verb, api_url=api_url, req_args=req_args
)
finally:
for f in open_files:
f.close()
data = {
"client": self,
"http_verb": http_verb,
"api_url": api_url,
"req_args": req_args,
"use_sync_aiohttp": self.use_sync_aiohttp,
}
return SlackResponse(**{**data, **res}).validate()
async def _request(self, *, http_verb, api_url, req_args) -> Dict[str, any]:
"""Submit the HTTP request with the running session or a new session.
Returns:
A dictionary of the response data.
"""
return await _request_with_session(
current_session=self.session,
timeout=self.timeout,
logger=self._logger,
http_verb=http_verb,
api_url=api_url,
req_args=req_args,
)
# =================================================================
# urllib based WebClient
# =================================================================
def _sync_send(self, api_url, req_args) -> SlackResponse:
params = req_args["params"] if "params" in req_args else None
data = req_args["data"] if "data" in req_args else None
files = req_args["files"] if "files" in req_args else None
_json = req_args["json"] if "json" in req_args else None
headers = req_args["headers"] if "headers" in req_args else None
token = params.get("token") if params and "token" in params else None
auth = (
req_args["auth"] if "auth" in req_args else None
) # Basic Auth for oauth.v2.access / oauth.access
if auth is not None:
if isinstance(auth, BasicAuth):
headers["Authorization"] = auth.encode()
elif isinstance(auth, str):
headers["Authorization"] = auth
else:
self._logger.warning(
f"As the auth: {auth}: {type(auth)} is unsupported, skipped"
)
body_params = {}
if params:
body_params.update(params)
if data:
body_params.update(data)
return self._urllib_api_call(
token=token,
url=api_url,
query_params={},
body_params=body_params,
files=files,
json_body=_json,
additional_headers=headers,
)
def _request_for_pagination(self, api_url, req_args) -> Dict[str, any]:
"""This method is supposed to be used only for SlackResponse pagination
You can paginate using Python's for iterator as below:
for response in client.conversations_list(limit=100):
# do something with each response here
"""
response = self._perform_urllib_http_request(url=api_url, args=req_args)
return {
"status_code": int(response["status"]),
"headers": dict(response["headers"]),
"data": json.loads(response["body"]),
}
def _urllib_api_call(
self,
*,
token: str = None,
url: str,
query_params: Dict[str, str] = {},
json_body: Dict = {},
body_params: Dict[str, str] = {},
files: Dict[str, io.BytesIO] = {},
additional_headers: Dict[str, str] = {},
) -> SlackResponse:
"""Performs a Slack API request and returns the result.
:param token: Slack API Token (either bot token or user token)
:param url: a complete URL (e.g., https://www.slack.com/api/chat.postMessage)
:param query_params: query string
:param json_body: json data structure (it's still a dict at this point),
if you give this argument, body_params and files will be skipped
:param body_params: form params
:param files: files to upload
:param additional_headers: request headers to append
:return: API response
"""
files_to_close: List[BinaryIO] = []
try:
# True/False -> "1"/"0"
query_params = convert_bool_to_0_or_1(query_params)
body_params = convert_bool_to_0_or_1(body_params)
if self._logger.level <= logging.DEBUG:
def convert_params(values: dict) -> dict:
if not values or not isinstance(values, dict):
return {}
return {
k: ("(bytes)" if isinstance(v, bytes) else v)
for k, v in values.items()
}
headers = {
k: "(redacted)" if k.lower() == "authorization" else v
for k, v in additional_headers.items()
}
self._logger.debug(
f"Sending a request - url: {url}, "
f"query_params: {convert_params(query_params)}, "
f"body_params: {convert_params(body_params)}, "
f"files: {convert_params(files)}, "
f"json_body: {json_body}, "
f"headers: {headers}"
)
request_data = {}
if files is not None and isinstance(files, dict) and len(files) > 0:
if body_params:
for k, v in body_params.items():
request_data.update({k: v})
for k, v in files.items():
if isinstance(v, str):
f: BinaryIO = open(v.encode("utf-8", "ignore"), "rb")
files_to_close.append(f)
request_data.update({k: f})
elif isinstance(v, (bytearray, bytes)):
request_data.update({k: io.BytesIO(v)})
else:
request_data.update({k: v})
            request_headers = self._build_urllib_request_headers(
                token=token or self.token,
                has_json=json_body is not None,
                has_files=files is not None,
                additional_headers=additional_headers,
            )
request_args = {
"headers": request_headers,
"data": request_data,
"params": body_params,
"files": files,
"json": json_body,
}
if query_params:
q = urlencode(query_params)
url = f"{url}&{q}" if "?" in url else f"{url}?{q}"
response = self._perform_urllib_http_request(url=url, args=request_args)
if response.get("body", None): # skipcq: PTC-W0039
try:
response_body_data: dict = json.loads(response["body"])
except json.decoder.JSONDecodeError as e:
message = f"Failed to parse the response body: {str(e)}"
raise err.SlackApiError(message, response)
else:
response_body_data: dict = None
if query_params:
all_params = copy.copy(body_params)
all_params.update(query_params)
else:
all_params = body_params
request_args["params"] = all_params # for backward-compatibility
return SlackResponse(
client=self,
http_verb="POST", # you can use POST method for all the Web APIs
api_url=url,
req_args=request_args,
data=response_body_data,
headers=dict(response["headers"]),
status_code=response["status"],
use_sync_aiohttp=False,
).validate()
finally:
for f in files_to_close:
if not f.closed:
f.close()
def _perform_urllib_http_request(
self, *, url: str, args: Dict[str, Dict[str, any]]
) -> Dict[str, any]:
"""Performs an HTTP request and parses the response.
:param url: a complete URL (e.g., https://www.slack.com/api/chat.postMessage)
:param args: args has "headers", "data", "params", and "json"
"headers": Dict[str, str]
"data": Dict[str, any]
"params": Dict[str, str],
"json": Dict[str, any],
:return: dict {status: int, headers: Headers, body: str}
"""
headers = args["headers"]
if args["json"]:
body = json.dumps(args["json"])
headers["Content-Type"] = "application/json;charset=utf-8"
elif args["data"]:
boundary = f"--------------{uuid.uuid4()}"
sep_boundary = b"\r\n--" + boundary.encode("ascii")
end_boundary = sep_boundary + b"--\r\n"
body = io.BytesIO()
data = args["data"]
for key, value in data.items():
readable = getattr(value, "readable", None)
if readable and value.readable():
filename = "Uploaded file"
name_attr = getattr(value, "name", None)
if name_attr:
filename = (
name_attr.decode("utf-8")
if isinstance(name_attr, bytes)
else name_attr
)
if "filename" in data:
filename = data["filename"]
mimetype = (
mimetypes.guess_type(filename)[0] or "application/octet-stream"
)
title = (
f'\r\nContent-Disposition: form-data; name="{key}"; filename="{filename}"\r\n'
+ f"Content-Type: {mimetype}\r\n"
)
value = value.read()
else:
title = f'\r\nContent-Disposition: form-data; name="{key}"\r\n'
value = str(value).encode("utf-8")
body.write(sep_boundary)
body.write(title.encode("utf-8"))
body.write(b"\r\n")
body.write(value)
body.write(end_boundary)
body = body.getvalue()
headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
headers["Content-Length"] = len(body)
elif args["params"]:
body = urlencode(args["params"])
headers["Content-Type"] = "application/x-www-form-urlencoded"
else:
body = None
if isinstance(body, str):
body = body.encode("utf-8")
# NOTE: Intentionally ignore the `http_verb` here
# Slack APIs accepts any API method requests with POST methods
try:
# urllib not only opens http:// or https:// URLs, but also ftp:// and file://.
# With this it might be possible to open local files on the executing machine
# which might be a security risk if the URL to open can be manipulated by an external user.
# (BAN-B310)
if url.lower().startswith("http"):
req = Request(method="POST", url=url, data=body, headers=headers)
opener: Optional[OpenerDirector] = None
if self.proxy is not None:
if isinstance(self.proxy, str):
opener = urllib.request.build_opener(
ProxyHandler({"http": self.proxy, "https": self.proxy}),
HTTPSHandler(context=self.ssl),
)
else:
raise SlackRequestError(
f"Invalid proxy detected: {self.proxy} must be a str value"
)
# NOTE: BAN-B310 is already checked above
resp: Optional[HTTPResponse] = None
if opener:
resp = opener.open(req, timeout=self.timeout) # skipcq: BAN-B310
else:
resp = urlopen( # skipcq: BAN-B310
req, context=self.ssl, timeout=self.timeout
)
charset = resp.headers.get_content_charset() or "utf-8"
body: str = resp.read().decode(charset) # read the response body here
return {"status": resp.code, "headers": resp.headers, "body": body}
raise SlackRequestError(f"Invalid URL detected: {url}")
except HTTPError as e:
resp = {"status": e.code, "headers": e.headers}
if e.code == 429:
# for compatibility with aiohttp
resp["headers"]["Retry-After"] = resp["headers"]["retry-after"]
# read the response body here
charset = e.headers.get_content_charset() or "utf-8"
body: str = e.read().decode(charset)
resp["body"] = body
return resp
except Exception as err:
self._logger.error(f"Failed to send a request to Slack API server: {err}")
raise err
def _build_urllib_request_headers(
self, token: str, has_json: bool, has_files: bool, additional_headers: dict
) -> Dict[str, str]:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
headers.update(self.headers)
if token:
headers.update({"Authorization": "Bearer {}".format(token)})
if additional_headers:
headers.update(additional_headers)
if has_json:
headers.update({"Content-Type": "application/json;charset=utf-8"})
if has_files:
# will be set afterwards
headers.pop("Content-Type", None)
return headers
# =================================================================
@staticmethod
def validate_slack_signature(
*, signing_secret: str, data: str, timestamp: str, signature: str
) -> bool:
"""
Slack creates a unique string for your app and shares it with you. Verify
requests from Slack with confidence by verifying signatures using your
signing secret.
On each HTTP request that Slack sends, we add an X-Slack-Signature HTTP
header. The signature is created by combining the signing secret with the
body of the request we're sending using a standard HMAC-SHA256 keyed hash.
https://api.slack.com/docs/verifying-requests-from-slack#how_to_make_a_request_signature_in_4_easy_steps__an_overview
Args:
signing_secret: Your application's signing secret, available in the
Slack API dashboard
data: The raw body of the incoming request - no headers, just the body.
timestamp: from the 'X-Slack-Request-Timestamp' header
signature: from the 'X-Slack-Signature' header - the calculated signature
should match this.
Returns:
True if signatures matches
"""
warnings.warn(
"As this method is deprecated since slackclient 2.6.0, "
"use `from slack.signature import SignatureVerifier` instead",
DeprecationWarning,
)
format_req = str.encode(f"v0:{timestamp}:{data}")
encoded_secret = str.encode(signing_secret)
request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()
calculated_signature = f"v0={request_hash}"
return hmac.compare_digest(calculated_signature, signature)
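# Illustrative sketch (not part of the original module): exercising the
# deprecated validate_slack_signature helper.  The signing secret, timestamp
# and request body below are made-up values.
def _example_validate_slack_signature() -> bool:
    signing_secret = "8f742231b10e8888abcd99yyyzzz85a5"  # placeholder secret
    timestamp = "1531420618"
    body = "token=xyzz0WbapA4vBCDEFasx0q6G&team_id=T1DC2JH3J"
    # Compute the signature the same way Slack would on its side ...
    request_hash = hmac.new(
        signing_secret.encode(), f"v0:{timestamp}:{body}".encode(), hashlib.sha256
    ).hexdigest()
    signature = f"v0={request_hash}"
    # ... then check that the helper accepts it.
    return LegacyBaseClient.validate_slack_signature(
        signing_secret=signing_secret,
        data=body,
        timestamp=timestamp,
        signature=signature,
    )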
| mit | -1,541,434,596,766,754,300 | 39.743542 | 125 | 0.525744 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/lib-tk/Tix.py | 1 | 62344 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id: Tix.py,v 1.7 2001/12/13 04:53:07 fdrake Exp $
#
# Tix.py -- Tix widget wrappers.
#
# For Tix, see http://tix.sourceforge.net
#
# - Sudhir Shenoy ([email protected]), Dec. 1995.
# based on an idea of Jean-Marc Lugrin ([email protected])
#
# NOTE: In order to minimize changes to Tkinter.py, some of the code here
# (TixWidget.__init__) has been taken from Tkinter (Widget.__init__)
# and will break if there are major changes in Tkinter.
#
# The Tix widgets are represented by a class hierarchy in python with proper
# inheritance of base classes.
#
# As a result after creating a 'w = StdButtonBox', I can write
# w.ok['text'] = 'Who Cares'
# or w.ok['bg'] = w['bg']
# or even w.ok.invoke()
# etc.
#
# Compare the demo tixwidgets.py to the original Tcl program and you will
# appreciate the advantages.
#
import string
from Tkinter import *
from Tkinter import _flatten, _cnfmerge, _default_root
# WARNING - TkVersion is a limited precision floating point number
if TkVersion < 3.999:
raise ImportError, "This version of Tix.py requires Tk 4.0 or higher"
import _tkinter # If this fails your Python may not be configured for Tk
# TixVersion = string.atof(tkinter.TIX_VERSION) # If this fails your Python may not be configured for Tix
# WARNING - TixVersion is a limited precision floating point number
# Some more constants (for consistency with Tkinter)
WINDOW = 'window'
TEXT = 'text'
STATUS = 'status'
IMMEDIATE = 'immediate'
IMAGE = 'image'
IMAGETEXT = 'imagetext'
BALLOON = 'balloon'
AUTO = 'auto'
ACROSSTOP = 'acrosstop'
# Some constants used by Tkinter dooneevent()
TCL_DONT_WAIT = 1 << 1
TCL_WINDOW_EVENTS = 1 << 2
TCL_FILE_EVENTS = 1 << 3
TCL_TIMER_EVENTS = 1 << 4
TCL_IDLE_EVENTS = 1 << 5
TCL_ALL_EVENTS = 0
# BEWARE - this is implemented by copying some code from the Widget class
# in Tkinter (to override Widget initialization) and is therefore
# liable to break.
import Tkinter, os
# Could probably add this to Tkinter.Misc
class tixCommand:
"""The tix commands provide access to miscellaneous elements
of Tix's internal state and the Tix application context.
Most of the information manipulated by these commands pertains
to the application as a whole, or to a screen or
display, rather than to a particular window.
    This is a mixin class, assumed to be mixed into Tkinter.Tk,
    which supports the self.tk.call method.
"""
def tix_addbitmapdir(self, directory):
"""Tix maintains a list of directories under which
the tix_getimage and tix_getbitmap commands will
search for image files. The standard bitmap directory
is $TIX_LIBRARY/bitmaps. The addbitmapdir command
adds directory into this list. By using this
        command, the image files of an application can
also be located using the tix_getimage or tix_getbitmap
command.
"""
return self.tk.call('tix', 'addbitmapdir', directory)
def tix_cget(self, option):
"""Returns the current value of the configuration
option given by option. Option may be any of the
options described in the CONFIGURATION OPTIONS section.
"""
return self.tk.call('tix', 'cget', option)
def tix_configure(self, cnf=None, **kw):
"""Query or modify the configuration options of the Tix application
context. If no option is specified, returns a dictionary all of the
available options. If option is specified with no value, then the
command returns a list describing the one named option (this list
will be identical to the corresponding sublist of the value
returned if no option is specified). If one or more option-value
pairs are specified, then the command modifies the given option(s)
to have the given value(s); in this case the command returns an
empty string. Option may be any of the configuration options.
"""
# Copied from Tkinter.py
if kw:
cnf = _cnfmerge((cnf, kw))
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
cnf = {}
for x in self.tk.split(self.tk.call('tix', 'configure')):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if isinstance(cnf, StringType):
x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf))
return (x[0][1:],) + x[1:]
return self.tk.call(('tix', 'configure') + self._options(cnf))
def tix_filedialog(self, dlgclass=None):
"""Returns the file selection dialog that may be shared among
different calls from this application. This command will create a
file selection dialog widget when it is called the first time. This
dialog will be returned by all subsequent calls to tix_filedialog.
An optional dlgclass parameter can be passed to specified what type
of file selection dialog widget is desired. Possible options are
        tixFileSelectDialog or tixExFileSelectDialog.
"""
if dlgclass is not None:
return self.tk.call('tix', 'filedialog', dlgclass)
else:
return self.tk.call('tix', 'filedialog')
def tix_getbitmap(self, name):
"""Locates a bitmap file of the name name.xpm or name in one of the
bitmap directories (see the tix_addbitmapdir command above). By
using tix_getbitmap, you can avoid hard coding the pathnames of the
bitmap files in your application. When successful, it returns the
complete pathname of the bitmap file, prefixed with the character
'@'. The returned value can be used to configure the -bitmap
option of the TK and Tix widgets.
"""
return self.tk.call('tix', 'getbitmap', name)
def tix_getimage(self, name):
"""Locates an image file of the name name.xpm, name.xbm or name.ppm
in one of the bitmap directories (see the addbitmapdir command
above). If more than one file with the same name (but different
extensions) exist, then the image type is chosen according to the
depth of the X display: xbm images are chosen on monochrome
displays and color images are chosen on color displays. By using
        tix_getimage, you can avoid hard coding the pathnames of the
image files in your application. When successful, this command
returns the name of the newly created image, which can be used to
configure the -image option of the Tk and Tix widgets.
"""
return self.tk.call('tix', 'getimage', name)
def tix_option_get(self, name):
"""Gets the options manitained by the Tix
scheme mechanism. Available options include:
active_bg active_fg bg
bold_font dark1_bg dark1_fg
dark2_bg dark2_fg disabled_fg
fg fixed_font font
inactive_bg inactive_fg input1_bg
input2_bg italic_font light1_bg
light1_fg light2_bg light2_fg
menu_font output1_bg output2_bg
select_bg select_fg selector
"""
# could use self.tk.globalgetvar('tixOption', name)
return self.tk.call('tix', 'option', 'get', name)
def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None):
"""Resets the scheme and fontset of the Tix application to
newScheme and newFontSet, respectively. This affects only those
widgets created after this call. Therefore, it is best to call the
resetoptions command before the creation of any widgets in a Tix
application.
The optional parameter newScmPrio can be given to reset the
priority level of the Tk options set by the Tix schemes.
Because of the way Tk handles the X option database, after Tix has
        been imported and inited, it is not possible to reset the color
schemes and font sets using the tix config command. Instead, the
tix_resetoptions command must be used.
"""
if newScmPrio is not None:
return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio)
else:
return self.tk.call('tix', 'resetoptions', newScheme, newFontSet)
class Tk(Tkinter.Tk, tixCommand):
"""Toplevel widget of Tix which represents mostly the main window
of an application. It has an associated Tcl interpreter."""
def __init__(self, screenName=None, baseName=None, className='Tix'):
Tkinter.Tk.__init__(self, screenName, baseName, className)
tixlib = os.environ.get('TIX_LIBRARY')
self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]')
if tixlib is not None:
self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib)
self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib)
# Load Tix - this should work dynamically or statically
# If it's static, lib/tix8.1/pkgIndex.tcl should have
# 'load {} Tix'
# If it's dynamic under Unix, lib/tix8.1/pkgIndex.tcl should have
# 'load libtix8.1.8.3.so Tix'
self.tk.eval('package require Tix')
# The Tix 'tixForm' geometry manager
class Form:
"""The Tix Form geometry manager
Widgets can be arranged by specifying attachments to other widgets.
See Tix documentation for complete details"""
def config(self, cnf={}, **kw):
apply(self.tk.call, ('tixForm', self._w) + self._options(cnf, kw))
form = config
def __setitem__(self, key, value):
Form.form(self, {key: value})
def check(self):
return self.tk.call('tixForm', 'check', self._w)
def forget(self):
self.tk.call('tixForm', 'forget', self._w)
def grid(self, xsize=0, ysize=0):
if (not xsize) and (not ysize):
x = self.tk.call('tixForm', 'grid', self._w)
y = self.tk.splitlist(x)
z = ()
for x in y:
z = z + (self.tk.getint(x),)
return z
self.tk.call('tixForm', 'grid', self._w, xsize, ysize)
def info(self, option=None):
if not option:
return self.tk.call('tixForm', 'info', self._w)
if option[0] != '-':
option = '-' + option
return self.tk.call('tixForm', 'info', self._w, option)
def slaves(self):
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call(
'tixForm', 'slaves', self._w)))
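# Illustrative sketch (not part of the original module): how two buttons
# might be attached with the tixForm geometry manager once the Form mixin
# has been added to the widget classes below.  The attachment values are
# placeholders following tixForm syntax ('%0' means 0% of the master).
def _example_form_usage(root):
    btn1 = Button(root, text='One')
    btn2 = Button(root, text='Two')
    btn1.form(top='%0', left='%0', right='%50')
    btn2.form(top=btn1, left='%0', right='%50')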
Tkinter.Widget.__bases__ = Tkinter.Widget.__bases__ + (Form,)
class TixWidget(Tkinter.Widget):
"""A TixWidget class is used to package all (or most) Tix widgets.
Widget initialization is extended in two ways:
1) It is possible to give a list of options which must be part of
the creation command (so called Tix 'static' options). These cannot be
given as a 'config' command later.
2) It is possible to give the name of an existing TK widget. These are
child widgets created automatically by a Tix mega-widget. The Tk call
to create these widgets is therefore bypassed in TixWidget.__init__
Both options are for use by subclasses only.
"""
def __init__ (self, master=None, widgetName=None,
static_options=None, cnf={}, kw={}):
# Merge keywords and dictionary arguments
if kw:
cnf = _cnfmerge((cnf, kw))
else:
cnf = _cnfmerge(cnf)
# Move static options into extra. static_options must be
# a list of keywords (or None).
extra=()
if static_options:
for k,v in cnf.items()[:]:
if k in static_options:
extra = extra + ('-' + k, v)
del cnf[k]
self.widgetName = widgetName
Widget._setup(self, master, cnf)
# If widgetName is None, this is a dummy creation call where the
# corresponding Tk widget has already been created by Tix
if widgetName:
apply(self.tk.call, (widgetName, self._w) + extra)
# Non-static options - to be done via a 'config' command
if cnf:
Widget.config(self, cnf)
# Dictionary to hold subwidget names for easier access. We can't
# use the children list because the public Tix names may not be the
# same as the pathname component
self.subwidget_list = {}
# We set up an attribute access function so that it is possible to
# do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello'
# when w is a StdButtonBox.
# We can even do w.ok.invoke() because w.ok is subclassed from the
# Button class if you go through the proper constructors
def __getattr__(self, name):
if self.subwidget_list.has_key(name):
return self.subwidget_list[name]
raise AttributeError, name
def set_silent(self, value):
"""Set a variable without calling its action routine"""
self.tk.call('tixSetSilent', self._w, value)
def subwidget(self, name):
"""Return the named subwidget (which must have been created by
the sub-class)."""
n = self._subwidget_name(name)
if not n:
raise TclError, "Subwidget " + name + " not child of " + self._name
# Remove header of name and leading dot
n = n[len(self._w)+1:]
return self._nametowidget(n)
def subwidgets_all(self):
"""Return all subwidgets."""
names = self._subwidget_names()
if not names:
return []
retlist = []
for name in names:
name = name[len(self._w)+1:]
try:
retlist.append(self._nametowidget(name))
except:
# some of the widgets are unknown e.g. border in LabelFrame
pass
return retlist
def _subwidget_name(self,name):
"""Get a subwidget name (returns a String, not a Widget !)"""
try:
return self.tk.call(self._w, 'subwidget', name)
except TclError:
return None
def _subwidget_names(self):
"""Return the name of all subwidgets."""
try:
x = self.tk.call(self._w, 'subwidgets', '-all')
return self.tk.split(x)
except TclError:
return None
def config_all(self, option, value):
"""Set configuration options for all subwidgets (and self)."""
if option == '':
return
elif not isinstance(option, StringType):
option = `option`
if not isinstance(value, StringType):
value = `value`
names = self._subwidget_names()
for name in names:
self.tk.call(name, 'configure', '-' + option, value)
# Subwidgets are child widgets created automatically by mega-widgets.
# In python, we have to create these subwidgets manually to mirror their
# existence in Tk/Tix.
class TixSubWidget(TixWidget):
"""Subwidget class.
This is used to mirror child widgets automatically created
by Tix/Tk as part of a mega-widget in Python (which is not informed
of this)"""
def __init__(self, master, name,
destroy_physically=1, check_intermediate=1):
if check_intermediate:
path = master._subwidget_name(name)
try:
path = path[len(master._w)+1:]
plist = string.splitfields(path, '.')
except:
plist = []
if (not check_intermediate) or len(plist) < 2:
# immediate descendant
TixWidget.__init__(self, master, None, None, {'name' : name})
else:
# Ensure that the intermediate widgets exist
parent = master
for i in range(len(plist) - 1):
n = string.joinfields(plist[:i+1], '.')
try:
w = master._nametowidget(n)
parent = w
except KeyError:
# Create the intermediate widget
parent = TixSubWidget(parent, plist[i],
destroy_physically=0,
check_intermediate=0)
TixWidget.__init__(self, parent, None, None, {'name' : name})
self.destroy_physically = destroy_physically
def destroy(self):
# For some widgets e.g., a NoteBook, when we call destructors,
# we must be careful not to destroy the frame widget since this
# also destroys the parent NoteBook thus leading to an exception
# in Tkinter when it finally calls Tcl to destroy the NoteBook
for c in self.children.values(): c.destroy()
if self.master.children.has_key(self._name):
del self.master.children[self._name]
if self.master.subwidget_list.has_key(self._name):
del self.master.subwidget_list[self._name]
if self.destroy_physically:
# This is bypassed only for a few widgets
self.tk.call('destroy', self._w)
# Useful func. to split Tcl lists and return as a dict. From Tkinter.py
def _lst2dict(lst):
dict = {}
for x in lst:
dict[x[0][1:]] = (x[0][1:],) + x[1:]
return dict
# Useful class to create a display style - later shared by many items.
# Contributed by Steffen Kremser
class DisplayStyle:
"""DisplayStyle - handle configuration options shared by
(multiple) Display Items"""
def __init__(self, itemtype, cnf={}, **kw ):
master = _default_root # global from Tkinter
if not master and cnf.has_key('refwindow'): master=cnf['refwindow']
elif not master and kw.has_key('refwindow'): master= kw['refwindow']
elif not master: raise RuntimeError, "Too early to create display style: no root window"
self.tk = master.tk
self.stylename = apply(self.tk.call, ('tixDisplayStyle', itemtype) +
self._options(cnf,kw) )
def __str__(self):
return self.stylename
def _options(self, cnf, kw ):
if kw and cnf:
cnf = _cnfmerge((cnf, kw))
elif kw:
cnf = kw
opts = ()
for k, v in cnf.items():
opts = opts + ('-'+k, v)
return opts
def delete(self):
self.tk.call(self.stylename, 'delete')
del(self)
def __setitem__(self,key,value):
self.tk.call(self.stylename, 'configure', '-%s'%key, value)
def config(self, cnf={}, **kw):
return _lst2dict(
self.tk.split(
apply(self.tk.call,
(self.stylename, 'configure') + self._options(cnf,kw))))
def __getitem__(self,key):
return self.tk.call(self.stylename, 'cget', '-%s'%key)
######################################################
### The Tix Widget classes - in alphabetical order ###
######################################################
class Balloon(TixWidget):
"""Balloon help widget.
Subwidget Class
--------- -----
label Label
message Message"""
def __init__(self, master=None, cnf={}, **kw):
# static seem to be -installcolormap -initwait -statusbar -cursor
static = ['options', 'installcolormap', 'initwait', 'statusbar',
'cursor']
TixWidget.__init__(self, master, 'tixBalloon', static, cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label',
destroy_physically=0)
self.subwidget_list['message'] = _dummyLabel(self, 'message',
destroy_physically=0)
def bind_widget(self, widget, cnf={}, **kw):
"""Bind balloon widget to another.
One balloon widget may be bound to several widgets at the same time"""
apply(self.tk.call,
(self._w, 'bind', widget._w) + self._options(cnf, kw))
def unbind_widget(self, widget):
self.tk.call(self._w, 'unbind', widget._w)
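# Illustrative sketch (not part of the original module): attaching balloon
# help to an ordinary button.  The message texts are placeholders.
def _example_balloon_usage(root):
    bal = Balloon(root)
    quit_btn = Button(root, text='Quit', command=root.destroy)
    quit_btn.pack()
    bal.bind_widget(quit_btn, balloonmsg='Exit the application',
                    statusmsg='Press this button to quit')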
class ButtonBox(TixWidget):
"""ButtonBox - A container for pushbuttons.
Subwidgets are the buttons added with the add method.
"""
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixButtonBox',
['orientation', 'options'], cnf, kw)
def add(self, name, cnf={}, **kw):
"""Add a button with given name to box."""
btn = apply(self.tk.call,
(self._w, 'add', name) + self._options(cnf, kw))
self.subwidget_list[name] = _dummyButton(self, name)
return btn
def invoke(self, name):
if self.subwidget_list.has_key(name):
self.tk.call(self._w, 'invoke', name)
class ComboBox(TixWidget):
"""ComboBox - an Entry field with a dropdown menu. The user can select a
    choice by either typing in the entry subwidget or selecting from the
listbox subwidget.
Subwidget Class
--------- -----
entry Entry
arrow Button
slistbox ScrolledListBox
tick Button
cross Button : present if created with the fancy option"""
def __init__ (self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixComboBox',
['editable', 'dropdown', 'fancy', 'options'],
cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
'slistbox')
try:
self.subwidget_list['tick'] = _dummyButton(self, 'tick')
self.subwidget_list['cross'] = _dummyButton(self, 'cross')
except TypeError:
# unavailable when -fancy not specified
pass
def add_history(self, str):
self.tk.call(self._w, 'addhistory', str)
def append_history(self, str):
self.tk.call(self._w, 'appendhistory', str)
def insert(self, index, str):
self.tk.call(self._w, 'insert', index, str)
def pick(self, index):
self.tk.call(self._w, 'pick', index)
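# Illustrative sketch (not part of the original module): a simple dropdown
# ComboBox populated with a few colour names (placeholder data).
def _example_combobox_usage(root):
    cb = ComboBox(root, label='Colour: ', dropdown=1, editable=0)
    for colour in ('red', 'green', 'blue'):
        cb.insert(END, colour)
    cb.pack()
    return cb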
class Control(TixWidget):
"""Control - An entry field with value change arrows. The user can
adjust the value by pressing the two arrow buttons or by entering
the value directly into the entry. The new value will be checked
against the user-defined upper and lower limits.
Subwidget Class
--------- -----
incr Button
decr Button
entry Entry
label Label"""
def __init__ (self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixControl', ['options'], cnf, kw)
self.subwidget_list['incr'] = _dummyButton(self, 'incr')
self.subwidget_list['decr'] = _dummyButton(self, 'decr')
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
def decrement(self):
self.tk.call(self._w, 'decr')
def increment(self):
self.tk.call(self._w, 'incr')
def invoke(self):
self.tk.call(self._w, 'invoke')
def update(self):
self.tk.call(self._w, 'update')
class DirList(TixWidget):
"""DirList - displays a list view of a directory, its previous
directories and its sub-directories. The user can choose one of
the directories displayed in the list or change to another directory.
Subwidget Class
--------- -----
hlist HList
hsb Scrollbar
vsb Scrollbar"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def chdir(self, dir):
self.tk.call(self._w, 'chdir', dir)
class DirTree(TixWidget):
"""DirTree - Directory Listing in a hierarchical view.
Displays a tree view of a directory, its previous directories and its
sub-directories. The user can choose one of the directories displayed
in the list or change to another directory.
Subwidget Class
--------- -----
hlist HList
hsb Scrollbar
vsb Scrollbar"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirTree', ['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def chdir(self, dir):
self.tk.call(self._w, 'chdir', dir)
class DirSelectBox(TixWidget):
"""DirSelectBox - Motif style file select box.
It is generally used for
the user to choose a file. FileSelectBox stores the files mostly
recently selected into a ComboBox widget so that they can be quickly
selected again.
    Subwidget       Class
    ---------       -----
    dirlist         DirList
    dircbx          ComboBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw)
self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
class ExFileSelectBox(TixWidget):
"""ExFileSelectBox - MS Windows style file select box.
    It provides a convenient method for the user to select files.
Subwidget Class
--------- -----
cancel Button
ok Button
hidden Checkbutton
types ComboBox
dir ComboBox
file ComboBox
dirlist ScrolledListBox
filelist ScrolledListBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixExFileSelectBox', ['options'], cnf, kw)
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
self.subwidget_list['types'] = _dummyComboBox(self, 'types')
self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
self.subwidget_list['file'] = _dummyComboBox(self, 'file')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
def filter(self):
self.tk.call(self._w, 'filter')
def invoke(self):
self.tk.call(self._w, 'invoke')
# Should inherit from a Dialog class
class DirSelectDialog(TixWidget):
"""The DirSelectDialog widget presents the directories in the file
system in a dialog window. The user can use this dialog window to
navigate through the file system to select the desired directory.
Subwidgets Class
---------- -----
    dirbox          DirSelectBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixDirSelectDialog',
['options'], cnf, kw)
self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox')
# cancel and ok buttons are missing
def popup(self):
self.tk.call(self._w, 'popup')
def popdown(self):
self.tk.call(self._w, 'popdown')
# Should inherit from a Dialog class
class ExFileSelectDialog(TixWidget):
"""ExFileSelectDialog - MS Windows style file select dialog.
    It provides a convenient method for the user to select files.
Subwidgets Class
---------- -----
fsbox ExFileSelectBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixExFileSelectDialog',
['options'], cnf, kw)
self.subwidget_list['fsbox'] = _dummyExFileSelectBox(self, 'fsbox')
def popup(self):
self.tk.call(self._w, 'popup')
def popdown(self):
self.tk.call(self._w, 'popdown')
class FileSelectBox(TixWidget):
"""ExFileSelectBox - Motif style file select box.
It is generally used for
the user to choose a file. FileSelectBox stores the files mostly
recently selected into a ComboBox widget so that they can be quickly
selected again.
Subwidget Class
--------- -----
selection ComboBox
filter ComboBox
dirlist ScrolledListBox
filelist ScrolledListBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw)
self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
def apply_filter(self): # name of subwidget is same as command
self.tk.call(self._w, 'filter')
def invoke(self):
self.tk.call(self._w, 'invoke')
# Should inherit from a Dialog class
class FileSelectDialog(TixWidget):
"""FileSelectDialog - Motif style file select dialog.
Subwidgets Class
---------- -----
btns StdButtonBox
fsbox FileSelectBox"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixFileSelectDialog',
['options'], cnf, kw)
self.subwidget_list['btns'] = _dummyStdButtonBox(self, 'btns')
self.subwidget_list['fsbox'] = _dummyFileSelectBox(self, 'fsbox')
def popup(self):
self.tk.call(self._w, 'popup')
def popdown(self):
self.tk.call(self._w, 'popdown')
class FileEntry(TixWidget):
"""FileEntry - Entry field with button that invokes a FileSelectDialog.
The user can type in the filename manually. Alternatively, the user can
press the button widget that sits next to the entry, which will bring
up a file selection dialog.
Subwidgets Class
---------- -----
button Button
entry Entry"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixFileEntry',
['dialogtype', 'options'], cnf, kw)
self.subwidget_list['button'] = _dummyButton(self, 'button')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
def invoke(self):
self.tk.call(self._w, 'invoke')
def file_dialog(self):
# XXX return python object
pass
class HList(TixWidget):
"""HList - Hierarchy display widget can be used to display any data
that have a hierarchical structure, for example, file system directory
trees. The list entries are indented and connected by branch lines
    according to their places in the hierarchy.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixHList',
['columns', 'options'], cnf, kw)
def add(self, entry, cnf={}, **kw):
return apply(self.tk.call,
(self._w, 'add', entry) + self._options(cnf, kw))
def add_child(self, parent=None, cnf={}, **kw):
if not parent:
parent = ''
return apply(self.tk.call,
(self._w, 'addchild', parent) + self._options(cnf, kw))
def anchor_set(self, entry):
self.tk.call(self._w, 'anchor', 'set', entry)
def anchor_clear(self):
self.tk.call(self._w, 'anchor', 'clear')
def column_width(self, col=0, width=None, chars=None):
if not chars:
return self.tk.call(self._w, 'column', 'width', col, width)
else:
return self.tk.call(self._w, 'column', 'width', col,
'-char', chars)
def delete_all(self):
self.tk.call(self._w, 'delete', 'all')
def delete_entry(self, entry):
self.tk.call(self._w, 'delete', 'entry', entry)
def delete_offsprings(self, entry):
self.tk.call(self._w, 'delete', 'offsprings', entry)
def delete_siblings(self, entry):
self.tk.call(self._w, 'delete', 'siblings', entry)
def dragsite_set(self, index):
self.tk.call(self._w, 'dragsite', 'set', index)
def dragsite_clear(self):
self.tk.call(self._w, 'dragsite', 'clear')
def dropsite_set(self, index):
self.tk.call(self._w, 'dropsite', 'set', index)
def dropsite_clear(self):
self.tk.call(self._w, 'dropsite', 'clear')
def header_create(self, col, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'header', 'create', col) + self._options(cnf, kw))
def header_configure(self, col, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'header', 'configure', col)))
apply(self.tk.call, (self._w, 'header', 'configure', col)
+ self._options(cnf, kw))
def header_cget(self, col, opt):
return self.tk.call(self._w, 'header', 'cget', col, opt)
def header_exists(self, col):
return self.tk.call(self._w, 'header', 'exists', col)
def header_delete(self, col):
self.tk.call(self._w, 'header', 'delete', col)
def header_size(self, col):
return self.tk.call(self._w, 'header', 'size', col)
def hide_entry(self, entry):
self.tk.call(self._w, 'hide', 'entry', entry)
def indicator_create(self, entry, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'indicator', 'create', entry) + self._options(cnf, kw))
def indicator_configure(self, entry, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'indicator', 'configure', entry)))
apply(self.tk.call,
(self._w, 'indicator', 'configure', entry) + self._options(cnf, kw))
def indicator_cget(self, entry, opt):
return self.tk.call(self._w, 'indicator', 'cget', entry, opt)
def indicator_exists(self, entry):
return self.tk.call (self._w, 'indicator', 'exists', entry)
def indicator_delete(self, entry):
self.tk.call(self._w, 'indicator', 'delete', entry)
def indicator_size(self, entry):
return self.tk.call(self._w, 'indicator', 'size', entry)
def info_anchor(self):
return self.tk.call(self._w, 'info', 'anchor')
def info_children(self, entry=None):
c = self.tk.call(self._w, 'info', 'children', entry)
return self.tk.splitlist(c)
def info_data(self, entry):
return self.tk.call(self._w, 'info', 'data', entry)
def info_exists(self, entry):
return self.tk.call(self._w, 'info', 'exists', entry)
def info_hidden(self, entry):
return self.tk.call(self._w, 'info', 'hidden', entry)
def info_next(self, entry):
return self.tk.call(self._w, 'info', 'next', entry)
def info_parent(self, entry):
return self.tk.call(self._w, 'info', 'parent', entry)
def info_prev(self, entry):
return self.tk.call(self._w, 'info', 'prev', entry)
def info_selection(self):
c = self.tk.call(self._w, 'info', 'selection')
return self.tk.splitlist(c)
def item_cget(self, entry, col, opt):
return self.tk.call(self._w, 'item', 'cget', entry, col, opt)
def item_configure(self, entry, col, cnf={}, **kw):
if cnf is None:
return _lst2dict(
self.tk.split(
self.tk.call(self._w, 'item', 'configure', entry, col)))
apply(self.tk.call, (self._w, 'item', 'configure', entry, col) +
self._options(cnf, kw))
def item_create(self, entry, col, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'item', 'create', entry, col) + self._options(cnf, kw))
def item_exists(self, entry, col):
return self.tk.call(self._w, 'item', 'exists', entry, col)
def item_delete(self, entry, col):
self.tk.call(self._w, 'item', 'delete', entry, col)
def nearest(self, y):
return self.tk.call(self._w, 'nearest', y)
def see(self, entry):
self.tk.call(self._w, 'see', entry)
def selection_clear(self, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'selection', 'clear') + self._options(cnf, kw))
def selection_includes(self, entry):
return self.tk.call(self._w, 'selection', 'includes', entry)
def selection_set(self, first, last=None):
self.tk.call(self._w, 'selection', 'set', first, last)
def show_entry(self, entry):
return self.tk.call(self._w, 'show', 'entry', entry)
def xview(self, *args):
apply(self.tk.call, (self._w, 'xview') + args)
def yview(self, *args):
apply(self.tk.call, (self._w, 'yview') + args)
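# Illustrative sketch (not part of the original module): building a small
# two-level hierarchy in an HList.  Entry path names use the default '.'
# separator and are made up for this example.
def _example_hlist_usage(root):
    h = HList(root, columns=1)
    h.add('top', itemtype=TEXT, text='Top level')
    h.add('top.child', itemtype=TEXT, text='Child entry')
    h.selection_set('top')
    h.pack()
    return h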
class InputOnly(TixWidget):
"""InputOnly - Invisible widget.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw)
class LabelEntry(TixWidget):
"""LabelEntry - Entry field with label. Packages an entry widget
    and a label into one mega widget. It can be used to simplify
the creation of ``entry-form'' type of interface.
Subwidgets Class
---------- -----
label Label
entry Entry"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixLabelEntry',
['labelside','options'], cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
class LabelFrame(TixWidget):
"""LabelFrame - Labelled Frame container. Packages a frame widget
and a label into one mega widget. To create widgets inside a
LabelFrame widget, one creates the new widgets relative to the
frame subwidget and manage them inside the frame subwidget.
Subwidgets Class
---------- -----
label Label
frame Frame"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixLabelFrame',
['labelside','options'], cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
self.subwidget_list['frame'] = _dummyFrame(self, 'frame')
class ListNoteBook(TixWidget):
"""A ListNoteBook widget is very similar to the TixNoteBook widget:
it can be used to display many windows in a limited space using a
notebook metaphor. The notebook is divided into a stack of pages
(windows). At one time only one of these pages can be shown.
The user can navigate through these pages by
choosing the name of the desired page in the hlist subwidget."""
def __init__(self, master, cnf={}, **kw):
        TixWidget.__init__(self, master, 'tixListNoteBook', ['options'], cnf, kw)
        self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
        self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'shlist')
def add(self, name, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'add', name) + self._options(cnf, kw))
self.subwidget_list[name] = TixSubWidget(self, name)
return self.subwidget_list[name]
def raise_page(self, name): # raise is a python keyword
self.tk.call(self._w, 'raise', name)
class Meter(TixWidget):
"""The Meter widget can be used to show the progress of a background
job which may take a long time to execute.
"""
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixMeter',
['options'], cnf, kw)
class NoteBook(TixWidget):
"""NoteBook - Multi-page container widget (tabbed notebook metaphor).
Subwidgets Class
---------- -----
nbframe NoteBookFrame
<pages> page widgets added dynamically with the add method"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw)
self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe',
destroy_physically=0)
def add(self, name, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'add', name) + self._options(cnf, kw))
self.subwidget_list[name] = TixSubWidget(self, name)
return self.subwidget_list[name]
def delete(self, name):
self.tk.call(self._w, 'delete', name)
def page(self, name):
return self.subwidget(name)
def pages(self):
# Can't call subwidgets_all directly because we don't want .nbframe
names = self.tk.split(self.tk.call(self._w, 'pages'))
ret = []
for x in names:
ret.append(self.subwidget(x))
return ret
def raise_page(self, name): # raise is a python keyword
self.tk.call(self._w, 'raise', name)
def raised(self):
return self.tk.call(self._w, 'raised')
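# Illustrative sketch (not part of the original module): a two-page notebook.
# Page names and labels are placeholders.
def _example_notebook_usage(root):
    nb = NoteBook(root)
    general = nb.add('general', label='General')
    advanced = nb.add('advanced', label='Advanced')
    Button(general, text='OK').pack()
    Button(advanced, text='Reset').pack()
    nb.raise_page('general')
    nb.pack()
    return nb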
class NoteBookFrame(TixWidget):
"""Will be added when Tix documentation is available !!!"""
pass
class OptionMenu(TixWidget):
"""OptionMenu - creates a menu button of options.
Subwidget Class
--------- -----
menubutton Menubutton
menu Menu"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw)
self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
def add_command(self, name, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'add', 'command', name) + self._options(cnf, kw))
def add_separator(self, name, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'add', 'separator', name) + self._options(cnf, kw))
def delete(self, name):
self.tk.call(self._w, 'delete', name)
def disable(self, name):
self.tk.call(self._w, 'disable', name)
def enable(self, name):
self.tk.call(self._w, 'enable', name)
class PanedWindow(TixWidget):
"""PanedWindow - Multi-pane container widget
allows the user to interactively manipulate the sizes of several
panes. The panes can be arranged either vertically or horizontally.The
user changes the sizes of the panes by dragging the resize handle
between two panes.
Subwidgets Class
---------- -----
<panes> g/p widgets added dynamically with the add method."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixPanedWindow', ['orientation', 'options'], cnf, kw)
def add(self, name, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'add', name) + self._options(cnf, kw))
self.subwidget_list[name] = TixSubWidget(self, name,
check_intermediate=0)
return self.subwidget_list[name]
def panes(self):
        names = self.tk.splitlist(self.tk.call(self._w, 'panes'))
ret = []
for x in names:
ret.append(self.subwidget(x))
return ret
class PopupMenu(TixWidget):
"""PopupMenu widget can be used as a replacement of the tk_popup command.
    The advantage of the Tix PopupMenu widget is that it requires less application
code to manipulate.
Subwidgets Class
---------- -----
menubutton Menubutton
menu Menu"""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixPopupMenu', ['options'], cnf, kw)
self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
def bind_widget(self, widget):
self.tk.call(self._w, 'bind', widget._w)
def unbind_widget(self, widget):
self.tk.call(self._w, 'unbind', widget._w)
def post_widget(self, widget, x, y):
self.tk.call(self._w, 'post', widget._w, x, y)
class ResizeHandle(TixWidget):
"""Internal widget to draw resize handles on Scrolled widgets."""
def __init__(self, master, cnf={}, **kw):
# There seems to be a Tix bug rejecting the configure method
# Let's try making the flags -static
flags = ['options', 'command', 'cursorfg', 'cursorbg',
'handlesize', 'hintcolor', 'hintwidth',
'x', 'y']
# In fact, x y height width are configurable
TixWidget.__init__(self, master, 'tixResizeHandle',
flags, cnf, kw)
def attach_widget(self, widget):
self.tk.call(self._w, 'attachwidget', widget._w)
def detach_widget(self, widget):
self.tk.call(self._w, 'detachwidget', widget._w)
def hide(self, widget):
self.tk.call(self._w, 'hide', widget._w)
def show(self, widget):
self.tk.call(self._w, 'show', widget._w)
class ScrolledHList(TixWidget):
"""ScrolledHList - HList with automatic scrollbars."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledHList', ['options'],
cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledListBox(TixWidget):
"""ScrolledListBox - Listbox with automatic scrollbars."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledListBox', ['options'], cnf, kw)
self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledText(TixWidget):
"""ScrolledText - Text with automatic scrollbars."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw)
self.subwidget_list['text'] = _dummyText(self, 'text')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledTList(TixWidget):
"""ScrolledTList - TList with automatic scrollbars."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledTList', ['options'],
cnf, kw)
self.subwidget_list['tlist'] = _dummyTList(self, 'tlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class ScrolledWindow(TixWidget):
"""ScrolledWindow - Window with automatic scrollbars."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw)
self.subwidget_list['window'] = _dummyFrame(self, 'window')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class Select(TixWidget):
"""Select - Container of button subwidgets. It can be used to provide
radio-box or check-box style of selection options for the user.
Subwidgets are buttons added dynamically using the add method."""
def __init__(self, master, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixSelect',
['allowzero', 'radio', 'orientation', 'labelside',
'options'],
cnf, kw)
self.subwidget_list['label'] = _dummyLabel(self, 'label')
def add(self, name, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'add', name) + self._options(cnf, kw))
self.subwidget_list[name] = _dummyButton(self, name)
return self.subwidget_list[name]
def invoke(self, name):
self.tk.call(self._w, 'invoke', name)
class StdButtonBox(TixWidget):
"""StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) """
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixStdButtonBox',
['orientation', 'options'], cnf, kw)
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['apply'] = _dummyButton(self, 'apply')
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['help'] = _dummyButton(self, 'help')
def invoke(self, name):
if self.subwidget_list.has_key(name):
self.tk.call(self._w, 'invoke', name)
class TList(TixWidget):
"""TList - Hierarchy display widget which can be
used to display data in a tabular format. The list entries of a TList
widget are similar to the entries in the Tk listbox widget. The main
differences are (1) the TList widget can display the list entries in a
two dimensional format and (2) you can use graphical images as well as
multiple colors and fonts for the list entries.
Subwidgets - None"""
def __init__ (self,master=None,cnf={}, **kw):
TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw)
def active_set(self, index):
self.tk.call(self._w, 'active', 'set', index)
def active_clear(self):
self.tk.call(self._w, 'active', 'clear')
def anchor_set(self, index):
self.tk.call(self._w, 'anchor', 'set', index)
def anchor_clear(self):
self.tk.call(self._w, 'anchor', 'clear')
def delete(self, from_, to=None):
self.tk.call(self._w, 'delete', from_, to)
def dragsite_set(self, index):
self.tk.call(self._w, 'dragsite', 'set', index)
def dragsite_clear(self):
self.tk.call(self._w, 'dragsite', 'clear')
def dropsite_set(self, index):
self.tk.call(self._w, 'dropsite', 'set', index)
def dropsite_clear(self):
self.tk.call(self._w, 'dropsite', 'clear')
def insert(self, index, cnf={}, **kw):
apply(self.tk.call,
(self._w, 'insert', index) + self._options(cnf, kw))
def info_active(self):
return self.tk.call(self._w, 'info', 'active')
def info_anchor(self):
return self.tk.call(self._w, 'info', 'anchor')
def info_down(self, index):
return self.tk.call(self._w, 'info', 'down', index)
def info_left(self, index):
return self.tk.call(self._w, 'info', 'left', index)
def info_right(self, index):
return self.tk.call(self._w, 'info', 'right', index)
def info_selection(self):
c = self.tk.call(self._w, 'info', 'selection')
return self.tk.splitlist(c)
def info_size(self):
return self.tk.call(self._w, 'info', 'size')
def info_up(self, index):
return self.tk.call(self._w, 'info', 'up', index)
def nearest(self, x, y):
return self.tk.call(self._w, 'nearest', x, y)
def see(self, index):
self.tk.call(self._w, 'see', index)
def selection_clear(self, cnf={}, **kw):
        self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))
def selection_includes(self, index):
return self.tk.call(self._w, 'selection', 'includes', index)
def selection_set(self, first, last=None):
self.tk.call(self._w, 'selection', 'set', first, last)
    def xview(self, *args):
        self.tk.call(self._w, 'xview', *args)
    def yview(self, *args):
        self.tk.call(self._w, 'yview', *args)
class Tree(TixWidget):
"""Tree - The tixTree widget can be used to display hierachical
data in a tree form. The user can adjust
the view of the tree by opening or closing parts of the tree."""
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixTree',
['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def autosetmode(self):
self.tk.call(self._w, 'autosetmode')
def close(self, entrypath):
self.tk.call(self._w, 'close', entrypath)
def getmode(self, entrypath):
return self.tk.call(self._w, 'getmode', entrypath)
def open(self, entrypath):
self.tk.call(self._w, 'open', entrypath)
def setmode(self, entrypath, mode='none'):
self.tk.call(self._w, 'setmode', entrypath, mode)
# Could try subclassing Tree for CheckList - would need another arg to init
class CheckList(TixWidget):
"""The CheckList widget
displays a list of items to be selected by the user. CheckList acts
similarly to the Tk checkbutton or radiobutton widgets, except it is
capable of handling many more items than checkbuttons or radiobuttons.
"""
def __init__(self, master=None, cnf={}, **kw):
TixWidget.__init__(self, master, 'tixCheckList',
['options'], cnf, kw)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
def autosetmode(self):
self.tk.call(self._w, 'autosetmode')
def close(self, entrypath):
self.tk.call(self._w, 'close', entrypath)
def getmode(self, entrypath):
return self.tk.call(self._w, 'getmode', entrypath)
def open(self, entrypath):
self.tk.call(self._w, 'open', entrypath)
    def getselection(self, mode='on'):
        '''Mode can be on, off, default'''
        c = self.tk.call(self._w, 'getselection', mode)
        return self.tk.splitlist(c)
    def getstatus(self, entrypath):
        return self.tk.call(self._w, 'getstatus', entrypath)
def setstatus(self, entrypath, mode='on'):
self.tk.call(self._w, 'setstatus', entrypath, mode)
###########################################################################
### The subclassing below is used to instantiate the subwidgets in each ###
### mega widget. This allows us to access their methods directly. ###
###########################################################################
class _dummyButton(Button, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyCheckbutton(Checkbutton, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyEntry(Entry, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyFrame(Frame, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyLabel(Label, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyListbox(Listbox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyMenu(Menu, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyMenubutton(Menubutton, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrollbar(Scrollbar, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyText(Text, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrolledListBox(ScrolledListBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyHList(HList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyScrolledHList(ScrolledHList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyTList(TList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
class _dummyComboBox(ComboBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
# I'm not sure about this destroy_physically=0 in all cases;
# it may depend on if -dropdown is true; I've added as a trial
self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
'slistbox',
destroy_physically=0)
self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox',
destroy_physically=0)
class _dummyDirList(DirList, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
class _dummyDirSelectBox(DirSelectBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
self.subwidget_list['types'] = _dummyComboBox(self, 'types')
self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
self.subwidget_list['file'] = _dummyComboBox(self, 'file')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
class _dummyFileSelectBox(FileSelectBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
class _dummyFileComboBox(ComboBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx')
class _dummyStdButtonBox(StdButtonBox, TixSubWidget):
def __init__(self, master, name, destroy_physically=1):
TixSubWidget.__init__(self, master, name, destroy_physically)
self.subwidget_list['ok'] = _dummyButton(self, 'ok')
self.subwidget_list['apply'] = _dummyButton(self, 'apply')
self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
self.subwidget_list['help'] = _dummyButton(self, 'help')
class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget):
def __init__(self, master, name, destroy_physically=0):
TixSubWidget.__init__(self, master, name, destroy_physically)
########################
### Utility Routines ###
########################
# Returns the qualified path name for the widget. Normally used to set
# default options for subwidgets. See tixwidgets.py
def OptionName(widget):
return widget.tk.call('tixOptionName', widget._w)
# Called with a dictionary argument of the form
# {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'}
# returns a string which can be used to configure the fsbox file types
# in an ExFileSelectBox. i.e.,
# '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}'
def FileTypeList(dict):
s = ''
for type in dict.keys():
s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} '
return s
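# For example, FileTypeList({'*.c': 'C source files'}) returns the string
# '{{*.c} {*.c - C source files}} ' (note the trailing space), matching the
# format shown in the comment above.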
# Still to be done:
class CObjView(TixWidget):
"""This file implements the Canvas Object View widget. This is a base
class of IconView. It implements automatic placement/adjustment of the
scrollbars according to the canvas objects inside the canvas subwidget.
The scrollbars are adjusted so that the canvas is just large enough
to see all the objects.
"""
pass
| mit | 2,482,072,190,428,551,700 | 37.341943 | 105 | 0.604629 | false |
RDeckers/ScientificVisualization-1TD389 | Assignments/Assignment 1/part2a/molecules.py | 1 | 7392 | """Molecular dynamics.
This script should display the atoms (and their connections) in a
molecular dynamics simulation dataset.
You can run the script from the command line by typing
python molecules.py
"""
from vtk import *
import molecules_io
import collections
import operator
#needed to determine the path to the source files
from os.path import dirname, realpath, join
# Define a class for the keyboard interface
def MakeLUTFromCTF(tableSize):#taken from http://www.cmake.org/Wiki/VTK/Examples/Python/Visualization/AssignColorsCellFromLUT, python does not seem to support "GetTable" on CTF by default.
'''
Use a color transfer Function to generate the colors in the lookup table.
See: http://www.vtk.org/doc/nightly/html/classvtkColorTransferFunction.html
:param: tableSize - The table size
:return: The lookup table.
'''
ctf = vtk.vtkColorTransferFunction()
#ctf.SetColorSpaceToDiverging()
#taken from http://colorbrewer2.org/, sequential data, colorblind safe.
ctf.AddRGBPoint(1.0, 255/255.0,255/255.0,217/255.0)
ctf.AddRGBPoint(0.875, 237/255.0,248/255.0,177/255.0)
ctf.AddRGBPoint(0.75, 199/255.0,233/255.0,180/255.0)
ctf.AddRGBPoint(0.625, 127/255.0,205/255.0,187/255.0)
ctf.AddRGBPoint(0.5, 65/255.0,182/255.0,196/255.0)
ctf.AddRGBPoint(0.375, 29/255.0,145/255.0,192/255.0)
ctf.AddRGBPoint(0.25, 34/255.0,94/255.0,168/255.0)
ctf.AddRGBPoint(0.125, 37/255.0,52/255.0,148/255.0)
ctf.AddRGBPoint(0.0, 8/255.0,29/255.0,88/255.0)
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(tableSize)
lut.Build()
for i in range(0,tableSize):
rgb = list(ctf.GetColor(float(i)/tableSize))+[1]
lut.SetTableValue(i,rgb)
return lut
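# Example usage of MakeLUTFromCTF (hypothetical; 'mapper' stands for whatever
# vtkMapper should be colored with this table):
#   lut = MakeLUTFromCTF(256)
#   mapper.SetLookupTable(lut)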
class KeyboardInterface(object):
"""Keyboard interface.
Provides a simple keyboard interface for interaction. You may
extend this interface with keyboard shortcuts for manipulating the
molecule visualization.
"""
def __init__(self):
self.screenshot_counter = 0
self.render_window = None
self.window2image_filter = None
self.png_writer = None
# Add the extra attributes you need here...
def keypress(self, obj, event):
"""This function captures keypress events and defines actions for
keyboard shortcuts."""
key = obj.GetKeySym()
if key == "9":
self.render_window.Render()
self.window2image_filter.Modified()
screenshot_filename = ("screenshot%02d.png" %
(self.screenshot_counter))
self.png_writer.SetFileName(screenshot_filename)
self.png_writer.Write()
print("Saved %s" % (screenshot_filename))
self.screenshot_counter += 1
# Add your keyboard shortcuts here. If you modify any of the
# actors or change some other parts or properties of the
# scene, don't forget to call the render window's Render()
# function to update the rendering.
# elif key == ...
# Read the data into a vtkPolyData object using the functions in
# molecules_io.py
basedir = dirname(realpath(__file__)) #Get the directory of the .py file, courtesy of http://stackoverflow.com/a/5137509/4455880
data = vtk.vtkPolyData()
data.SetPoints(molecules_io.read_points(join(basedir, "coordinates.txt")))
data.GetPointData().SetScalars(molecules_io.read_scalars(join(basedir, "radii.txt")))
data.SetLines(molecules_io.read_connections(join(basedir, "connections.txt")))
pd = data.GetPointData()
pd.GetScalars().SetName("radii")
colors = vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
color_table =[[255,0,0],[128,255,0],[0,255,255],[127,0,255], [255,0,255],[255,127,0],[0,255,0], [0,127,255] ]
color_dictionary = dict()
scalars = data.GetPointData().GetScalars()
current_key = 0;
for i in range(0, scalars.GetNumberOfTuples()):
scalar = scalars.GetTuple1(i)
  if scalar in color_dictionary:
colors.InsertNextTuple(color_dictionary[scalar])
else:
color_dictionary[scalar] = color_table[current_key]
colors.InsertNextTuple(color_table[current_key])
current_key += 1 #will fail if color_table too small
pd.AddArray(colors)
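# At this point every point has an RGB entry in the "Colors" array; each
# distinct radius value is assigned its own color from color_table.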
print(color_dictionary)
sphere_source = vtkSphereSource()
glyph = vtkGlyph3D();
glyph.SetSourceConnection(sphere_source.GetOutputPort());
glyph.SetInput(data)
glyph.SetColorModeToColorByScalar()
glyph.Update()
mapper_molecules = vtkPolyDataMapper()
mapper_connections = vtkPolyDataMapper()
mapper_molecules.SetInputConnection(glyph.GetOutputPort())
mapper_molecules.SetScalarModeToUsePointFieldData()
mapper_molecules.SelectColorArray("Colors")
tube_filter = vtkTubeFilter()
tube_filter.SetInput(data)
tube_filter.SetVaryRadiusToVaryRadiusOff()
tube_filter.SetRadius(0.05)
tube_filter.SetNumberOfSides(15)
mapper_connections.SetInputConnection(tube_filter.GetOutputPort())
#mapper_connections.SetInput(data) #Map with normal lines
#mapper_connections.SetScalarModeToUsePointFieldData()
#mapper_connections.SelectColorArray("Colors")
mapper_connections.ScalarVisibilityOff()
legend = vtkLegendBoxActor()
legend.SetNumberOfEntries(len(color_dictionary)+1)
legend.SetEntryColor(0, 1,1,1)
legend.SetEntryString(0, "RADII:")
sorted_dict = sorted(color_dictionary.items(), key=operator.itemgetter(0))
index = 1
print(sorted_dict)
for key_color in sorted_dict:
key = key_color[0]
color = key_color[1]
legend.SetEntryColor(index, color[0]/255., color[1]/255., color[2]/255.)
legend.SetEntryString(index, "%.2f" % key)
index += 1
legend.SetBackgroundColor(0,0,0)
legend.UseBackgroundOn()
legend.SetPosition(0,0)
#legend.SetBackgroundOpacity(1)
outline = vtkOutlineFilter()
outline.SetInput(data)
mapper_outline = vtkPolyDataMapper()
mapper_outline.SetInputConnection(outline.GetOutputPort())
actor_molecules = vtkActor()
actor_molecules.SetMapper(mapper_molecules)
actor_connections = vtkActor()
actor_connections.SetMapper(mapper_connections)
actor_outline = vtkActor()
actor_outline.SetMapper(mapper_outline)
# Create a renderer and add the actors to it
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.2, 0.2, 0.2)
renderer.AddActor(actor_molecules)
renderer.AddActor(actor_connections)
renderer.AddActor(legend)
renderer.AddActor(actor_outline)
# Create a render window
render_window = vtk.vtkRenderWindow()
render_window.SetWindowName("Molecular dynamics")
render_window.SetSize(500, 500)
render_window.AddRenderer(renderer)
# Create an interactor
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(render_window)
# Create a window-to-image filter and a PNG writer that can be used
# to take screenshots
window2image_filter = vtk.vtkWindowToImageFilter()
window2image_filter.SetInput(render_window)
png_writer = vtk.vtkPNGWriter()
png_writer.SetInput(window2image_filter.GetOutput())
# Set up the keyboard interface
keyboard_interface = KeyboardInterface()
keyboard_interface.render_window = render_window
keyboard_interface.window2image_filter = window2image_filter
keyboard_interface.png_writer = png_writer
# Connect the keyboard interface to the interactor
interactor.AddObserver("KeyPressEvent", keyboard_interface.keypress)
# Initialize the interactor and start the rendering loop
interactor.Initialize()
render_window.Render()
interactor.Start()
| gpl-3.0 | 3,674,771,079,054,238,000 | 33.381395 | 188 | 0.736201 | false |
dariosena/LearningPython | PY-14/forca.py | 1 | 2811 | import random
def jogar():
imprime_messagem_de_abertura()
palavra_secreta = carrega_palavra_secreta()
letras_acertadas = ['_' for letra in palavra_secreta]
acertou = False
enforcou = False
erros = 0
print(letras_acertadas)
while (not acertou and not enforcou):
chute = pede_chute()
if (chute in palavra_secreta):
posicao = 0
for letra in palavra_secreta:
if (chute == letra):
letras_acertadas[posicao] = letra
posicao += 1
else:
erros += 1
acertou = '_' not in letras_acertadas
enforcou = erros == 7
print(letras_acertadas)
print()
if (acertou):
imprime_messagem_vencedor()
else:
imprime_messagem_perdedor(palavra_secreta)
print()
print('Fim do Jogo!')
def pede_chute():
chute = input('\nQual letra? ')
chute = chute.strip().upper()
return chute
def carrega_palavra_secreta():
arquivo = open('palavras.txt', 'r')
palavras = []
for linha in arquivo:
linha = linha.strip()
palavras.append(linha)
arquivo.close()
numero = random.randrange(0, len(palavras))
palavra_secreta = palavras[numero].upper()
return palavra_secreta
def imprime_messagem_de_abertura():
print('************************************')
print('*** Bem Vindo ao Jogo da Forca ***')
print('************************************')
def imprime_messagem_vencedor():
print('Parabéns, você ganhou!')
print(" ___________ ")
print(" '._==_==_=_.' ")
print(" .-\\: /-. ")
print(" | (|:. |) | ")
print(" '-|:. |-' ")
print(" \\::. / ")
print(" '::. .' ")
print(" ) ( ")
print(" _.' '._ ")
print(" '-------' ")
def imprime_messagem_perdedor(palavra_secreta):
print('Puxa, você foi enforcado!')
print('A palavra era {}'.format(palavra_secreta))
print(" _______________ ")
print(" / \ ")
print(" / \ ")
print("// \/\ ")
print("\| XXXX XXXX | / ")
print(" | XXXX XXXX |/ ")
print(" | XXX XXX | ")
print(" | | ")
print(" \__ XXX __/ ")
print(" |\ XXX /| ")
print(" | | | | ")
print(" | I I I I I I I | ")
print(" | I I I I I I | ")
print(" \_ _/ ")
print(" \_ _/ ")
print(" \_______/ ")
| gpl-3.0 | 1,580,980,590,459,419,000 | 26.271845 | 57 | 0.404202 | false |
forseti-security/forseti-security | google/cloud/forseti/common/gcp_type/appengine.py | 1 | 1911 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An AppEngine Application.
See: https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps
"""
from builtins import object
from google.cloud.forseti.common.util import parser
# pylint: disable=too-many-instance-attributes
class Application(object):
"""Represents Instance resource."""
def __init__(self, **kwargs):
"""AppEngine Application resource.
Args:
**kwargs (dict): The keyworded variable args.
"""
self.project_id = kwargs.get('project_id')
self.name = kwargs.get('name')
self.app_id = kwargs.get('app_id')
self.dispatch_rules = parser.json_unstringify(
kwargs.get('dispatch_rules'))
self.auth_domain = kwargs.get('auth_domain')
self.location_id = kwargs.get('location_id')
self.code_bucket = kwargs.get('code_bucket')
self.default_cookie_expiration = kwargs.get('default_cookie_expiration')
self.serving_status = kwargs.get('serving_status')
self.default_hostname = kwargs.get('default_hostname')
self.default_bucket = kwargs.get('default_bucket')
self.iap = parser.json_unstringify(kwargs.get('iap'))
self.gcr_domain = kwargs.get('gcr_domain')
self.raw_application = kwargs.get('raw_application')
| apache-2.0 | 3,290,040,204,077,729,300 | 38.8125 | 80 | 0.689691 | false |
jhogsett/linkit | python/cyclone.py | 1 | 3241 | #!/usr/bin/python
import serial
import time
import random
import sys
s = None
num_leds = 93
play_time = 0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
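# Commands are sent as plain text terminated by ':'; wait_for_ack() then blocks
# until the device replies and drains the response before the next command.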
def setup():
global s, ticks, play_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pau:clr")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
play_time = float(sys.argv[2])
command("6:zon:blk:red:rep:3,0:cpy")
command("5:zon:blk:org:rep:3,0:cpy")
command("4:zon:blk:grn:rep:3,0:cpy")
command("3:zon:blk:blu:rep:3,0:cpy")
command("2:zon:blk:pur:rep:3,0:cpy")
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "black", "random" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
  for i in range(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zon:" + color + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
global idx
idx = -1
def loop():
command("6:zon:rot")
command("5:zon:rot")
command("4:zon:rot")
command("3:zon:rot")
command("2:zon:rot")
command("flu")
time.sleep(play_time)
if __name__ == '__main__':
setup()
while True:
loop()
| mit | -2,699,644,802,816,666,000 | 31.089109 | 151 | 0.39062 | false |
amcat/amcat | amcat/tools/clustermap.py | 1 | 6550 | ##########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Contains functions to create a clustermap. That is, given a bunch of queries,
which groups of articles are in q1, q1 AND q2, etc. Can be used as input for
visualisation software such as:
http://www.aduna-software.com/technology/clustermap
"""
import os
import subprocess
from collections import defaultdict, OrderedDict
from itertools import chain
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.template import Context
from django.template.loader import get_template
from lxml import html
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# XML template given to Aduna binary
XML_TEMPLATE = get_template("query/clustermap/cluster.xml")
# Location of Aduna binaries
ADUNA_JARS = ("aduna-clustermap-2006.1.jar", "aduna-clustermap-2006.1-resources.jar")
ADUNA_PATH = os.path.join(settings.ROOT, "amcat/contrib/java")
CLASS_PATH = ":".join(chain((ADUNA_PATH,), [
os.path.join(ADUNA_PATH, jar) for jar in ADUNA_JARS
]))
# Minimal memory allocated by JVM
ADUNA_MEMORY = "1000m"
# Template for interactive clustermap
HTML_TEMPLATE = get_template("query/clustermap/clustermap.html")
### CLUSTER LOGIC ###
def combinations(iterable):
"""
Returns a generator yielding all combinations of all lengths of `iterable` as tuples.
Care should be taken, as there are 2^n of these combinations.
"""
    items = tuple(iterable)
    if len(items) == 0:
        yield ()
        return
    head, tail = items[0], items[1:]
for result in combinations(tail):
yield (head,) + result
yield result
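# For example, combinations([1, 2]) yields (1, 2), (2,), (1,) and (), i.e.
# every subset of the input, including the empty one.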
def get_clusters(queries) -> dict:
"""Based on a mapping {query: ids} determine a mapping {[query] -> [ids]}, thus
determining the cluster it belongs to.
@param queries.keys(): SearchQuery
@param queries.values(): List of ids
@returns: mapping of cluster (frozenset of queries) to a set of article ids
"""
queries = {q: set(ids) for q, ids in queries.items()}
article_clusters = defaultdict(set)
for query, aids in queries.items():
for aid in aids:
article_clusters[aid].add(query)
clusters = defaultdict(set)
for aid, queries in article_clusters.items():
clusters[frozenset(queries)].add(aid)
return clusters
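# For example, get_clusters({q1: [1, 2], q2: [2, 3]}) returns
# {frozenset({q1}): {1}, frozenset({q1, q2}): {2}, frozenset({q2}): {3}}.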
def get_clustermap_table(queries):
"""
Given a mapping of query to ids, return a table with the #hits for each boolean combination
"""
queries = OrderedDict((k, set(v)) for (k,v) in queries.items())
header = sorted(queries.keys(), key=lambda q: str(q))
rows = []
allids = set(chain.from_iterable(queries.values()))
for c in combinations(header):
ids = allids.copy()
row = []
for q in header:
row.append(int(q in c))
if q in c:
ids &= queries[q]
else:
ids -= queries[q]
n = len(ids)
if n:
rows.append(tuple(row + [n]))
return [h.label for h in header] + ["Total"], rows
def _get_cluster_query(all_queries, cluster_queries):
# We sort the queries to generate the queries in a deterministic manner
exclude_queries = "(%s)" % ") OR (".join(sorted(q.query for q in all_queries - cluster_queries))
include_queries = "(%s)" % ") AND (".join(sorted(q.query for q in cluster_queries))
return "({include_queries}) NOT ({exclude_queries})".format(**locals()).replace(" NOT (())", "")
def get_cluster_queries(clusters):
"""Based on a collection of clusters (for example those returned by get_clusters()),
determine the query needed to fetch the articles in that particular cluster.
"""
all_queries = set(chain.from_iterable(clusters))
return (_get_cluster_query(all_queries, queries) for queries in clusters)
### ADUNA CLUSTERMAP IMAGE LOGIC ###
class AdunaException(Exception):
pass
def aduna(xml_path, img_path):
args = ["java", "-classpath", CLASS_PATH, "-Xms%s" % ADUNA_MEMORY, "Cluster", xml_path, img_path]
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except FileNotFoundError:
raise AdunaException("Java executable not found")
if not stdout:
raise AdunaException("Aduna clustermap proces generated error: %s" % stderr)
return open(img_path, "rb").read(), stdout, stderr
def clustermap_html_to_coords(_html):
doc = html.fromstring(_html)
for area in doc.cssselect("area"):
coords = list(map(int, area.attrib["coords"].split(",")))
article_id = int(area.attrib["href"])
yield {"coords": coords, "article_id": article_id}
def get_clustermap_image(queries):
"""Based on a mapping {query: ids} render an Aduno clustermap.
@returns: (image bytes, html) """
all_article_ids = list(chain.from_iterable(queries.values()))
with NamedTemporaryFile(suffix=".xml", mode="wb") as xml:
context = locals()
rendered = XML_TEMPLATE.render(context)
xml.write(rendered.encode('utf-8'))
xml.flush()
with NamedTemporaryFile(suffix=".png") as png:
return aduna(xml.name, png.name)[:-1]
| agpl-3.0 | 7,175,982,662,567,712,000 | 36.00565 | 101 | 0.611298 | false |
zbarge/zeex | zeex/core/ui/basic/push_grid_ui.py | 1 | 2316 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Zeke/Google Drive/dev/python/zeex/zeex/core/ui/basic/push_grid.ui'
#
# Created: Mon Nov 13 22:57:17 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_PushGridWidget(object):
def setupUi(self, PushGridWidget):
PushGridWidget.setObjectName("PushGridWidget")
PushGridWidget.resize(302, 203)
self.gridLayoutWidget_2 = QtGui.QWidget(PushGridWidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 301, 201))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.pushGrid = QtGui.QGridLayout(self.gridLayoutWidget_2)
self.pushGrid.setContentsMargins(0, 0, 0, 0)
self.pushGrid.setObjectName("pushGrid")
self.listViewLeft = QtGui.QListView(self.gridLayoutWidget_2)
self.listViewLeft.setObjectName("listViewLeft")
self.pushGrid.addWidget(self.listViewLeft, 0, 0, 1, 1)
self.listViewRight = QtGui.QListView(self.gridLayoutWidget_2)
self.listViewRight.setObjectName("listViewRight")
self.pushGrid.addWidget(self.listViewRight, 0, 2, 1, 1)
self.btnGrid = QtGui.QGridLayout()
self.btnGrid.setObjectName("btnGrid")
self.btnPushRight = QtGui.QPushButton(self.gridLayoutWidget_2)
self.btnPushRight.setObjectName("btnPushRight")
self.btnGrid.addWidget(self.btnPushRight, 0, 0, 1, 1)
self.btnPushLeft = QtGui.QPushButton(self.gridLayoutWidget_2)
self.btnPushLeft.setObjectName("btnPushLeft")
self.btnGrid.addWidget(self.btnPushLeft, 1, 0, 1, 1)
self.pushGrid.addLayout(self.btnGrid, 0, 1, 1, 1)
self.retranslateUi(PushGridWidget)
QtCore.QMetaObject.connectSlotsByName(PushGridWidget)
def retranslateUi(self, PushGridWidget):
PushGridWidget.setWindowTitle(QtGui.QApplication.translate("PushGridWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.btnPushRight.setText(QtGui.QApplication.translate("PushGridWidget", ">>", None, QtGui.QApplication.UnicodeUTF8))
self.btnPushLeft.setText(QtGui.QApplication.translate("PushGridWidget", "<<", None, QtGui.QApplication.UnicodeUTF8))
| mit | -6,414,148,394,479,850,000 | 50.466667 | 131 | 0.717185 | false |
Eksmo/calibre | src/calibre/devices/usbms/driver.py | 1 | 22481 | # -*- coding: utf-8 -*-
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Generic USB Mass storage device driver. This is not a complete stand alone
driver. It is intended to be subclassed with the relevant parts implemented
for a particular device.
'''
import os, re, time, json, functools, shutil
from itertools import cycle
from calibre.constants import numeric_version
from calibre import prints, isbytestring
from calibre.constants import filesystem_encoding, DEBUG
from calibre.devices.usbms.cli import CLI
from calibre.devices.usbms.device import Device
from calibre.devices.usbms.books import BookList, Book
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.utils.config import from_json, to_json
from calibre.utils.date import now, isoformat
BASE_TIME = None
def debug_print(*args):
global BASE_TIME
if BASE_TIME is None:
BASE_TIME = time.time()
if DEBUG:
prints('DEBUG: %6.1f'%(time.time()-BASE_TIME), *args)
# CLI must come before Device as it implements the CLI functions that
# are inherited from the device interface in Device.
class USBMS(CLI, Device):
'''
The base class for all USBMS devices. Implements the logic for
sending/getting/updating metadata/caching metadata/etc.
'''
description = _('Communicate with an eBook reader.')
author = _('John Schember')
supported_platforms = ['windows', 'osx', 'linux']
# Store type instances of BookList and Book. We must do this because
# a) we need to override these classes in some device drivers, and
# b) the classmethods seem only to see real attributes declared in the
# class, not attributes stored in the class
booklist_class = BookList
book_class = Book
FORMATS = []
CAN_SET_METADATA = []
METADATA_CACHE = 'metadata.calibre'
DRIVEINFO = 'driveinfo.calibre'
SCAN_FROM_ROOT = False
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
import uuid
if not isinstance(dinfo, dict):
dinfo = {}
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = unicode(uuid.uuid4())
if dinfo.get('device_name') is None:
dinfo['device_name'] = self.get_gui_name()
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join([unicode(i) for i in numeric_version])
dinfo['date_last_connected'] = isoformat(now())
dinfo['prefix'] = prefix.replace('\\', '/')
return dinfo
def _update_driveinfo_file(self, prefix, location_code, name=None):
if os.path.exists(os.path.join(prefix, self.DRIVEINFO)):
with open(os.path.join(prefix, self.DRIVEINFO), 'rb') as f:
try:
driveinfo = json.loads(f.read(), object_hook=from_json)
except:
driveinfo = None
driveinfo = self._update_driveinfo_record(driveinfo, prefix,
location_code, name)
with open(os.path.join(prefix, self.DRIVEINFO), 'wb') as f:
f.write(json.dumps(driveinfo, default=to_json))
else:
driveinfo = self._update_driveinfo_record({}, prefix, location_code, name)
with open(os.path.join(prefix, self.DRIVEINFO), 'wb') as f:
f.write(json.dumps(driveinfo, default=to_json))
return driveinfo
def get_device_information(self, end_session=True):
self.report_progress(1.0, _('Get device information...'))
self.driveinfo = {}
if self._main_prefix is not None:
try:
self.driveinfo['main'] = self._update_driveinfo_file(self._main_prefix, 'main')
except (IOError, OSError) as e:
raise IOError(_('Failed to access files in the main memory of'
' your device. You should contact the device'
' manufacturer for support. Common fixes are:'
' try a different USB cable/USB port on your computer.'
' If you device has a "Reset to factory defaults" type'
' of setting somewhere, use it. Underlying error: %s')
% e)
try:
if self._card_a_prefix is not None:
self.driveinfo['A'] = self._update_driveinfo_file(self._card_a_prefix, 'A')
if self._card_b_prefix is not None:
self.driveinfo['B'] = self._update_driveinfo_file(self._card_b_prefix, 'B')
except (IOError, OSError) as e:
raise IOError(_('Failed to access files on the SD card in your'
' device. This can happen for many reasons. The SD card may be'
' corrupted, it may be too large for your device, it may be'
' write-protected, etc. Try a different SD card, or reformat'
' your SD card using the FAT32 filesystem. Also make sure'
' there are not too many files in the root of your SD card.'
' Underlying error: %s') % e)
return (self.get_gui_name(), '', '', '', self.driveinfo)
def set_driveinfo_name(self, location_code, name):
if location_code == 'main':
self._update_driveinfo_file(self._main_prefix, location_code, name)
elif location_code == 'A':
self._update_driveinfo_file(self._card_a_prefix, location_code, name)
elif location_code == 'B':
self._update_driveinfo_file(self._card_b_prefix, location_code, name)
def formats_to_scan_for(self):
return set(self.settings().format_map) | set(self.FORMATS)
def books(self, oncard=None, end_session=True):
from calibre.ebooks.metadata.meta import path_to_ext
debug_print ('USBMS: Fetching list of books from device. Device=',
self.__class__.__name__,
'oncard=', oncard)
dummy_bl = self.booklist_class(None, None, None)
if oncard == 'carda' and not self._card_a_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard == 'cardb' and not self._card_b_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
self._card_b_prefix if oncard == 'cardb' \
else self._main_prefix
ebook_dirs = self.get_carda_ebook_dir() if oncard == 'carda' else \
self.EBOOK_DIR_CARD_B if oncard == 'cardb' else \
self.get_main_ebook_dir()
debug_print ('USBMS: dirs are:', prefix, ebook_dirs)
# get the metadata cache
bl = self.booklist_class(oncard, prefix, self.settings)
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
for idx,b in enumerate(bl):
bl_cache[b.lpath] = idx
all_formats = self.formats_to_scan_for()
def update_booklist(filename, path, prefix):
changed = False
if path_to_ext(filename) in all_formats:
try:
lpath = os.path.join(path, filename).partition(self.normalize_path(prefix))[2]
if lpath.startswith(os.sep):
lpath = lpath[len(os.sep):]
lpath = lpath.replace('\\', '/')
idx = bl_cache.get(lpath, None)
if idx is not None:
bl_cache[lpath] = None
if self.update_metadata_item(bl[idx]):
#print 'update_metadata_item returned true'
changed = True
else:
if bl.add_book(self.book_from_path(prefix, lpath),
replace_metadata=False):
changed = True
except: # Probably a filename encoding error
import traceback
traceback.print_exc()
return changed
if isinstance(ebook_dirs, basestring):
ebook_dirs = [ebook_dirs]
for ebook_dir in ebook_dirs:
ebook_dir = self.path_to_unicode(ebook_dir)
if self.SCAN_FROM_ROOT:
ebook_dir = self.normalize_path(prefix)
else:
ebook_dir = self.normalize_path( \
os.path.join(prefix, *(ebook_dir.split('/'))) \
if ebook_dir else prefix)
debug_print('USBMS: scan from root', self.SCAN_FROM_ROOT, ebook_dir)
if not os.path.exists(ebook_dir): continue
# Get all books in the ebook_dir directory
if self.SUPPORTS_SUB_DIRS or self.SUPPORTS_SUB_DIRS_FOR_SCAN:
# build a list of files to check, so we can accurately report progress
flist = []
for path, dirs, files in os.walk(ebook_dir):
for filename in files:
if filename != self.METADATA_CACHE:
flist.append({'filename': self.path_to_unicode(filename),
'path':self.path_to_unicode(path)})
for i, f in enumerate(flist):
self.report_progress(i/float(len(flist)), _('Getting list of books on device...'))
changed = update_booklist(f['filename'], f['path'], prefix)
if changed:
need_sync = True
else:
paths = os.listdir(ebook_dir)
for i, filename in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Getting list of books on device...'))
changed = update_booklist(self.path_to_unicode(filename), ebook_dir, prefix)
if changed:
need_sync = True
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
debug_print('USBMS: count found in cache: %d, count of files in metadata: %d, need_sync: %s' % \
(len(bl_cache), len(bl), need_sync))
if need_sync: #self.count_found_in_bl != len(bl) or need_sync:
if oncard == 'cardb':
self.sync_booklists((None, None, bl))
elif oncard == 'carda':
self.sync_booklists((None, bl, None))
else:
self.sync_booklists((bl, None, None))
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print('USBMS: Finished fetching list of books from device. oncard=', oncard)
return bl
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
debug_print('USBMS: uploading %d books'%(len(files)))
path = self._sanity_check(on_card, files)
paths = []
names = iter(names)
metadata = iter(metadata)
for i, infile in enumerate(files):
mdata, fname = metadata.next(), names.next()
filepath = self.normalize_path(self.create_upload_path(path, mdata, fname))
if not hasattr(infile, 'read'):
infile = self.normalize_path(infile)
filepath = self.put_file(infile, filepath, replace_file=True)
paths.append(filepath)
try:
self.upload_cover(os.path.dirname(filepath),
os.path.splitext(os.path.basename(filepath))[0],
mdata, filepath)
except: # Failure to upload cover is not catastrophic
import traceback
traceback.print_exc()
self.report_progress((i+1) / float(len(files)), _('Transferring books to device...'))
self.report_progress(1.0, _('Transferring books to device...'))
debug_print('USBMS: finished uploading %d books'%(len(files)))
return zip(paths, cycle([on_card]))
def upload_cover(self, path, filename, metadata, filepath):
'''
Upload book cover to the device. Default implementation does nothing.
:param path: The full path to the directory where the associated book is located.
:param filename: The name of the book file without the extension.
:param metadata: metadata belonging to the book. Use metadata.thumbnail
for cover
:param filepath: The full path to the ebook file
'''
pass
def add_books_to_metadata(self, locations, metadata, booklists):
debug_print('USBMS: adding metadata for %d books'%(len(metadata)))
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = metadata.next()
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
# Extract the correct prefix from the pathname. To do this correctly,
# we must ensure that both the prefix and the path are normalized
# so that the comparison will work. Book's __init__ will fix up
# lpath, so we don't need to worry about that here.
path = self.normalize_path(location[0])
if self._main_prefix:
prefix = self._main_prefix if \
path.startswith(self.normalize_path(self._main_prefix)) else None
if not prefix and self._card_a_prefix:
prefix = self._card_a_prefix if \
path.startswith(self.normalize_path(self._card_a_prefix)) else None
if not prefix and self._card_b_prefix:
prefix = self._card_b_prefix if \
path.startswith(self.normalize_path(self._card_b_prefix)) else None
if prefix is None:
prints('in add_books_to_metadata. Prefix is None!', path,
self._main_prefix)
continue
lpath = path.partition(prefix)[2]
if lpath.startswith('/') or lpath.startswith('\\'):
lpath = lpath[1:]
book = self.book_class(prefix, lpath, other=info)
if book.size is None:
book.size = os.stat(self.normalize_path(path)).st_size
b = booklists[blist].add_book(book, replace_metadata=True)
if b:
b._new_book = True
self.report_progress(1.0, _('Adding books to device metadata listing...'))
debug_print('USBMS: finished adding metadata')
def delete_books(self, paths, end_session=True):
debug_print('USBMS: deleting %d books'%(len(paths)))
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
if os.path.exists(path):
# Delete the ebook
os.unlink(path)
filepath = os.path.splitext(path)[0]
for ext in self.DELETE_EXTS:
for x in (filepath, path):
x += ext
if os.path.exists(x):
if os.path.isdir(x):
shutil.rmtree(x, ignore_errors=True)
else:
os.unlink(x)
if self.SUPPORTS_SUB_DIRS:
try:
os.removedirs(os.path.dirname(path))
except:
pass
self.report_progress(1.0, _('Removing books from device...'))
debug_print('USBMS: finished deleting %d books'%(len(paths)))
def remove_books_from_metadata(self, paths, booklists):
debug_print('USBMS: removing metadata for %d books'%(len(paths)))
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
if path.endswith(book.path):
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
debug_print('USBMS: finished removing metadata for %d books'%(len(paths)))
# If you override this method and you use book._new_book, then you must
# complete the processing before you call this method. The flag is cleared
# at the end just before the return
def sync_booklists(self, booklists, end_session=True):
debug_print('USBMS: starting sync_booklists')
json_codec = JsonCodec()
if not os.path.exists(self.normalize_path(self._main_prefix)):
os.makedirs(self.normalize_path(self._main_prefix))
def write_prefix(prefix, listid):
if (prefix is not None and len(booklists) > listid and
isinstance(booklists[listid], self.booklist_class)):
if not os.path.exists(prefix):
os.makedirs(self.normalize_path(prefix))
with open(self.normalize_path(os.path.join(prefix, self.METADATA_CACHE)), 'wb') as f:
json_codec.encode_to_file(f, booklists[listid])
write_prefix(self._main_prefix, 0)
write_prefix(self._card_a_prefix, 1)
write_prefix(self._card_b_prefix, 2)
# Clear the _new_book indication, as we are supposed to be done with
# adding books at this point
for blist in booklists:
if blist is not None:
for book in blist:
book._new_book = False
self.report_progress(1.0, _('Sending metadata to device...'))
debug_print('USBMS: finished sync_booklists')
@classmethod
def build_template_regexp(cls):
def replfunc(match, seen=None):
v = match.group(1)
if v in ['authors', 'author_sort']:
v = 'author'
if v in ('title', 'series', 'series_index', 'isbn', 'author'):
if v not in seen:
seen.add(v)
return '(?P<' + v + '>.+?)'
return '(.+?)'
s = set()
f = functools.partial(replfunc, seen=s)
template = None
try:
template = cls.save_template().rpartition('/')[2]
return re.compile(re.sub('{([^}]*)}', f, template) + '([_\d]*$)')
except:
prints(u'Failed to parse template: %r'%template)
template = u'{title} - {authors}'
return re.compile(re.sub('{([^}]*)}', f, template) + '([_\d]*$)')
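    # For the fallback template u'{title} - {authors}' the compiled pattern is
    # roughly '(?P<title>.+?) - (?P<author>.+?)([_\d]*$)', so a file name such
    # as 'My Book - Some Author' yields title='My Book', author='Some Author'.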
@classmethod
def path_to_unicode(cls, path):
if isbytestring(path):
path = path.decode(filesystem_encoding)
return path
@classmethod
def normalize_path(cls, path):
'Return path with platform native path separators'
if path is None:
return None
if os.sep == '\\':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
return cls.path_to_unicode(path)
@classmethod
def parse_metadata_cache(cls, bl, prefix, name):
json_codec = JsonCodec()
need_sync = False
cache_file = cls.normalize_path(os.path.join(prefix, name))
if os.access(cache_file, os.R_OK):
try:
with open(cache_file, 'rb') as f:
json_codec.decode_from_file(f, bl, cls.book_class, prefix)
except:
import traceback
traceback.print_exc()
bl = []
need_sync = True
else:
need_sync = True
return need_sync
@classmethod
def update_metadata_item(cls, book):
changed = False
size = os.stat(cls.normalize_path(book.path)).st_size
if size != book.size:
changed = True
mi = cls.metadata_from_path(book.path)
book.smart_update(mi)
book.size = size
return changed
@classmethod
def metadata_from_path(cls, path):
return cls.metadata_from_formats([path])
@classmethod
def metadata_from_formats(cls, fmts):
from calibre.ebooks.metadata.meta import metadata_from_formats
from calibre.customize.ui import quick_metadata
with quick_metadata:
return metadata_from_formats(fmts, force_read_metadata=True,
pattern=cls.build_template_regexp())
@classmethod
def book_from_path(cls, prefix, lpath):
from calibre.ebooks.metadata.book.base import Metadata
if cls.settings().read_metadata or cls.MUST_READ_METADATA:
mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath)))
else:
from calibre.ebooks.metadata.meta import metadata_from_filename
mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)),
cls.build_template_regexp())
if mi is None:
mi = Metadata(os.path.splitext(os.path.basename(lpath))[0],
[_('Unknown')])
size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size
book = cls.book_class(prefix, lpath, other=mi, size=size)
return book
| gpl-3.0 | 321,990,210,503,236,000 | 43.962 | 112 | 0.558872 | false |
joshbohde/scikit-learn | sklearn/svm/sparse/base.py | 1 | 11891 | import numpy as np
from ..base import BaseLibSVM, BaseLibLinear, _get_class_weight
from . import libsvm
from .. import liblinear
class SparseBaseLibSVM(BaseLibSVM):
_kernel_types = ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']
_svm_types = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability):
assert impl in self._svm_types, \
"impl should be one of %s, %s was given" % (
self._svm_types, impl)
assert kernel in self._kernel_types, \
"kernel should be one of %s, "\
"%s was given." % (self._kernel_types, kernel)
self.kernel = kernel
self.impl = impl
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
# container for when we call fit
self._support_data = np.empty(0, dtype=np.float64, order='C')
self._support_indices = np.empty(0, dtype=np.int32, order='C')
self._support_indptr = np.empty(0, dtype=np.int32, order='C')
# strictly speaking, dual_coef is not sparse (see Notes above)
self._dual_coef_data = np.empty(0, dtype=np.float64, order='C')
self._dual_coef_indices = np.empty(0, dtype=np.int32, order='C')
self._dual_coef_indptr = np.empty(0, dtype=np.int32, order='C')
self.intercept_ = np.empty(0, dtype=np.float64, order='C')
# only used in classification
self.n_support_ = np.empty(0, dtype=np.int32, order='C')
def fit(self, X, y, class_weight=None, sample_weight=[], cache_size=100.):
"""
Fit the SVM model according to the given training data and
parameters.
Parameters
----------
X : sparse matrix, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
class_weight : {dict, 'auto'}, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
The 'auto' mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
        For maximum efficiency, use a sparse matrix in csr format
(scipy.sparse.csr_matrix)
"""
import scipy.sparse
X = scipy.sparse.csr_matrix(X)
X.data = np.asanyarray(X.data, dtype=np.float64, order='C')
y = np.asanyarray(y, dtype=np.float64, order='C')
sample_weight = np.asanyarray(sample_weight, dtype=np.float64,
order='C')
solver_type = self._svm_types.index(self.impl)
kernel_type = self._kernel_types.index(self.kernel)
self.class_weight, self.class_weight_label = \
_get_class_weight(class_weight, y)
if (kernel_type in [1, 2]) and (self.gamma == 0):
# if custom gamma is not provided ...
self.gamma = 1.0 / X.shape[0]
self.label_, self.probA_, self.probB_ = libsvm.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y,
solver_type, kernel_type, self.degree, self.gamma,
self.coef0, self.tol, self.C, self._support_data,
self._support_indices, self._support_indptr,
self._dual_coef_data, self.intercept_,
self.class_weight_label, self.class_weight, sample_weight,
self.n_support_, self.nu, cache_size, self.epsilon,
int(self.shrinking), int(self.probability))
n_class = len(self.label_) - 1
n_SV = self._support_indptr.size - 1
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
# this will fail if n_SV is zero. This is a limitation
# in scipy.sparse, which does not permit empty matrices
self.support_vectors_ = scipy.sparse.csr_matrix((self._support_data,
self._support_indices,
self._support_indptr),
(n_SV, X.shape[1]))
self.dual_coef_ = scipy.sparse.csr_matrix((self._dual_coef_data,
dual_coef_indices,
dual_coef_indptr),
(n_class, n_SV)
)
return self
def predict(self, T):
"""
This function does classification or regression on an array of
test vectors T.
For a classification model, the predicted class for each
sample in T is returned. For a regression model, the function
value of T calculated is returned.
For an one-class model, +1 or -1 is returned.
Parameters
----------
T : scipy.sparse.csr, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
import scipy.sparse
T = scipy.sparse.csr_matrix(T)
T.data = np.asanyarray(T.data, dtype=np.float64, order='C')
kernel_type = self._kernel_types.index(self.kernel)
return libsvm.libsvm_sparse_predict(T.data, T.indices, T.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self.dual_coef_.data, self.intercept_,
self._svm_types.index(self.impl), kernel_type,
self.degree, self.gamma, self.coef0, self.tol,
self.C, self.class_weight_label, self.class_weight,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_, self.label_,
self.probA_, self.probB_)
def predict_proba(self, X):
"""
This function does classification or regression on a test vector X
given a model with probability information.
Parameters
----------
X : scipy.sparse.csr, shape = [n_samples, n_features]
Returns
-------
X : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered by arithmetical
order.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
datasets.
"""
if not self.probability:
raise ValueError(
"probability estimates must be enabled to use this method")
if self.impl not in ('c_svc', 'nu_svc'):
raise NotImplementedError("predict_proba only implemented for SVC and NuSVC")
import scipy.sparse
X = scipy.sparse.csr_matrix(X)
X.data = np.asanyarray(X.data, dtype=np.float64, order='C')
kernel_type = self._kernel_types.index(self.kernel)
return libsvm.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self.dual_coef_.data, self.intercept_,
self._svm_types.index(self.impl), kernel_type,
self.degree, self.gamma, self.coef0, self.tol,
self.C, self.class_weight_label, self.class_weight,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_, self.label_,
self.probA_, self.probB_)
class SparseBaseLibLinear(BaseLibLinear):
def fit(self, X, y, class_weight=None):
"""
Fit the model using X, y as training data.
Parameters
----------
X : sparse matrix, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns an instance of self.
"""
import scipy.sparse
X = scipy.sparse.csr_matrix(X)
X.data = np.asanyarray(X.data, dtype=np.float64, order='C')
y = np.asanyarray(y, dtype=np.int32, order='C')
self.class_weight, self.class_weight_label = \
_get_class_weight(class_weight, y)
self.raw_coef_, self.label_ = \
liblinear.csr_train_wrap(X.shape[1], X.data, X.indices,
X.indptr, y,
self._get_solver_type(),
self.tol, self._get_bias(), self.C,
self.class_weight_label, self.class_weight)
return self
def predict(self, X):
"""
Predict target values of X according to the fitted model.
Parameters
----------
X : sparse matrix, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
import scipy.sparse
X = scipy.sparse.csr_matrix(X)
self._check_n_features(X)
X.data = np.asanyarray(X.data, dtype=np.float64, order='C')
return liblinear.csr_predict_wrap(X.shape[1], X.data,
X.indices, X.indptr,
self.raw_coef_,
self._get_solver_type(),
self.tol, self.C,
self.class_weight_label,
self.class_weight, self.label_,
self._get_bias())
def decision_function(self, X):
"""
Return the decision function of X according to the trained
model.
Parameters
----------
X : sparse matrix, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_class]
Returns the decision function of the sample for each class
in the model.
"""
import scipy.sparse
X = scipy.sparse.csr_matrix(X)
self._check_n_features(X)
X.data = np.asanyarray(X.data, dtype=np.float64, order='C')
dec_func = liblinear.csr_decision_function_wrap(
X.shape[1], X.data, X.indices, X.indptr, self.raw_coef_,
self._get_solver_type(), self.tol, self.C,
self.class_weight_label, self.class_weight, self.label_,
self._get_bias())
if len(self.label_) <= 2:
# in the two-class case, the decision sign needs be flipped
# due to liblinear's design
return -dec_func
else:
return dec_func
libsvm.set_verbosity_wrap(0)
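# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The public wrapper
# name ``SVC`` is an assumption; the sketch only illustrates that the sparse
# estimators built on these base classes expect CSR input and that
# probability=True must be set before predict_proba() can be called.
#
# import numpy as np
# from scipy import sparse
# X = sparse.csr_matrix(np.array([[0., 1.], [1., 0.], [1., 1.], [0., 0.]]))
# y = np.array([0, 1, 1, 0])
# clf = SVC(kernel='linear', probability=True)  # assumed sparse SVC wrapper
# clf.fit(X, y)
# print(clf.predict_proba(X))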
| bsd-3-clause | -1,599,776,428,535,825,400 | 36.511041 | 89 | 0.539147 | false |
nbear3/Dolphin-Updater | Source/controllers/dolphin_control.py | 1 | 1169 | """Handle control over dolphin parsing"""
import urllib.request
from bs4 import BeautifulSoup
def get_dolphin_html():
url = 'https://dolphin-emu.org/download/'
response = urllib.request.urlopen(url)
data = response.read()
return data.decode('utf-8')
def get_dolphin_link(dolphin_html=None):
if dolphin_html is None:
dolphin_html = get_dolphin_html()
soup = BeautifulSoup(dolphin_html, "html.parser")
return soup.find_all('a', {"class": 'btn always-ltr btn-info win'}, limit=1, href=True)[0]['href']
def get_dolphin_changelog(dolphin_html=None):
if dolphin_html is None:
dolphin_html = get_dolphin_html()
text = ""
soup = BeautifulSoup(dolphin_html, "html.parser")
sections = soup.find('table', {"class": 'versions-list dev-versions'})
for section in sections.find_all('tr', {"class": 'infos'}):
version = section.find("td", {"class": "version"}).find("a").get_text()
reldate = section.find("td", {"class": "reldate"}).get_text()
change = section.find("td", {"class": "description"}).get_text()
text += version + " - " + reldate + ":\n" + change + "\n\n"
return text
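# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): fetch the download page once
# and reuse it for both helpers.
if __name__ == '__main__':
    html = get_dolphin_html()
    print(get_dolphin_link(html))
    print(get_dolphin_changelog(html))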
| gpl-3.0 | 7,803,553,696,066,041,000 | 31.472222 | 102 | 0.627032 | false |
bzamecnik/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics.py | 1 | 2006 | # matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from smst.utils import audio, peaks
from smst.models import dft
(fs, x) = audio.read_wav('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N / 2
t = -20
pin = 4850
x1 = x[pin:pin + w.size]
mX1, pX1 = dft.from_audio(x1, w, N)
ploc = peaks.find_peaks(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = peaks.interpolate_peaks(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs * np.arange(mX1.size) / float(N), mX1 - max(mX1), 'r', lw=1.5)
plt.plot(fs * iploc / N, ipmag - max(mX1), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -80, 4])
plt.title('mX + peaks (sine-440-490.wav)')
(fs, x) = audio.read_wav('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N / 2
t = -80
pin = 200
x2 = x[pin:pin + w.size]
mX2, pX2 = dft.from_audio(x2, w, N)
ploc = peaks.find_peaks(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = peaks.interpolate_peaks(mX2, pX2, ploc)
plt.subplot(3, 1, 2)
plt.plot(fs * np.arange(mX2.size) / float(N), mX2 - max(mX2), 'r', lw=1.5)
plt.plot(fs * iploc / N, ipmag - max(mX2), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500, 10000, -100, 4])
plt.title('mX + peaks (vibraphone-C6.wav)')
(fs, x) = audio.read_wav('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N / 2
t = -80
pin = 10000
x3 = x[pin:pin + w.size]
mX3, pX3 = dft.from_audio(x3, w, N)
ploc = peaks.find_peaks(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = peaks.interpolate_peaks(mX3, pX3, ploc)
plt.subplot(3, 1, 3)
plt.plot(fs * np.arange(mX3.size) / float(N), mX3 - max(mX3), 'r', lw=1.5)
plt.plot(fs * iploc / N, ipmag - max(mX3), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0, 6000, -70, 2])
plt.title('mX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics.png')
| agpl-3.0 | -1,890,405,769,671,008,500 | 28.940299 | 109 | 0.643569 | false |
likit/gimme | tests/find_RI_test.py | 1 | 3910 | import unittest
import networkx as nx
from utils.find_RI import get_exon_node, find_RI, add_intervals, Exon
from bx.intervals import IntervalTree
'''test data contain genes with 0, 1, 2, 3 and 4 retained introns.'''
test_file = "../test_data/RI.test.bed"
class TestLoadData(unittest.TestCase):
def test_load_data(self):
for exons, transcript_id in get_exon_node(test_file):
pass
self.assertTrue(len(exons) > 1)
class TestExonGraph(unittest.TestCase):
def test_build_exon_graph(self):
for exons, transcript_id in get_exon_node(test_file):
self.graph = nx.DiGraph()
self.graph.add_path([str(e) for e in exons])
self.assertEqual(len(exons), len(self.graph.nodes()))
class TestAddIntervals(unittest.TestCase):
def setUp(self):
self.exonsDB = {}
self.ex1 = Exon('chrX', 1000, 2000, 'ex1.1', '+')
self.ex2 = Exon('chrX', 3000, 4000, 'ex1.1', '+')
self.ex3 = Exon('chrX', 5000, 6000, 'ex1.1', '+')
self.ex4 = Exon('chrX', 7000, 8000, 'ex1.1', '+')
self.exonsDB[str(self.ex1)] = self.ex1
self.exonsDB[str(self.ex2)] = self.ex2
self.exonsDB[str(self.ex3)] = self.ex3
self.exonsDB[str(self.ex4)] = self.ex4
self.graph = nx.DiGraph()
self.graph.add_path([str(self.ex1), str(self.ex2),
str(self.ex3), str(self.ex4)])
self.tree = add_intervals(self.graph, self.exonsDB)
self.assertEqual(len(self.tree.find(1500, 5500)), 3)
class TestFindRI(unittest.TestCase):
def setUp(self):
self.exonsDB = {}
self.ex1 = Exon('chrX', 1000, 2000, 'ex1.1', '+')
self.ex2 = Exon('chrX', 3000, 4000, 'ex1.1', '+')
self.ex3 = Exon('chrX', 5000, 6000, 'ex1.1', '+')
self.ex4 = Exon('chrX', 7000, 8000, 'ex1.1', '+')
self.exonsDB[str(self.ex1)] = self.ex1
self.exonsDB[str(self.ex2)] = self.ex2
self.exonsDB[str(self.ex3)] = self.ex3
self.exonsDB[str(self.ex4)] = self.ex4
self.tree = IntervalTree()
self.tree.add_interval(self.ex1)
self.tree.add_interval(self.ex2)
self.tree.add_interval(self.ex3)
self.tree.add_interval(self.ex4)
self.graph = nx.DiGraph()
def test_no_retained_introns(self):
self.path1 = [str(self.ex1), str(self.ex2), str(self.ex3)]
self.path2 = [str(self.ex1), str(self.ex3), str(self.ex4)]
self.graph.add_path(self.path1)
self.graph.add_path(self.path2)
self.events = list(find_RI(self.graph, self.tree, self.exonsDB))
self.assertEqual(len(self.events), 0)
def test_one_retained_introns(self):
self.ex5 = Exon('chrX', 3000, 6000, 'ex1.1', '+')
self.exonsDB[str(self.ex5)] = self.ex5
self.tree.add_interval(self.ex5)
self.path1 = [str(self.ex1), str(self.ex2),
str(self.ex3), str(self.ex4)]
self.path2 = [str(self.ex1), str(self.ex5), str(self.ex4)]
self.graph.add_path(self.path1)
self.graph.add_path(self.path2)
self.events = list(find_RI(self.graph, self.tree, self.exonsDB))
self.assertEqual(len(self.events), 1)
def test_two_retained_introns(self):
self.ex5 = Exon('chrX', 1000, 4000, 'ex1.1', '+')
self.exonsDB[str(self.ex5)] = self.ex5
self.tree.add_interval(self.ex5)
self.ex6 = Exon('chrX', 5000, 8000, 'ex1.1', '+')
self.exonsDB[str(self.ex6)] = self.ex6
self.tree.add_interval(self.ex6)
self.path1 = [str(self.ex1), str(self.ex2),
str(self.ex3), str(self.ex4)]
self.path2 = [str(self.ex5), str(self.ex6)]
self.graph.add_path(self.path1)
self.graph.add_path(self.path2)
self.events = list(find_RI(self.graph, self.tree, self.exonsDB))
self.assertEqual(len(self.events), 2)
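# Standard unittest entry point (added here; the original file relies on an
# external test runner to discover these cases).
if __name__ == '__main__':
    unittest.main()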
| gpl-3.0 | -7,516,247,500,477,240,000 | 36.961165 | 72 | 0.589003 | false |
MatthewCox/PyMoronBot | pymoronbot/moduleinterface.py | 1 | 2293 | # -*- coding: utf-8 -*-
from zope.interface import Interface
from functools import wraps
from fnmatch import fnmatch
class IModule(Interface):
def actions():
"""
Returns the list of actions this module hooks into.
Actions are defined as a tuple with the following values:
(action_name, priority, function)
action_name (string): The name of the action.
priority (int): Actions are handled in order of priority.
Leave it at 1 unless you want to override another handler.
function (reference): A reference to the function in the module that handles this action.
"""
def onLoad():
"""
Called when the module is loaded. Typically loading data, API keys, etc.
"""
def hookBot(bot):
"""
Called when the bot is loaded to pass a reference to the bot for later use.
"""
def displayHelp(query, params):
"""
Catches help actions, checks if they are for this module, then calls help(query, params)
"""
def help(query, params):
"""
Returns help text describing what the module does.
Takes params as input so you can override with more complex help lookup.
"""
def onUnload():
"""
Called when the module is unloaded. Cleanup, if any.
"""
def ignore(func):
@wraps(func)
def wrapped(inst, message):
if inst.checkIgnoreList(message):
return
return func(inst, message)
return wrapped
class BotModule(object):
def actions(self):
return [('help', 1, self.displayHelp)]
def onLoad(self):
pass
def hookBot(self, bot):
self.bot = bot
def displayHelp(self, query):
if query[0].lower() == self.__class__.__name__.lower():
return self.help(query)
def help(self, query):
return "This module has no help text"
def onUnload(self):
pass
def checkIgnoreList(self, message):
"""
@type message: IRCMessage
@rtype Boolean
"""
for ignore in self.bot.config.getWithDefault('ignored', []):
if fnmatch(message.User.String, ignore):
return True
return False
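# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a minimal module built
# on BotModule, showing the (action_name, priority, function) tuples described
# by IModule.actions() and the @ignore decorator. The 'message' action name
# and the message attributes used are assumptions for the example.
#
# class Echo(BotModule):
#     def actions(self):
#         return super(Echo, self).actions() + [('message', 1, self.echo)]
#
#     @ignore
#     def echo(self, message):
#         return message.MessageString
#
#     def help(self, query):
#         return "Echoes whatever it hears."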
| mit | 3,675,141,298,688,035,300 | 25.976471 | 97 | 0.588748 | false |
slarosa/QGIS | python/plugins/sextante/modeler/MultilineTextPanel.py | 1 | 2953 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MultilineTextPanel.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class MultilineTextPanel(QtGui.QWidget):
USE_TEXT = 0
def __init__(self, options, model, parent = None):
super(MultilineTextPanel, self).__init__(parent)
self.options = options
self.model = model
self.verticalLayout = QtGui.QVBoxLayout(self)
self.verticalLayout.setSpacing(2)
self.verticalLayout.setMargin(0)
self.combo = QtGui.QComboBox()
self.combo.addItem("[Use text below]")
for option in options:
self.combo.addItem(option.name(), option)
self.combo.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addWidget(self.combo)
self.textBox = QtGui.QPlainTextEdit()
self.verticalLayout.addWidget(self.textBox)
self.setLayout(self.verticalLayout)
def setText(self, text):
self.textBox.setPlainText(text)
def getOption(self):
return self.combo.currentIndex()
def getValue(self):
if self.combo.currentIndex() == 0:
return unicode(self.textBox.toPlainText())
else:
return self.combo.itemData(self.combo.currentIndex()).toPyObject()
def setValue(self, value):
items = [self.combo.itemData(i).toPyObject() for i in range(1,self.combo.count())]
idx = 0
for item in items:
idx += 1
if item and value:
if item.alg == value.alg and item.param == value.param:
self.combo.setCurrentIndex(idx)
return
self.combo.setCurrentIndex(0)
value = self.model.getValueFromAlgorithmAndParameter(value)
if value:
self.textBox.setPlainText(str(value))
| gpl-2.0 | 2,789,578,933,932,325,000 | 36.858974 | 90 | 0.533695 | false |
savinash47/openstack-doc-tools | autogenerate_config_docs/hooks.py | 1 | 2796 | #
# A collection of shared functions for managing help flag mapping files.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Hooks to handle configuration options not handled on module import or with a
call to _register_runtime_opts(). The HOOKS dict associates hook functions with
a module path."""
def aodh_config():
# Aodh uses a local conf object, therefore we need to use the same method
# here to populate the global cfg.CONF object used by the script.
import aodh.opts as opts
from oslo_config import cfg
cfg.CONF = cfg.ConfigOpts()
for group, options in opts.list_opts():
cfg.CONF.register_opts(list(options),
group=None if group == "DEFAULT" else group)
def glance_store_config():
try:
import glance_store
from oslo_config import cfg
glance_store.backend.register_opts(cfg.CONF)
except ImportError:
# glance_store is not available before Juno
pass
def keystone_config():
from keystone.common import config
config.configure()
def neutron_misc():
try:
# These imports are needed for kilo only
import bsnstacklib.plugins.bigswitch.config
import networking_cisco.plugins.cisco.cfg_agent.device_status # noqa
import networking_l2gw.services.l2gateway.common.config as l2gw
import networking_vsphere.common.config
from oslo_config import cfg
import vmware_nsx.neutron.plugins.vmware.common.config # noqa
bsnstacklib.plugins.bigswitch.config.register_config()
networking_vsphere.common.config.register_options()
l2gw.register_l2gw_opts_helper()
l2gw.register_ovsdb_opts_helper(cfg.CONF)
except Exception:
pass
def nova_spice():
import os
# nova.cmd.__init__ before kilo requires to be imported before eventlet is.
# Since we can't make sure of that, we define this envvar to let nova know
# that the import is OK (see nova/cmd/__init__.py)
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import nova.cmd.spicehtml5proxy # noqa
HOOKS = {'aodh': aodh_config,
'glance.common.config': glance_store_config,
'keystone.common.config': keystone_config,
'neutron': neutron_misc,
'nova.spice': nova_spice}
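# Illustrative sketch (not part of the original file): how a caller is meant
# to consume HOOKS -- before touching a module's options, look up its path and
# run the matching hook, e.g.:
#
# def run_hook(module_path):
#     hook = HOOKS.get(module_path)
#     if hook is not None:
#         hook()
#
# run_hook('keystone.common.config')  # registers keystone options first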
| apache-2.0 | -8,774,004,303,210,777,000 | 33.518519 | 79 | 0.692775 | false |
abid-mujtaba/blackout | settings.py | 1 | 5035 | # Django settings for blackout project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'db)_b45-a8dwbxs4fky#**xcmls4+hpnldg73hp&*++#3vl066'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'blackout.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| apache-2.0 | -6,480,240,750,497,485,000 | 33.724138 | 115 | 0.684409 | false |
kaczmarj/neurodocker | examples/test_examples.py | 1 | 1405 | """Run the neurodocker examples and check for failures."""
import glob
import os
import subprocess
here = os.path.dirname(os.path.realpath(__file__))
def test_examples_readme():
with open(os.path.join(here, "README.md")) as f:
readme = f.read()
readme = readme.replace("\\\n", " ")
cmds = []
for line in readme.splitlines():
if not line.startswith("neurodocker generate"):
continue
s = line.split()
if 'docker' in s[2] and 'singularity' in s[2]:
s[2] = 'docker'
cmds.append(" ".join(s))
s[2] = 'singularity'
cmds.append(" ".join(s))
else:
cmds.append(line)
print("Testing {} commands from the examples README".format(len(cmds)))
with TemporaryChDir(here):
for c in cmds:
subprocess.run(c, shell=True, check=True)
def test_specialized_examples():
files = glob.glob(os.path.join(here, "**", "generate.sh"))
print("Testing {} commands from specialized examples".format(len(files)))
with TemporaryChDir(here):
for f in files:
subprocess.run(f, shell=True, check=True)
class TemporaryChDir:
def __init__(self, wd):
self.wd = wd
self._wd_orig = os.getcwd()
def __enter__(self):
os.chdir(self.wd)
def __exit__(self, exc_type, exc_value, tb):
os.chdir(self._wd_orig)
| apache-2.0 | 5,201,097,463,606,118,000 | 25.509434 | 77 | 0.578648 | false |
hmendozap/auto-sklearn | autosklearn/ensembles/abstract_ensemble.py | 1 | 1831 | from abc import ABCMeta, abstractmethod
class AbstractEnsemble(object):
__metaclass__ = ABCMeta
@abstractmethod
def fit(self, base_models_predictions, true_targets, model_identifiers):
"""Fit an ensemble given predictions of base models and targets.
Parameters
----------
base_models_predictions : array of shape = [n_base_models, n_data_points, n_targets]
n_targets is the number of classes in case of classification,
n_targets is 0 or 1 in case of regression
true_targets : array of shape [n_targets]
model_identifiers : identifier for each base model.
Can be used for practical text output of the ensemble.
Returns
-------
self
"""
pass
@abstractmethod
def predict(self, base_models_predictions):
"""Create ensemble predictions from the base model predictions.
Parameters
----------
base_models_predictions : array of shape = [n_base_models, n_data_points, n_targets]
Same as in the fit method.
Returns
-------
array : [n_data_points]
"""
pass
@abstractmethod
def pprint_ensemble_string(self, models):
"""Return a nicely-readable representation of the ensmble.
Parameters
----------
models : dict {identifier : model object}
The identifiers are the same as the one presented to the fit()
method. Models can be used for nice printing.
Returns
-------
str
"""
@abstractmethod
def get_model_identifiers(self):
"""Return identifiers of models in the ensemble.
This includes models which have a weight of zero!
Returns
-------
list
"""
| bsd-3-clause | -5,472,394,889,960,350,000 | 25.926471 | 92 | 0.582742 | false |
tjvr/pyswip | pyswip/core.py | 1 | 28267 | # -*- coding: utf-8 -*-
# pyswip -- Python SWI-Prolog bridge
# Copyright (c) 2007-2012 Yüce Tekol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import sys
import glob
import warnings
from subprocess import Popen, PIPE
from ctypes import *
from ctypes.util import find_library
# To initialize the SWI-Prolog environment, two things need to be done: the
# first is to find where the SO/DLL is located and the second is to find the
# SWI-Prolog home, to get the saved state.
#
# The goal of the (entangled) process below is to make the library installation
# independent.
def _findSwiplPathFromFindLib():
"""
This function resorts to ctypes' find_library to find the path to the
DLL. The biggest problem is that find_library does not give the path to the
resource file.
:returns:
A path to the swipl SO/DLL or None if it is not found.
:returns type:
{str, None}
"""
path = (find_library('swipl') or
find_library('pl') or
find_library('libswipl')) # This last one is for Windows
return path
def _findSwiplFromExec():
"""
This function tries to use an executable on the path to find SWI-Prolog
SO/DLL and the resource file.
:returns:
A tuple of (path to the swipl DLL, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
platform = sys.platform[:3]
fullName = None
swiHome = None
try: # try to get library path from swipl executable.
# We may have pl or swipl as the executable
try:
cmd = Popen(['swipl', '-dump-runtime-variables'], stdout=PIPE)
except OSError:
cmd = Popen(['pl', '-dump-runtime-variables'], stdout=PIPE)
ret = cmd.communicate()
# Parse the output into a dictionary
ret = ret[0].replace(';', '').splitlines()
ret = [line.split('=', 1) for line in ret]
rtvars = dict((name, value[1:-1]) for name, value in ret) # [1:-1] gets
# rid of the
# quotes
if rtvars['PLSHARED'] == 'no':
raise ImportError('SWI-Prolog is not installed as a shared '
'library.')
else: # PLSHARED == 'yes'
swiHome = rtvars['PLBASE'] # The environment is in PLBASE
if not os.path.exists(swiHome):
swiHome = None
# determine platform specific path
if platform == "win":
dllName = rtvars['PLLIB'][:-4] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'bin')
fullName = os.path.join(path, dllName)
if not os.path.exists(fullName):
fullName = None
elif platform == "cyg":
# e.g. /usr/lib/pl-5.6.36/bin/i686-cygwin/cygpl.dll
dllName = 'cygpl.dll'
path = os.path.join(rtvars['PLBASE'], 'bin', rtvars['PLARCH'])
fullName = os.path.join(path, dllName)
if not os.path.exists(fullName):
fullName = None
else: # assume UNIX-like
# The SO name in some linuxes is of the form libswipl.so.5.10.2,
# so we have to use glob to find the correct one
dllName = 'lib' + rtvars['PLLIB'][2:] + '.' + rtvars['PLSOEXT']
path = os.path.join(rtvars['PLBASE'], 'lib', rtvars['PLARCH'])
baseName = os.path.join(path, dllName)
if os.path.exists(baseName):
fullName = baseName
else: # We will search for versions
pattern = baseName + '.*'
files = glob.glob(pattern)
if len(files) == 0:
fullName = None
elif len(files) == 1:
fullName = files[0]
else: # Will this ever happen?
fullName = None
except (OSError, KeyError): # KeyError from accessing rtvars
pass
return (fullName, swiHome)
def _findSwiplWin():
"""
This function uses several heuristics to guess where SWI-Prolog is installed
in Windows. It always returns None as the path of the resource file because,
in Windows, the way to find it is more robust so the SWI-Prolog DLL is
always able to find it.
:returns:
A tuple of (path to the swipl DLL, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
dllNames = ('swipl.dll', 'libswipl.dll')
# First try: check the usual installation path (this is faster but
# hardcoded)
programFiles = os.getenv('ProgramFiles')
paths = [os.path.join(programFiles, r'pl\bin', dllName)
for dllName in dllNames]
for path in paths:
if os.path.exists(path):
return (path, None)
# Second try: use the find_library
path = _findSwiplPathFromFindLib()
if path is not None and os.path.exists(path):
return (path, None)
# Third try: use reg.exe to find the installation path in the registry
# (reg should be installed in all Windows XPs)
try:
cmd = Popen(['reg', 'query',
r'HKEY_LOCAL_MACHINE\Software\SWI\Prolog',
'/v', 'home'], stdout=PIPE)
ret = cmd.communicate()
# Result is like:
# ! REG.EXE VERSION 3.0
#
# HKEY_LOCAL_MACHINE\Software\SWI\Prolog
# home REG_SZ C:\Program Files\pl
# (Note: spaces may be \t or spaces in the output)
ret = ret[0].splitlines()
ret = [line for line in ret if len(line) > 0]
pattern = re.compile('[^h]*home[^R]*REG_SZ( |\t)*(.*)$')
match = pattern.match(ret[-1])
if match is not None:
path = match.group(2)
paths = [os.path.join(path, 'bin', dllName)
for dllName in dllNames]
for path in paths:
if os.path.exists(path):
return (path, None)
except OSError:
# reg.exe not found? Weird...
pass
# May the exec is on path?
(path, swiHome) = _findSwiplFromExec()
if path is not None:
return (path, swiHome)
# Last try: maybe it is in the current dir
for dllName in dllNames:
if os.path.exists(dllName):
return (dllName, None)
return (None, None)
def _findSwiplLin():
"""
This function uses several heuristics to guess where SWI-Prolog is
installed in Linuxes.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# May the exec is on path?
(path, swiHome) = _findSwiplFromExec()
if path is not None:
return (path, swiHome)
# If it is not, use find_library
path = _findSwiplPathFromFindLib()
if path is not None:
return (path, swiHome)
# Our last try: some hardcoded paths.
paths = ['/lib', '/usr/lib', '/usr/local/lib', '.', './lib']
names = ['libswipl.so', 'libpl.so']
path = None
for name in names:
for try_ in paths:
try_ = os.path.join(try_, name)
if os.path.exists(try_):
path = try_
break
if path is not None:
return (path, swiHome)
return (None, None)
def _findSwiplDar():
"""
This function uses several heuristics to guess where SWI-Prolog is
installed in MacOS.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# Help with MacOS is welcome!!
paths = ['.', './lib', '/usr/local/lib']
names = ['libswipl.dylib', 'libpl.dylib']
for name in names:
for path in paths:
path = os.path.join(path, name)
if os.path.exists(path):
return (path, None)
return (None, None)
def _findSwipl():
"""
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
:return: Tuple. Fist element is the name or path to the library that can be
used by CDLL. Second element is the path were SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
"""
# Now begins the guesswork
platform = sys.platform[:3]
if platform == "win": # In Windows, we have the default installer
# path and the registry to look
(path, swiHome) = _findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = _findSwiplLin()
elif platform == "dar": # Help with MacOS is welcome!!
(path, swiHome) = _findSwiplDar()
else:
raise EnvironmentError('The platform %s is not supported by this '
'library. If you want it to be supported, '
'please open an issue.' % platform)
# This is a catch all raise
if path is None:
raise ImportError('Could not find the SWI-Prolog library in this '
'platform. If you are sure it is installed, please '
'open an issue.')
else:
return (path, swiHome)
def _fixWindowsPath(dll):
"""
When the path to the DLL is not in Windows search path, Windows will not be
able to find other DLLs in the same directory, so we have to add it to the
path. This function takes care of it.
:parameters:
- `dll` (str) - File name of the DLL
"""
if sys.platform[:3] != 'win':
return # Nothing to do here
pathToDll = os.path.dirname(dll)
currentWindowsPath = os.getenv('PATH')
if pathToDll not in currentWindowsPath:
# We will prepend the path, to avoid conflicts between DLLs
newPath = pathToDll + ';' + currentWindowsPath
os.putenv('PATH', newPath)
class c_void(Structure):
_fields_ = [('dummy', c_int)]
c_void_p = POINTER(c_void)
# Find the path and resource file. SWI_HOME_DIR shall be treated as a constant
# by users of this module
(_path, SWI_HOME_DIR) = _findSwipl()
_fixWindowsPath(_path)
# Load the library
_lib = CDLL(_path)
# PySWIP constants
PYSWIP_MAXSTR = 1024
c_int_p = POINTER(c_int)
c_long_p = POINTER(c_long)
c_double_p = POINTER(c_double)
# constants (from SWI-Prolog.h)
# PL_unify_term() arguments
PL_VARIABLE = 1 # nothing
PL_ATOM = 2 # const char
PL_INTEGER = 3 # int
PL_FLOAT = 4 # double
PL_STRING = 5 # const char *
PL_TERM = 6 #
# PL_unify_term()
PL_FUNCTOR = 10 # functor_t, arg ...
PL_LIST = 11 # length, arg ...
PL_CHARS = 12 # const char *
PL_POINTER = 13 # void *
# /* PlArg::PlArg(text, type) */
#define PL_CODE_LIST (14) /* [ascii...] */
#define PL_CHAR_LIST (15) /* [h,e,l,l,o] */
#define PL_BOOL (16) /* PL_set_feature() */
#define PL_FUNCTOR_CHARS (17) /* PL_unify_term() */
#define _PL_PREDICATE_INDICATOR (18) /* predicate_t (Procedure) */
#define PL_SHORT (19) /* short */
#define PL_INT (20) /* int */
#define PL_LONG (21) /* long */
#define PL_DOUBLE (22) /* double */
#define PL_NCHARS (23) /* unsigned, const char * */
#define PL_UTF8_CHARS (24) /* const char * */
#define PL_UTF8_STRING (25) /* const char * */
#define PL_INT64 (26) /* int64_t */
#define PL_NUTF8_CHARS (27) /* unsigned, const char * */
#define PL_NUTF8_CODES (29) /* unsigned, const char * */
#define PL_NUTF8_STRING (30) /* unsigned, const char * */
#define PL_NWCHARS (31) /* unsigned, const wchar_t * */
#define PL_NWCODES (32) /* unsigned, const wchar_t * */
#define PL_NWSTRING (33) /* unsigned, const wchar_t * */
#define PL_MBCHARS (34) /* const char * */
#define PL_MBCODES (35) /* const char * */
#define PL_MBSTRING (36) /* const char * */
# /********************************
# * NON-DETERMINISTIC CALL/RETURN *
# *********************************/
#
# Note 1: Non-deterministic foreign functions may also use the deterministic
# return methods PL_succeed and PL_fail.
#
# Note 2: The argument to PL_retry is a 30 bits signed integer (long).
PL_FIRST_CALL = 0
PL_CUTTED = 1
PL_REDO = 2
PL_FA_NOTRACE = 0x01 # foreign cannot be traced
PL_FA_TRANSPARENT = 0x02 # foreign is module transparent
PL_FA_NONDETERMINISTIC = 0x04 # foreign is non-deterministic
PL_FA_VARARGS = 0x08 # call using t0, ac, ctx
PL_FA_CREF = 0x10 # Internal: has clause-reference */
# /*******************************
# * CALL-BACK *
# *******************************/
PL_Q_DEBUG = 0x01 # = TRUE for backward compatibility
PL_Q_NORMAL = 0x02 # normal usage
PL_Q_NODEBUG = 0x04 # use this one
PL_Q_CATCH_EXCEPTION = 0x08 # handle exceptions in C
PL_Q_PASS_EXCEPTION = 0x10 # pass to parent environment
PL_Q_DETERMINISTIC = 0x20 # call was deterministic
# /*******************************
# * BLOBS *
# *******************************/
#define PL_BLOB_MAGIC_B 0x75293a00 /* Magic to validate a blob-type */
#define PL_BLOB_VERSION 1 /* Current version */
#define PL_BLOB_MAGIC (PL_BLOB_MAGIC_B|PL_BLOB_VERSION)
#define PL_BLOB_UNIQUE 0x01 /* Blob content is unique */
#define PL_BLOB_TEXT 0x02 /* blob contains text */
#define PL_BLOB_NOCOPY 0x04 /* do not copy the data */
#define PL_BLOB_WCHAR 0x08 /* wide character string */
# /*******************************
# * CHAR BUFFERS *
# *******************************/
CVT_ATOM = 0x0001
CVT_STRING = 0x0002
CVT_LIST = 0x0004
CVT_INTEGER = 0x0008
CVT_FLOAT = 0x0010
CVT_VARIABLE = 0x0020
CVT_NUMBER = CVT_INTEGER | CVT_FLOAT
CVT_ATOMIC = CVT_NUMBER | CVT_ATOM | CVT_STRING
CVT_WRITE = 0x0040 # as of version 3.2.10
CVT_ALL = CVT_ATOMIC | CVT_LIST
CVT_MASK = 0x00ff
BUF_DISCARDABLE = 0x0000
BUF_RING = 0x0100
BUF_MALLOC = 0x0200
CVT_EXCEPTION = 0x10000 # throw exception on error
argv = (c_char_p*(len(sys.argv) + 1))()
for i, arg in enumerate(sys.argv):
argv[i] = arg
argv[-1] = None
argc = len(sys.argv)
# types
atom_t = c_ulong
term_t = c_ulong
fid_t = c_ulong
module_t = c_void_p
predicate_t = c_void_p
record_t = c_void_p
qid_t = c_ulong
PL_fid_t = c_ulong
control_t = c_void_p
PL_engine_t = c_void_p
functor_t = c_ulong
PL_atomic_t = c_ulong
foreign_t = c_ulong
pl_wchar_t = c_wchar
##_lib.PL_initialise(len(sys.argv), _argv)
PL_initialise = _lib.PL_initialise
##PL_initialise.argtypes = [c_int, c_c
PL_open_foreign_frame = _lib.PL_open_foreign_frame
PL_open_foreign_frame.restype = fid_t
PL_new_term_ref = _lib.PL_new_term_ref
PL_new_term_ref.restype = term_t
PL_new_term_refs = _lib.PL_new_term_refs
PL_new_term_refs.restype = term_t
PL_chars_to_term = _lib.PL_chars_to_term
PL_chars_to_term.argtypes = [c_char_p, term_t]
PL_call = _lib.PL_call
PL_call.argtypes = [term_t, module_t]
PL_call_predicate = _lib.PL_call_predicate
PL_call_predicate.argtypes = [module_t, c_int, predicate_t, term_t]
PL_discard_foreign_frame = _lib.PL_discard_foreign_frame
PL_discard_foreign_frame.argtypes = [fid_t]
PL_put_list_chars = _lib.PL_put_list_chars
PL_put_list_chars.argtypes = [term_t, c_char_p]
#PL_EXPORT(void) PL_register_atom(atom_t a);
PL_register_atom = _lib.PL_register_atom
#PL_EXPORT(void) PL_unregister_atom(atom_t a);
PL_unregister_atom = _lib.PL_unregister_atom
#PL_EXPORT(atom_t) PL_functor_name(functor_t f);
PL_functor_name = _lib.PL_functor_name
#PL_EXPORT(int) PL_functor_arity(functor_t f);
PL_functor_arity = _lib.PL_functor_arity
# /* Get C-values from Prolog terms */
#PL_EXPORT(int) PL_get_atom(term_t t, atom_t *a);
PL_get_atom = _lib.PL_get_atom
PL_get_atom.argtypes = [term_t, c_ulong]
#PL_EXPORT(int) PL_get_bool(term_t t, int *value);
PL_get_bool = _lib.PL_get_bool
#PL_EXPORT(int) PL_get_atom_chars(term_t t, char **a);
PL_get_atom_chars = _lib.PL_get_atom_chars # FIXME
##define PL_get_string_chars(t, s, l) PL_get_string(t,s,l)
# /* PL_get_string() is deprecated */
#PL_EXPORT(int) PL_get_string(term_t t, char **s, size_t *len);
PL_get_string = _lib.PL_get_string
PL_get_string_chars = PL_get_string
#PL_get_string_chars.argtypes = [term_t, POINTER(c_char_p), c_int_p]
#PL_EXPORT(int) PL_get_chars(term_t t, char **s, unsigned int flags);
PL_get_chars = _lib.PL_get_chars # FIXME:
#PL_EXPORT(int) PL_get_list_chars(term_t l, char **s,
# unsigned int flags);
#PL_EXPORT(int) PL_get_atom_nchars(term_t t, size_t *len, char **a);
#PL_EXPORT(int) PL_get_list_nchars(term_t l,
# size_t *len, char **s,
# unsigned int flags);
#PL_EXPORT(int) PL_get_nchars(term_t t,
# size_t *len, char **s,
# unsigned int flags);
#PL_EXPORT(int) PL_get_integer(term_t t, int *i);
PL_get_integer = _lib.PL_get_integer
#PL_get_integer.argtypes = [term_t, c_int_p]
#PL_EXPORT(int) PL_get_long(term_t t, long *i);
PL_get_long = _lib.PL_get_long
#PL_get_long.argtypes = [term_t, c_long_p]
#PL_EXPORT(int) PL_get_pointer(term_t t, void **ptr);
#PL_EXPORT(int) PL_get_float(term_t t, double *f);
PL_get_float = _lib.PL_get_float
#PL_get_float.argtypes = [term_t, c_double_p]
#PL_EXPORT(int) PL_get_functor(term_t t, functor_t *f);
PL_get_functor = _lib.PL_get_functor
PL_get_functor.argtypes = [term_t, c_ulong]
#PL_get_functor.argtypes = [term_t, POINTER(functor_t)]
#PL_EXPORT(int) PL_get_name_arity(term_t t, atom_t *name, int *arity);
PL_get_name_arity = _lib.PL_get_name_arity
#PL_EXPORT(int) PL_get_module(term_t t, module_t *module);
#PL_EXPORT(int) PL_get_arg(int index, term_t t, term_t a);
PL_get_arg = _lib.PL_get_arg
#PL_EXPORT(int) PL_get_list(term_t l, term_t h, term_t t);
#PL_EXPORT(int) PL_get_head(term_t l, term_t h);
PL_get_head = _lib.PL_get_head
#PL_EXPORT(int) PL_get_tail(term_t l, term_t t);
PL_get_tail = _lib.PL_get_tail
#PL_EXPORT(int) PL_get_nil(term_t l);
PL_get_nil = _lib.PL_get_nil
#PL_EXPORT(int) PL_get_term_value(term_t t, term_value_t *v);
#PL_EXPORT(char *) PL_quote(int chr, const char *data);
PL_put_atom_chars = _lib.PL_put_atom_chars
PL_put_atom_chars.argtypes = [term_t, c_char_p]
PL_atom_chars = _lib.PL_atom_chars
PL_atom_chars.argtypes = [atom_t]
PL_atom_chars.restype = c_char_p
PL_predicate = _lib.PL_predicate
PL_predicate.argtypes = [c_char_p, c_int, c_char_p]
PL_predicate.restype = predicate_t
PL_pred = _lib.PL_pred
PL_pred.argtypes = [functor_t, module_t]
PL_pred.restype = predicate_t
PL_open_query = _lib.PL_open_query
PL_open_query.argtypes = [module_t, c_int, predicate_t, term_t]
PL_open_query.restype = qid_t
PL_next_solution = _lib.PL_next_solution
PL_next_solution.argtypes = [qid_t]
PL_copy_term_ref = _lib.PL_copy_term_ref
PL_copy_term_ref.argtypes = [term_t]
PL_copy_term_ref.restype = term_t
PL_get_list = _lib.PL_get_list
PL_get_list.argtypes = [term_t, term_t, term_t]
PL_get_chars = _lib.PL_get_chars # FIXME
PL_close_query = _lib.PL_close_query
PL_close_query.argtypes = [qid_t]
#void PL_cut_query(qid)
PL_cut_query = _lib.PL_cut_query
PL_cut_query.argtypes = [qid_t]
PL_halt = _lib.PL_halt
PL_halt.argtypes = [c_int]
# PL_EXPORT(int) PL_cleanup(int status);
PL_cleanup = _lib.PL_cleanup
PL_unify_integer = _lib.PL_unify_integer
PL_unify = _lib.PL_unify
PL_unify_arg = _lib.PL_unify_arg
# Verify types
PL_term_type = _lib.PL_term_type
PL_term_type.argtypes = [term_t]
PL_term_type.restype = c_int
PL_is_variable = _lib.PL_is_variable
PL_is_variable.argtypes = [term_t]
PL_is_variable.restype = c_int
PL_is_ground = _lib.PL_is_ground
PL_is_ground.argtypes = [term_t]
PL_is_ground.restype = c_int
PL_is_atom = _lib.PL_is_atom
PL_is_atom.argtypes = [term_t]
PL_is_atom.restype = c_int
PL_is_integer = _lib.PL_is_integer
PL_is_integer.argtypes = [term_t]
PL_is_integer.restype = c_int
PL_is_string = _lib.PL_is_string
PL_is_string.argtypes = [term_t]
PL_is_string.restype = c_int
PL_is_float = _lib.PL_is_float
PL_is_float.argtypes = [term_t]
PL_is_float.restype = c_int
#PL_is_rational = _lib.PL_is_rational
#PL_is_rational.argtypes = [term_t]
#PL_is_rational.restype = c_int
PL_is_compound = _lib.PL_is_compound
PL_is_compound.argtypes = [term_t]
PL_is_compound.restype = c_int
PL_is_functor = _lib.PL_is_functor
PL_is_functor.argtypes = [term_t, functor_t]
PL_is_functor.restype = c_int
PL_is_list = _lib.PL_is_list
PL_is_list.argtypes = [term_t]
PL_is_list.restype = c_int
PL_is_atomic = _lib.PL_is_atomic
PL_is_atomic.argtypes = [term_t]
PL_is_atomic.restype = c_int
PL_is_number = _lib.PL_is_number
PL_is_number.argtypes = [term_t]
PL_is_number.restype = c_int
# /* Assign to term-references */
#PL_EXPORT(void) PL_put_variable(term_t t);
PL_put_variable = _lib.PL_put_variable
#PL_EXPORT(void) PL_put_atom(term_t t, atom_t a);
#PL_EXPORT(void) PL_put_atom_chars(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_string_chars(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_list_chars(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_list_codes(term_t t, const char *chars);
#PL_EXPORT(void) PL_put_atom_nchars(term_t t, size_t l, const char *chars);
#PL_EXPORT(void) PL_put_string_nchars(term_t t, size_t len, const char *chars);
#PL_EXPORT(void) PL_put_list_nchars(term_t t, size_t l, const char *chars);
#PL_EXPORT(void) PL_put_list_ncodes(term_t t, size_t l, const char *chars);
#PL_EXPORT(void) PL_put_integer(term_t t, long i);
PL_put_integer = _lib.PL_put_integer
PL_put_integer.argtypes = [term_t, c_long]
PL_put_integer.restype = None
#PL_EXPORT(void) PL_put_pointer(term_t t, void *ptr);
#PL_EXPORT(void) PL_put_float(term_t t, double f);
#PL_EXPORT(void) PL_put_functor(term_t t, functor_t functor);
PL_put_functor = _lib.PL_put_functor
#PL_EXPORT(void) PL_put_list(term_t l);
PL_put_list = _lib.PL_put_list
#PL_EXPORT(void) PL_put_nil(term_t l);
PL_put_nil = _lib.PL_put_nil
#PL_EXPORT(void) PL_put_term(term_t t1, term_t t2);
PL_put_term = _lib.PL_put_term
# /* construct a functor or list-cell */
#PL_EXPORT(void) PL_cons_functor(term_t h, functor_t f, ...);
#class _PL_cons_functor(object):
PL_cons_functor = _lib.PL_cons_functor # FIXME:
#PL_EXPORT(void) PL_cons_functor_v(term_t h, functor_t fd, term_t a0);
PL_cons_functor_v = _lib.PL_cons_functor_v
PL_cons_functor_v.argtypes = [term_t, functor_t, term_t]
PL_cons_functor_v.restype = None
#PL_EXPORT(void) PL_cons_list(term_t l, term_t h, term_t t);
PL_cons_list = _lib.PL_cons_list
#
# term_t PL_exception(qid_t qid)
PL_exception = _lib.PL_exception
PL_exception.argtypes = [qid_t]
PL_exception.restype = term_t
#
PL_register_foreign = _lib.PL_register_foreign
#
#PL_EXPORT(atom_t) PL_new_atom(const char *s);
PL_new_atom = _lib.PL_new_atom
PL_new_atom.argtypes = [c_char_p]
PL_new_atom.restype = atom_t
#PL_EXPORT(functor_t) PL_new_functor(atom_t f, int a);
PL_new_functor = _lib.PL_new_functor
PL_new_functor.argtypes = [atom_t, c_int]
PL_new_functor.restype = functor_t
# /*******************************
# * RECORDED DATABASE *
# *******************************/
#
#PL_EXPORT(record_t) PL_record(term_t term);
PL_record = _lib.PL_record
#PL_EXPORT(void) PL_recorded(record_t record, term_t term);
PL_recorded = _lib.PL_recorded
#PL_EXPORT(void) PL_erase(record_t record);
PL_erase = _lib.PL_erase
#
#PL_EXPORT(char *) PL_record_external(term_t t, size_t *size);
#PL_EXPORT(int) PL_recorded_external(const char *rec, term_t term);
#PL_EXPORT(int) PL_erase_external(char *rec);
PL_new_module = _lib.PL_new_module
PL_new_module.argtypes = [atom_t]
PL_new_module.restype = module_t
intptr_t = c_long
ssize_t = intptr_t
wint_t = c_uint
#typedef struct
#{
# int __count;
# union
# {
# wint_t __wch;
# char __wchb[4];
# } __value; /* Value so far. */
#} __mbstate_t;
class _mbstate_t_value(Union):
_fields_ = [("__wch",wint_t),
("__wchb",c_char*4)]
class mbstate_t(Structure):
_fields_ = [("__count",c_int),
("__value",_mbstate_t_value)]
# stream related funcs
Sread_function = CFUNCTYPE(ssize_t, c_void_p, c_char_p, c_size_t)
Swrite_function = CFUNCTYPE(ssize_t, c_void_p, c_char_p, c_size_t)
Sseek_function = CFUNCTYPE(c_long, c_void_p, c_long, c_int)
Sseek64_function = CFUNCTYPE(c_int64, c_void_p, c_int64, c_int)
Sclose_function = CFUNCTYPE(c_int, c_void_p)
Scontrol_function = CFUNCTYPE(c_int, c_void_p, c_int, c_void_p)
# IOLOCK
IOLOCK = c_void_p
# IOFUNCTIONS
class IOFUNCTIONS(Structure):
_fields_ = [("read",Sread_function),
("write",Swrite_function),
("seek",Sseek_function),
("close",Sclose_function),
("seek64",Sseek64_function),
("reserved",intptr_t*2)]
# IOENC
ENC_UNKNOWN,ENC_OCTET,ENC_ASCII,ENC_ISO_LATIN_1,ENC_ANSI,ENC_UTF8,ENC_UNICODE_BE,ENC_UNICODE_LE,ENC_WCHAR = range(9)
IOENC = c_int
# IOPOS
class IOPOS(Structure):
_fields_ = [("byteno",c_int64),
("charno",c_int64),
("lineno",c_int),
("linepos",c_int),
("reserved", intptr_t*2)]
# IOSTREAM
class IOSTREAM(Structure):
_fields_ = [("bufp",c_char_p),
("limitp",c_char_p),
("buffer",c_char_p),
("unbuffer",c_char_p),
("lastc",c_int),
("magic",c_int),
("bufsize",c_int),
("flags",c_int),
("posbuf",IOPOS),
("position",POINTER(IOPOS)),
("handle",c_void_p),
("functions",IOFUNCTIONS),
("locks",c_int),
("mutex",IOLOCK),
("closure_hook",CFUNCTYPE(None, c_void_p)),
("closure",c_void_p),
("timeout",c_int),
("message",c_char_p),
("encoding",IOENC)]
IOSTREAM._fields_.extend([("tee",IOSTREAM),
("mbstate",POINTER(mbstate_t)),
("reserved",intptr_t*6)])
#PL_EXPORT(IOSTREAM *) Sopen_string(IOSTREAM *s, char *buf, size_t sz, const char *m);
Sopen_string = _lib.Sopen_string
Sopen_string.argtypes = [POINTER(IOSTREAM), c_char_p, c_size_t, c_char_p]
Sopen_string.restype = POINTER(IOSTREAM)
#PL_EXPORT(int) Sclose(IOSTREAM *s);
Sclose = _lib.Sclose
Sclose.argtypes = [POINTER(IOSTREAM)]
#PL_EXPORT(int) PL_unify_stream(term_t t, IOSTREAM *s);
PL_unify_stream = _lib.PL_unify_stream
PL_unify_stream.argtypes = [term_t, POINTER(IOSTREAM)]
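# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): the bindings above are
# normally driven through pyswip's high-level Prolog class, but they can be
# exercised directly, roughly like this:
#
# if not PL_initialise(argc, argv):
#     raise RuntimeError("Could not initialise the SWI-Prolog environment")
# fid = PL_open_foreign_frame()
# t = PL_new_term_ref()
# PL_chars_to_term("assert(father(michael, john))", t)
# PL_call(t, None)
# PL_discard_foreign_frame(fid)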
| mit | -6,129,401,683,637,074,000 | 31.047619 | 116 | 0.602597 | false |
hijoe320/RSSNewsBot | rssnewsbot/spiders/rssspider.py | 1 | 2314 | from time import sleep, gmtime, mktime
from datetime import datetime
import logging
import scrapy
import redis
import msgpack
import xxhash
import pymongo as pm
import feedparser as fp
from colorama import Back, Fore, Style
from ..settings import MONGODB_URI, REDIS_HOST, REDIS_PORT, REDIS_PWD, REDIS_PENDING_QUEUE, REDIS_DUPFLT_DB
def hs(s):
"""
hash function to convert url to fixed length hash code
"""
return xxhash.xxh32(s).hexdigest()
def time2ts(time_struct):
"""
convert time_struct to epoch
"""
return mktime(time_struct)
class RSSSpider(scrapy.Spider):
name = "rssspider"
def __init__(self, *args, **kwargs):
super(RSSSpider, self).__init__(*args, **kwargs)
self.rc = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD)
self.df = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD, db=REDIS_DUPFLT_DB)
self.mc = pm.MongoClient(host=MONGODB_URI, connect=False)
def start_requests(self):
with self.mc.rssnews.feed.find() as cursor:
logging.info("number of rss feeds = %d", cursor.count())
for item in cursor:
logging.debug("rss=%(url)s", item)
yield scrapy.Request(url=item["url"], callback=self.parse, meta=item)
def parse(self, res):
logging.debug("%sparsing %s%s", Fore.GREEN, res.url, Style.RESET_ALL)
rss = fp.parse(res.body)
symbol = res.meta["symbol"]
for e in rss.entries:
if self.check_exist(e.link):
continue
if '*' in e.link:
url = "http" + e.link.split("*http")[-1]
self.append_task(e, url)
elif e.link.startswith("http://finance.yahoo.com/r/"):
yield scrapy.Request(url=e.link, callback=self.extract_url, meta=e)
else:
self.append_task(e, e.link)
def extract_url(self, res):
if res.body.startswith("<script src="):
url = res.body.split("URL=\'")[-1].split("\'")[0]
self.append_task(res.meta, url)
else:
pass
def check_exist(self, url):
return self.df.get(url)
def append_task(self, entry, url):
self.df.set(url, True, ex=3600)
task = {'url': url, 'title': entry.get('title', '')}  # minimal payload; the exact fields are an assumption
self.rc.append(REDIS_PENDING_QUEUE, msgpack.packb(task))
| mit | -9,142,137,552,510,368,000 | 31.591549 | 103 | 0.601988 | false |
ydkhatri/mac_apt | plugins/msoffice.py | 1 | 20161 | '''
Copyright (c) 2017 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
'''
import logging
import sqlite3
import struct
from os import path
from plistutils.alias import AliasParser
from plugins.helpers.common import CommonFunctions
from plugins.helpers.macinfo import *
from plugins.helpers.writer import *
from plugins.helpers.bookmark import *
__Plugin_Name = "MSOFFICE"
__Plugin_Friendly_Name = "MSOffice"
__Plugin_Version = "1.0"
__Plugin_Description = "Reads Word, Excel, Powerpoint and other office MRU/accessed file paths"
__Plugin_Author = "Yogesh Khatri"
__Plugin_Author_Email = "[email protected]"
__Plugin_Modes = "MACOS,ARTIFACTONLY"
__Plugin_ArtifactOnly_Usage = 'Provide any of the office plists from ~/Library/Preferences/com.microsoft.*.plist '\
' '
log = logging.getLogger('MAIN.' + __Plugin_Name) # Do not rename or remove this ! This is the logger object
#---- Do not change the variable names in above section ----#
# Gets data from the following files:
# ~/Library/Preferences/com.microsoft.office.plist
# ~/Library/Containers/com.microsoft.<OFFICEAPP>/Data/Library/Preferences/com.microsoft.<APP>.plist
# ~/Library/Containers/com.microsoft.<OFFICEAPP>/Data/Library/Preferences/com.microsoft.<APP>.securebookmarks.plist
#
# And the registry database at
# ~/Library/Group Containers/xxxxxx.Office/MicrosoftRegistrationDB.reg
def GetStringRepresentation(value, valuetype = None):
s = ''
if value == None:
return s
if valuetype == 3: # REG_BINARY
s = value.hex().upper()
elif valuetype == 1: #REG_SZ
s = value
else:
s = str(value)
return s
# ONLY 1,3,4,11 have been seen so far.
def GetStringValueType(valuetype):
s = ''
if valuetype == None or valuetype == '':
return s
elif valuetype == 1: s = "REG_SZ"
elif valuetype == 3: s = "REG_BINARY"
elif valuetype == 4: s = "REG_DWORD"
elif valuetype == 11: s = "REG_QWORD"
elif valuetype == 2: s = "REG_EXPAND_SZ"
elif valuetype == 5: s = "REG_DWORD_BIG_ENDIAN"
elif valuetype == 6: s = "REG_LINK"
elif valuetype == 7: s = "REG_MULTI_SZ"
elif valuetype == 8: s = "REG_RESOURCE_LIST"
elif valuetype == 9: s = "REG_FULL_RESOURCE_DESCRIPTOR"
elif valuetype == 10: s = "REG_RESOURCE_REQUIREMENTS_LIST"
else:
s = str(valuetype)
return s
def GetUint64Value(value):
if value != None:
try:
v = struct.unpack('<Q', value[0:8])[0]
return v
except (IndexError, struct.error, ValueError):
log.exception('')
return None
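# Illustration (not part of the original code): key timestamps in the registry
# database are REG_QWORD blobs holding a Windows FILETIME, so they are decoded
# in two steps, e.g.
#   ft = GetUint64Value(row['keyLastWriteTime'])   # 8-byte blob -> int
#   ts = CommonFunctions.ReadWindowsFileTime(ft)   # FILETIME -> datetime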
def OpenDbFromImage(mac_info, inputPath, user):
'''Returns tuple of (connection, wrapper_obj)'''
log.info ("Processing office registry entries for user '{}' from file {}".format(user, inputPath))
try:
sqlite = SqliteWrapper(mac_info)
conn = sqlite.connect(inputPath)
if conn:
log.debug ("Opened database successfully")
return conn, sqlite
except sqlite3.Error as ex:
log.exception ("Failed to open database, is it a valid DB?")
return None, None
def OpenDb(inputPath):
log.info ("Processing file " + inputPath)
try:
conn = CommonFunctions.open_sqlite_db_readonly(inputPath)
log.debug ("Opened database successfully")
return conn
except sqlite3.Error:
log.exception ("Failed to open database, is it a valid DB?")
return None
def ParseRegistrationDB(conn,office_reg_items, user, source):
conn.row_factory = sqlite3.Row
try:
query = str("SELECT t2.node_id as id, t2.write_time as keyLastWriteTime, path as key, HKEY_CURRENT_USER_values.name as valueName, HKEY_CURRENT_USER_values.value as value, HKEY_CURRENT_USER_values.type as valueType from ( "
" WITH RECURSIVE "
" under_software(path, name, node_id, write_time) AS ( "
" VALUES('Software','',1, NULL) "
" UNION ALL "
" SELECT under_software.path || '\\' || HKEY_CURRENT_USER.name, HKEY_CURRENT_USER.name, HKEY_CURRENT_USER.node_id, HKEY_CURRENT_USER.write_time "
" FROM HKEY_CURRENT_USER JOIN under_software ON HKEY_CURRENT_USER.parent_id=under_software.node_id "
" ORDER BY 1 "
" ) "
" SELECT name, path, write_time, node_id FROM under_software "
" ) as t2 LEFT JOIN HKEY_CURRENT_USER_values on HKEY_CURRENT_USER_values.node_id=t2.node_id ")
cursor = conn.execute(query)
data = cursor.fetchall()
try:
for row in data:
item = MSOfficeRegItem(row['id'],
CommonFunctions.ReadWindowsFileTime(GetUint64Value(row['keyLastWriteTime'])),
GetStringRepresentation(row['key']),
GetStringValueType(row['valueType']),
GetStringRepresentation(row['valueName']),
GetStringRepresentation(row['value'], row['valueType']),
user, source)
office_reg_items.append(item)
except (sqlite3.Error, ValueError, IndexError):
log.exception('')
except sqlite3.Error as ex:
log.exception('Error executing query : {}'.format(query))
class MSOfficeRegItem:
def __init__(self, id, ts, key, v_type, v_name, v_data, user, source):
self.id = id
self.ts = ts
self.key = key
self.v_type = v_type
self.v_name = v_name
self.v_data = v_data
self.user = user
self.source = source
def PrintRegItems(office_items, output_params):
office_info = [ ('Id',DataType.INTEGER),('TimeStamp',DataType.DATE),('KeyPath',DataType.TEXT),
('ValueName',DataType.TEXT),('ValueType',DataType.TEXT),('ValueData',DataType.TEXT),
('User', DataType.TEXT),('Source',DataType.TEXT)
]
log.info (str(len(office_items)) + " office item(s) found")
office_list = []
for q in office_items:
q_item = [ q.id, q.ts, q.key, q.v_name, q.v_type, q.v_data,
q.user, q.source
]
office_list.append(q_item)
WriteList("office registry data", "MSOfficeRegistry", office_list, office_info, output_params, '')
class MSOfficeItem:
def __init__(self, office_app, timestamp, name, data, info, user, source):
self.office_app = office_app
self.timestamp = timestamp
self.name = name
self.data = data
self.info = info
self.user = user
self.source_file = source
def PrintItems(office_items, output_params):
office_info = [ ('App',DataType.TEXT),('TimeStamp',DataType.DATE),('Name',DataType.TEXT),
('Data',DataType.TEXT),('Info',DataType.TEXT),
('User', DataType.TEXT),('Source',DataType.TEXT)
]
log.info (str(len(office_items)) + " office item(s) found")
office_list = []
for q in office_items:
q_item = [ q.office_app, q.timestamp, q.name, q.data, q.info,
q.user, q.source_file
]
office_list.append(q_item)
WriteList("office information", "MSOffice", office_list, office_info, output_params, '')
def ProcessMRU(office_items, app_name, mru_list, user, source):
for mru in mru_list:
try:
access_data = mru.get('Access Date', '')
access_time = None
try:
v = struct.unpack('<I', access_data[2:6])[0]
access_time = CommonFunctions.ReadMacHFSTime(v)
except (IndexError, ValueError):
log.exception('')
path = ''
alias_data = mru.get('File Alias', None)
if alias_data:
try:
alias_properties = next(AliasParser.parse(source, 0, alias_data))
#log.debug(alias_properties)
path = alias_properties.get('path', '')
except (IndexError, ValueError, KeyError, TypeError):
log.exception('')
o_item = MSOfficeItem(app_name, access_time, 'MRU', path, '', user, source)
office_items.append(o_item)
except (ValueError, TypeError):
log.exception('')
def ProcessOfficeAppPlist(plist, office_items, app_name, user, source):
for item in ('NSNavLastRootDirectory', 'SessionStartTime', 'SessionDuration'): # SessionStartTime is string, stored as local time?
item_val = plist.get(item, None)
if item_val:
info = ''
if item == 'SessionDuration':
pass # Get item_val in HH:MM:SS
elif item == 'SessionStartTime': info = 'Local time?'
o_item = MSOfficeItem(app_name, None, item, item_val, info, user, source)
office_items.append(o_item)
bookmark_data = plist.get('LastSaveFilePathBookmark', None)
if bookmark_data:
file_path = ''
file_creation_date = None
vol_path = ''
try:
bm = Bookmark.from_bytes(bookmark_data)
# Get full file path
vol_path = bm.tocs[0][1].get(BookmarkKey.VolumePath, '')
vol_creation_date = bm.tocs[0][1].get(BookmarkKey.VolumeCreationDate, '')
file_path = bm.tocs[0][1].get(BookmarkKey.Path, [])
file_path = '/' + '/'.join(file_path)
file_creation_date = bm.tocs[0][1].get(BookmarkKey.FileCreationDate, '')
if vol_path and (not file_path.startswith(vol_path)):
file_path += vol_path
if user == '': # in artifact_only mode
try:
user = bm.tocs[0][1].get(BookmarkKey.UserName, '')
if user == 'unknown':
user = ''
except (IndexError, ValueError):
pass
o_item = MSOfficeItem(app_name, file_creation_date, 'LastSaveFilePathBookmark', file_path, 'Date is FileCreated', user, source)
office_items.append(o_item)
except (IndexError, ValueError):
log.exception('Error processing BookmarkData from {}'.format(source))
log.debug(bm)
def ProcessOfficeAppSecureBookmarksPlist(plist, office_items, app_name, user, source):
'''Process com.microsoft.<APP>.securebookmarks.plist'''
for k, v in plist.items():
data = v.get('kBookmarkDataKey', None)
file_creation_date = None
try:
bm = Bookmark.from_bytes(data)
file_creation_date = bm.tocs[0][1].get(BookmarkKey.FileCreationDate, '')
if user == '': # in artifact_only mode
try:
user = bm.tocs[0][1].get(BookmarkKey.UserName, '')
if user == 'unknown':
user = ''
except (IndexError, ValueError):
pass
except (IndexError, ValueError):
log.exception('Error processing BookmarkData from {}'.format(source))
log.debug(bm)
o_item = MSOfficeItem(app_name, file_creation_date, 'SecureBookmark', k, 'Date is FileCreated', user, source)
office_items.append(o_item)
def ProcessOfficePlist(plist, office_items, user, source):
for item in ('UserName', 'UserInitials', 'UserOrganization'):
item_val = plist.get('14\\UserInfo\\{}'.format(item), None)
if item_val:
o_item = MSOfficeItem('', None, item, item_val, '', user, source)
office_items.append(o_item)
for item in plist:
if item.startswith('14\\Web\\TypedURLs\\url'):
o_item = MSOfficeItem('', None, 'TypedURLs', plist[item], '', user, source)
office_items.append(o_item)
elif item.find('Most Recent MRU File Name') > 0:
o_app = ''
try:
o_app = item[0:-26].split('\\')[-1]
except (IndexError, ValueError):
pass
o_item = MSOfficeItem(o_app, None, item, plist[item], '', user, source)
office_items.append(o_item)
mru_list = plist.get('14\\File MRU\\XCEL', None)
if mru_list and len(mru_list):
ProcessMRU(office_items, 'Excel', mru_list, user, source)
mru_list = plist.get('14\\File MRU\\MSWD', None)
if mru_list and len(mru_list):
ProcessMRU(office_items, 'Word', mru_list, user, source)
mru_list = plist.get('14\\File MRU\\PPT3', None)
if mru_list and len(mru_list):
ProcessMRU(office_items, 'Powerpoint', mru_list, user, source)
def ProcessAppPlists(mac_info, home_dir, office_items, user, source):
# ~\Library\Containers\com.microsoft.<OFFICEAPP>\Data\Library\Preferences\com.microsoft.<APP>.plist
app_container_path = '{}/Library/Containers'
path_partial = app_container_path.format(home_dir)
if mac_info.IsValidFilePath(path_partial):
folders_list = mac_info.ListItemsInFolder(path_partial, EntryType.FOLDERS, False)
for folder in folders_list:
if folder['name'].startswith('com.microsoft.'):
name = folder['name']
app_name = name[14:]
plist_path = path_partial + '/' + name + '/Data/Library/Preferences/' + name + '.plist'
if mac_info.IsValidFilePath(plist_path):
                    mac_info.ExportFile(plist_path, __Plugin_Name, user, False)
success, plist, error = mac_info.ReadPlist(plist_path)
if success:
ProcessOfficeAppPlist(plist, office_items, app_name, user, source)
else:
log.error("Problem reading plist {} - {}".format(plist_path, error))
#securebookmarks
plist_path = path_partial + '/' + name + '/Data/Library/Preferences/' + name + '.securebookmarks.plist'
if mac_info.IsValidFilePath(plist_path):
                    mac_info.ExportFile(plist_path, __Plugin_Name, user, False)
success, plist, error = mac_info.ReadPlist(plist_path)
if success:
ProcessOfficeAppSecureBookmarksPlist(plist, office_items, app_name, user, source)
else:
log.error("Problem reading plist {} - {}".format(plist_path, error))
def Plugin_Start(mac_info):
'''Main Entry point function for plugin'''
office_items = []
office_reg_items = []
processed_paths = set()
office_plist_path = '{}/Library/Preferences/com.microsoft.office.plist'
office_reg_path_partial = '{}/Library/Group Containers' # /xxxx.Office/MicrosoftRegistrationDB.reg
for user in mac_info.users:
user_name = user.user_name
if user.home_dir == '/private/var/empty': continue # Optimization, nothing should be here!
        elif user.home_dir == '/private/var/root': user_name = 'root' # Some other users share the same root folder; we list all such users as 'root', as there is no way to tell them apart
if user.home_dir in processed_paths: continue # Avoid processing same folder twice (some users have same folder! (Eg: root & daemon))
processed_paths.add(user.home_dir)
plist_path = office_plist_path.format(user.home_dir)
if mac_info.IsValidFilePath(plist_path):
mac_info.ExportFile(plist_path, __Plugin_Name, user_name, False)
success, plist, error = mac_info.ReadPlist(plist_path)
if success:
ProcessOfficePlist(plist, office_items, user_name, plist_path)
else:
log.error("Problem reading plist {} - {}".format(plist_path, error))
reg_path_partial = office_reg_path_partial.format(user.home_dir)
if mac_info.IsValidFolderPath(reg_path_partial):
folders_list = mac_info.ListItemsInFolder(reg_path_partial, EntryType.FOLDERS, False)
for folder in folders_list:
if folder['name'].endswith('.Office'):
reg_path = reg_path_partial + '/' + folder['name'] + '/MicrosoftRegistrationDB.reg'
if mac_info.IsValidFilePath(reg_path):
if mac_info.IsSymbolicLink(reg_path): # sometimes this is a symlink
target_path = mac_info.ReadSymLinkTargetPath(reg_path)
log.debug('SYMLINK {} <==> {}'.format(reg_path, target_path))
if target_path.startswith('../') or target_path.startswith('./'):
reg_path = mac_info.GetAbsolutePath(posixpath.split(reg_path)[0], target_path)
else:
reg_path = target_path
if not mac_info.IsValidFilePath(reg_path):
log.error(f"symlink did not point to a valid file?? path={reg_path}")
continue
mac_info.ExportFile(reg_path, __Plugin_Name, user_name, False)
conn, wrapper = OpenDbFromImage(mac_info, reg_path, user_name)
if conn:
ParseRegistrationDB(conn, office_reg_items, user_name, reg_path)
conn.close()
else:
log.debug('MicrosoftRegistrationDB.reg not found in path ' + reg_path_partial + '/' + folder['name'])
if len(office_items) > 0:
PrintItems(office_items, mac_info.output_params)
else:
log.info('No office items found')
if len(office_reg_items) > 0:
PrintRegItems(office_reg_items, mac_info.output_params)
else:
log.info('No office registries found')
def Plugin_Start_Standalone(input_files_list, output_params):
log.info("Module Started as standalone")
for input_path in input_files_list:
log.debug("Input file passed was: " + input_path)
office_items = []
office_reg_items = []
if input_path.endswith('com.microsoft.office.plist'):
success, plist, error = CommonFunctions.ReadPlist(input_path)
if success:
ProcessOfficePlist(plist, office_items, '', input_path)
else:
log.error('Failed to read file: {}. {}'.format(input_path, error))
else:
basename = path.basename(input_path)
if basename.startswith('com.microsoft.') and basename.endswith('.plist'):
success, plist, error = CommonFunctions.ReadPlist(input_path)
if success:
if basename.endswith('securebookmarks.plist'):
app_name = basename[14:-22]
ProcessOfficeAppSecureBookmarksPlist(plist, office_items, app_name, '', input_path)
else:
app_name = basename[14:-6]
ProcessOfficeAppPlist(plist, office_items, app_name, '', input_path)
else:
log.error('Failed to read file: {}. {}'.format(input_path, error))
elif input_path.endswith('MicrosoftRegistrationDB.reg'):
conn = OpenDb(input_path)
if conn:
ParseRegistrationDB(conn, office_reg_items, '', input_path)
conn.close()
if len(office_items) > 0:
PrintItems(office_items, output_params)
else:
log.info('No office items found in {}'.format(input_path))
if len(office_reg_items) > 0:
PrintRegItems(office_reg_items, output_params)
else:
log.info('No office registries found')
if __name__ == '__main__':
print ("This plugin is a part of a framework and does not run independently on its own!") | mit | 1,772,681,161,322,302,500 | 43.7051 | 231 | 0.576013 | false |
jojoblaze/Woodstock | data/templates/signup.mako.py | 1 | 3819 | # -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 5
_modified_time = 1298770751.096385
_template_filename='/home/jojo/pylons_projects/woodstock/woodstock/templates/signup.mako'
_template_uri='/signup.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
from webhelpers.html import escape
_exports = ['javascriptIncludes', 'body', 'head', 'title', 'styleSheetIncludes', 'documentReady']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/base.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n')
# SOURCE LINE 5
__M_writer(u'\n\n')
# SOURCE LINE 8
__M_writer(u'\n\n')
# SOURCE LINE 13
__M_writer(u'\n\n')
# SOURCE LINE 19
__M_writer(u'\n\n')
# SOURCE LINE 24
__M_writer(u'\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_javascriptIncludes(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 15
__M_writer(u'\n <script type="text/javascript" src="/scripts/js/jquery-ui-1.8.6.custom.min.js"></script>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_body(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 26
__M_writer(u'\n\n<div><h3>Sign Up!</h3></div>\n<div>\n <form name="edit_user_form" method="post" action="/home/register_user" enctype="multipart/form-data">\n <div> \n <span>Name: </span><input type="text" name="name" />\n </div>\n <div> \n <span>Password: </span><input type="text" name="password" />\n </div>\n <div> \n <span>E-mail: </span><input type="text" name="email" />\n </div>\n <br/>\n <div> \n <input type="reset" name="cancel" value="Cancel" OnClick="javascript:window.location = \'/users/index\'" />\n <input type="submit" name="submit" value="Save" />\n </div>\n </form>\n</div>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_head(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 7
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_title(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 3
__M_writer(u'\n Menu\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_styleSheetIncludes(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 10
__M_writer(u'\n \n')
return ''
finally:
context.caller_stack._pop_frame()
def render_documentReady(context):
context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 21
__M_writer(u'\n \n')
return ''
finally:
context.caller_stack._pop_frame()
| gpl-3.0 | -8,944,819,669,884,613,000 | 32.5 | 743 | 0.581042 | false |
MartinSoto/Seamless | src/plugins/lirc.py | 1 | 1844 | # Seamless DVD Player
# Copyright (C) 2004 Martin Soto <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import sys, os, popen2, signal
import traceback
import gobject
class Plugin(object):
def __init__(self, mainUi):
self.mainUi = mainUi
self.conn = popen2.Popen3('ircat seamless')
self.cmdFile = self.conn.fromchild
self.sourceId = gobject.io_add_watch(self.cmdFile,
gobject.IO_IN,
self.readData)
def readData(self, source, condition):
cmd = self.cmdFile.readline()[:-1]
if cmd != 'off' and cmd != 'leave':
try:
getattr(self.mainUi.getPlayer(), cmd)()
except:
traceback.print_exc()
return True
else:
self.mainUi.shutdown()
return False
def close(self):
gobject.source_remove(self.sourceId)
# Kill the ircat process explicitly. Otherwise, this program
# will hang forever.
os.kill(self.conn.pid, signal.SIGTERM)
os.waitpid(self.conn.pid, 0)
| gpl-2.0 | 4,236,236,327,357,102,600 | 33.148148 | 69 | 0.62961 | false |
yahoo/TensorFlowOnSpark | tensorflowonspark/TFManager.py | 1 | 2236 | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
from multiprocessing.managers import BaseManager
from multiprocessing import JoinableQueue
class TFManager(BaseManager):
"""Python multiprocessing.Manager for distributed, multi-process communication."""
pass
# global to each Spark executor's python worker
mgr = None # TFManager
qdict = {} # dictionary of queues
kdict = {} # dictionary of key-values
def _get(key):
return kdict[key]
def _set(key, value):
kdict[key] = value
def _get_queue(qname):
try:
return qdict[qname]
except KeyError:
return None
def start(authkey, queues, mode='local'):
"""Create a new multiprocess.Manager (or return existing one).
Args:
:authkey: string authorization key
:queues: *INTERNAL_USE*
:mode: 'local' indicates that the manager will only be accessible from the same host, otherwise remotely accessible.
Returns:
A TFManager instance, which is also cached in local memory of the Python worker process.
"""
global mgr, qdict, kdict
qdict.clear()
kdict.clear()
for q in queues:
qdict[q] = JoinableQueue()
TFManager.register('get_queue', callable=lambda qname: _get_queue(qname))
TFManager.register('get', callable=lambda key: _get(key))
TFManager.register('set', callable=lambda key, value: _set(key, value))
if mode == 'remote':
mgr = TFManager(address=('', 0), authkey=authkey)
else:
mgr = TFManager(authkey=authkey)
mgr.start()
return mgr
def connect(address, authkey):
"""Connect to a multiprocess.Manager.
Args:
:address: unique address to the TFManager, either a unique connection string for 'local', or a (host, port) tuple for remote.
:authkey: string authorization key
Returns:
A TFManager instance referencing the remote TFManager at the supplied address.
"""
TFManager.register('get_queue')
TFManager.register('get')
TFManager.register('set')
m = TFManager(address, authkey=authkey)
m.connect()
return m
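# Illustrative usage sketch (not part of the original module). It shows how an
# executor might start a remote TFManager and how a client process might attach
# to it; the queue names, key and authkey values below are assumptions.
#
#   mgr = start(authkey=b'secret', queues=['input', 'output'], mode='remote')
#   addr = mgr.address                      # (host, port) of the manager server
#   client = connect(addr, authkey=b'secret')
#   client.set('job_name', 'worker')        # store a key/value on the manager
#   q = client.get_queue('input')           # proxy to the shared JoinableQueue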
| apache-2.0 | -1,944,213,321,138,364,400 | 25.939759 | 129 | 0.711538 | false |
mohlerm/hotspot_cached_profiles | .mx.jvmci/mx_jvmci.py | 1 | 37568 | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, shutil, zipfile, re, time, sys, datetime, platform
from os.path import join, exists, dirname, isdir
from argparse import ArgumentParser, REMAINDER
import StringIO
import xml.dom.minidom
import subprocess
import mx
import mx_gate
import mx_unittest
from mx_gate import Task
from mx_unittest import unittest
_suite = mx.suite('jvmci')
JVMCI_VERSION = 9
"""
Top level directory of the JDK source workspace.
"""
_jdkSourceRoot = dirname(_suite.dir)
_JVMCI_JDK_TAG = 'jvmci'
_minVersion = mx.VersionSpec('1.9')
# max version (first _unsupported_ version)
_untilVersion = None
_jvmciModes = {
'hosted' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI'],
'jit' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler'],
'disabled' : []
}
# TODO: can optimized be built without overriding release build?
_jdkDebugLevels = ['release', 'fastdebug', 'slowdebug']
# TODO: add client once/if it can be built on 64-bit platforms
_jdkJvmVariants = ['server']
"""
Translation table from mx_jvmci:8 --vmbuild values to mx_jvmci:9 --jdk-debug-level values.
"""
_legacyVmbuilds = {
'product' : 'release',
'debug' : 'slowdebug'
}
"""
Translates a mx_jvmci:8 --vmbuild value to a mx_jvmci:9 --jdk-debug-level value.
"""
def _translateLegacyDebugLevel(debugLevel):
return _legacyVmbuilds.get(debugLevel, debugLevel)
"""
Translation table from mx_jvmci:8 --vm values to mx_jvmci:9 (--jdk-jvm-variant, --jvmci-mode) tuples.
"""
_legacyVms = {
'jvmci' : ('server', 'jit')
}
"""
A VM configuration composed of a JDK debug level, JVM variant and a JVMCI mode.
This is also a context manager that can be used with the 'with' statement to set/change
a VM configuration within a dynamic scope. For example:
    with VM(debugLevel='fastdebug'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
self.update(jvmVariant, debugLevel, jvmciMode)
def update(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
if jvmVariant in _legacyVms:
# Backwards compatibility for mx_jvmci:8 API
jvmVariant, newJvmciMode = _legacyVms[jvmVariant]
if jvmciMode is not None and jvmciMode != newJvmciMode:
mx.abort('JVM variant "' + jvmVariant + '" implies JVMCI mode "' + newJvmciMode +
'" which conflicts with explicitly specified JVMCI mode of "' + jvmciMode + '"')
jvmciMode = newJvmciMode
debugLevel = _translateLegacyDebugLevel(debugLevel)
assert jvmVariant is None or jvmVariant in _jdkJvmVariants, jvmVariant
assert debugLevel is None or debugLevel in _jdkDebugLevels, debugLevel
assert jvmciMode is None or jvmciMode in _jvmciModes, jvmciMode
self.jvmVariant = jvmVariant or _vm.jvmVariant
self.debugLevel = debugLevel or _vm.debugLevel
self.jvmciMode = jvmciMode or _vm.jvmciMode
def __enter__(self):
global _vm
self.previousVm = _vm
_vm = self
def __exit__(self, exc_type, exc_value, traceback):
global _vm
_vm = self.previousVm
_vm = VM(jvmVariant=_jdkJvmVariants[0], debugLevel=_jdkDebugLevels[0], jvmciMode='hosted')
def get_vm():
"""
Gets the configured VM.
"""
return _vm
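# Illustrative example (an assumption, not from the original source): VM is a
# context manager, so a configuration can be changed for a dynamic scope and
# queried via get_vm(); the previous configuration is restored on exit.
#
#   with VM(jvmVariant='server', debugLevel='fastdebug', jvmciMode='jit'):
#       assert get_vm().debugLevel == 'fastdebug'
#   # outside the 'with' block the prior VM configuration is active again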
def relativeVmLibDirInJdk():
mxos = mx.get_os()
if mxos == 'darwin':
return join('lib')
if mxos == 'windows' or mxos == 'cygwin':
return join('bin')
return join('lib', mx.get_arch())
def isJVMCIEnabled(vm):
assert vm in _jdkJvmVariants
return True
class JvmciJDKDeployedDist(object):
def __init__(self, name, compilers=False):
self._name = name
self._compilers = compilers
def dist(self):
return mx.distribution(self._name)
def deploy(self, jdkDir):
mx.nyi('deploy', self)
def post_parse_cmd_line(self):
self.set_archiveparticipant()
def set_archiveparticipant(self):
dist = self.dist()
dist.set_archiveparticipant(JVMCIArchiveParticipant(dist))
class ExtJDKDeployedDist(JvmciJDKDeployedDist):
def __init__(self, name):
JvmciJDKDeployedDist.__init__(self, name)
"""
The monolithic JVMCI distribution is deployed through use of -Xbootclasspath/p
so that it's not necessary to run JDK make after editing JVMCI sources.
The latter causes all JDK Java sources to be rebuilt since JVMCI is
(currently) in java.base.
"""
_monolithicJvmci = JvmciJDKDeployedDist('JVMCI')
"""
List of distributions that are deployed on the boot class path.
Note: In jvmci-8, they were deployed directly into the JDK directory.
"""
jdkDeployedDists = [_monolithicJvmci]
def _makehelp():
return subprocess.check_output([mx.gmake_cmd(), 'help'], cwd=_jdkSourceRoot)
def _runmake(args):
"""run the JDK make process
To build hotspot and import it into the JDK: "mx make hotspot import-hotspot"
{0}"""
jdkBuildDir = _get_jdk_build_dir()
if not exists(jdkBuildDir):
# JDK9 must be bootstrapped with a JDK8
compliance = mx.JavaCompliance('8')
jdk8 = mx.get_jdk(compliance.exactMatch, versionDescription=compliance.value)
cmd = ['sh', 'configure', '--with-debug-level=' + _vm.debugLevel, '--with-native-debug-symbols=none', '--disable-precompiled-headers',
'--with-jvm-variants=' + _vm.jvmVariant, '--disable-warnings-as-errors', '--with-boot-jdk=' + jdk8.home]
mx.run(cmd, cwd=_jdkSourceRoot)
cmd = [mx.gmake_cmd(), 'CONF=' + _vm.debugLevel]
if mx.get_opts().verbose:
cmd.append('LOG=debug')
cmd.extend(args)
if mx.get_opts().use_jdk_image and 'images' not in args:
cmd.append('images')
if not mx.get_opts().verbose:
mx.log('--------------- make execution ----------------------')
mx.log('Working directory: ' + _jdkSourceRoot)
mx.log('Command line: ' + ' '.join(cmd))
mx.log('-----------------------------------------------------')
mx.run(cmd, cwd=_jdkSourceRoot)
if 'images' in cmd:
jdkImageDir = join(jdkBuildDir, 'images', 'jdk')
# The OpenJDK build creates an empty cacerts file so copy one from
# the default JDK (which is assumed to be an OracleJDK)
srcCerts = join(mx.get_jdk(tag='default').home, 'jre', 'lib', 'security', 'cacerts')
dstCerts = join(jdkImageDir, 'lib', 'security', 'cacerts')
shutil.copyfile(srcCerts, dstCerts)
_create_jdk_bundle(jdkBuildDir, _vm.debugLevel, jdkImageDir)
def _get_jdk_bundle_arches():
"""
Gets a list of names that will be the part of a JDK bundle's file name denoting the architecture.
The first element in the list is the canonical name. Symlinks should be created for the
remaining names.
"""
cpu = mx.get_arch()
if cpu == 'amd64':
return ['x64', 'x86_64', 'amd64']
elif cpu == 'sparcv9':
return ['sparcv9']
mx.abort('Unsupported JDK bundle arch: ' + cpu)
def _create_jdk_bundle(jdkBuildDir, debugLevel, jdkImageDir):
"""
Creates a tar.gz JDK archive, an accompanying tar.gz.sha1 file with its
SHA1 signature plus symlinks to the archive for non-canonical architecture names.
"""
arches = _get_jdk_bundle_arches()
jdkTgzPath = join(_suite.get_output_root(), 'jdk-bundles', 'jdk9-{}-{}-{}.tar.gz'.format(debugLevel, _get_openjdk_os(), arches[0]))
with mx.Archiver(jdkTgzPath, kind='tgz') as arc:
mx.log('Creating ' + jdkTgzPath)
for root, _, filenames in os.walk(jdkImageDir):
for name in filenames:
f = join(root, name)
arcname = 'jdk1.9.0/' + os.path.relpath(f, jdkImageDir)
arc.zf.add(name=f, arcname=arcname, recursive=False)
with open(jdkTgzPath + '.sha1', 'w') as fp:
mx.log('Creating ' + jdkTgzPath + '.sha1')
fp.write(mx.sha1OfFile(jdkTgzPath))
def _create_link(source, link_name):
if exists(link_name):
os.remove(link_name)
mx.log('Creating ' + link_name + ' -> ' + source)
os.symlink(source, link_name)
for arch in arches[1:]:
link_name = join(_suite.get_output_root(), 'jdk-bundles', 'jdk9-{}-{}-{}.tar.gz'.format(debugLevel, _get_openjdk_os(), arch))
jdkTgzName = os.path.basename(jdkTgzPath)
_create_link(jdkTgzName, link_name)
_create_link(jdkTgzName + '.sha1', link_name + '.sha1')
def _runmultimake(args):
"""run the JDK make process for one or more configurations"""
jvmVariantsDefault = ','.join(_jdkJvmVariants)
debugLevelsDefault = ','.join(_jdkDebugLevels)
parser = ArgumentParser(prog='mx multimake')
parser.add_argument('--jdk-jvm-variants', '--vms', help='a comma separated list of VMs to build (default: ' + jvmVariantsDefault + ')', metavar='<args>', default=jvmVariantsDefault)
parser.add_argument('--jdk-debug-levels', '--builds', help='a comma separated list of JDK debug levels (default: ' + debugLevelsDefault + ')', metavar='<args>', default=debugLevelsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
select = parser.add_mutually_exclusive_group()
select.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log files')
select.add_argument('-d', '--output-dir', help='directory for log files instead of current working directory', default=os.getcwd(), metavar='<dir>')
args = parser.parse_args(args)
jvmVariants = args.jdk_jvm_variants.split(',')
debugLevels = [_translateLegacyDebugLevel(dl) for dl in args.jdk_debug_levels.split(',')]
allStart = time.time()
for jvmVariant in jvmVariants:
for debugLevel in debugLevels:
if not args.console:
logFile = join(mx.ensure_dir_exists(args.output_dir), jvmVariant + '-' + debugLevel + '.log')
log = open(logFile, 'wb')
start = time.time()
mx.log('BEGIN: ' + jvmVariant + '-' + debugLevel + '\t(see: ' + logFile + ')')
verbose = ['-v'] if mx.get_opts().verbose else []
# Run as subprocess so that output can be directed to a file
cmd = [sys.executable, '-u', mx.__file__] + verbose + ['--jdk-jvm-variant=' + jvmVariant, '--jdk-debug-level=' + debugLevel, 'make']
mx.logv("executing command: " + str(cmd))
subprocess.check_call(cmd, cwd=_suite.dir, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + jvmVariant + '-' + debugLevel + '\t[' + str(duration) + ']')
else:
with VM(jvmVariant=jvmVariant, debugLevel=debugLevel):
_runmake([])
if not args.no_check:
with VM(jvmciMode='jit'):
run_vm(['-XX:-BootstrapJVMCI', '-version'])
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
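# Example invocation (hedged; the argument values are assumptions): build two
# debug levels of the server variant in one go, logging each build to its own file.
#
#   mx multimake --jdk-jvm-variants server --jdk-debug-levels release,fastdebug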
class HotSpotProject(mx.NativeProject):
"""
Defines a NativeProject representing the HotSpot binaries built via make.
"""
def __init__(self, suite, name, deps, workingSets, **args):
assert name == 'hotspot'
mx.NativeProject.__init__(self, suite, name, "", [], deps, workingSets, None, None, join(suite.mxDir, name))
def eclipse_config_up_to_date(self, configZip):
# Assume that any change to this module might imply changes to the generated IDE files
if configZip.isOlderThan(__file__):
return False
for _, source in self._get_eclipse_settings_sources().iteritems():
if configZip.isOlderThan(source):
return False
return True
def _get_eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the file providing its generated content.
"""
if not hasattr(self, '_eclipse_settings'):
esdict = {}
templateSettingsDir = join(self.dir, 'templates', 'eclipse', 'settings')
if exists(templateSettingsDir):
for name in os.listdir(templateSettingsDir):
source = join(templateSettingsDir, name)
esdict[name] = source
self._eclipse_settings = esdict
return self._eclipse_settings
def _eclipseinit(self, files=None, libFiles=None):
"""
Generates an Eclipse project for each HotSpot build configuration.
"""
roots = [
'ASSEMBLY_EXCEPTION',
'LICENSE',
'README',
'THIRD_PARTY_README',
'agent',
'make',
'src',
'test'
]
for jvmVariant in _jdkJvmVariants:
for debugLevel in _jdkDebugLevels:
name = jvmVariant + '-' + debugLevel
eclProjectDir = join(self.dir, 'eclipse', name)
mx.ensure_dir_exists(eclProjectDir)
out = mx.XMLDoc()
out.open('projectDescription')
out.element('name', data='hotspot:' + name)
out.element('comment', data='')
out.element('projects', data='')
out.open('buildSpec')
out.open('buildCommand')
out.element('name', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder')
out.element('triggers', data='full,incremental')
out.element('arguments', data='')
out.close('buildCommand')
out.close('buildSpec')
out.open('natures')
out.element('nature', data='org.eclipse.cdt.core.cnature')
out.element('nature', data='org.eclipse.cdt.core.ccnature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.managedBuildNature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigNature')
out.close('natures')
if roots:
out.open('linkedResources')
for r in roots:
f = join(_suite.dir, r)
out.open('link')
out.element('name', data=r)
out.element('type', data='2' if isdir(f) else '1')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(f, eclProjectDir))
out.close('link')
out.open('link')
out.element('name', data='generated')
out.element('type', data='2')
generated = join(_get_hotspot_build_dir(jvmVariant, debugLevel), 'generated')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(generated, eclProjectDir))
out.close('link')
out.close('linkedResources')
out.close('projectDescription')
projectFile = join(eclProjectDir, '.project')
mx.update_file(projectFile, out.xml(indent='\t', newl='\n'))
if files:
files.append(projectFile)
cprojectTemplate = join(self.dir, 'templates', 'eclipse', 'cproject')
cprojectFile = join(eclProjectDir, '.cproject')
with open(cprojectTemplate) as f:
content = f.read()
mx.update_file(cprojectFile, content)
if files:
files.append(cprojectFile)
settingsDir = join(eclProjectDir, ".settings")
mx.ensure_dir_exists(settingsDir)
for name, source in self._get_eclipse_settings_sources().iteritems():
out = StringIO.StringIO()
print >> out, '# GENERATED -- DO NOT EDIT'
print >> out, '# Source:', source
with open(source) as f:
print >> out, f.read()
content = out.getvalue()
mx.update_file(join(settingsDir, name), content)
if files:
files.append(join(settingsDir, name))
def getBuildTask(self, args):
return JDKBuildTask(self, args, _vm.debugLevel, _vm.jvmVariant)
class JDKBuildTask(mx.NativeBuildTask):
def __init__(self, project, args, debugLevel, jvmVariant):
mx.NativeBuildTask.__init__(self, args, project)
self.jvmVariant = jvmVariant
self.debugLevel = debugLevel
def __str__(self):
return 'Building JDK[{}, {}]'.format(self.debugLevel, self.jvmVariant)
def build(self):
if mx.get_opts().use_jdk_image:
_runmake(['images'])
else:
_runmake([])
self._newestOutput = None
def clean(self, forBuild=False):
if forBuild: # Let make handle incremental builds
return
if exists(_get_jdk_build_dir(self.debugLevel)):
_runmake(['clean'])
self._newestOutput = None
# Backwards compatibility for mx_jvmci:8 API
def buildvms(args):
_runmultimake(args)
def run_vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, debugLevel=None, vmbuild=None):
"""run a Java program by executing the java executable in a JVMCI JDK"""
jdkTag = mx.get_jdk_option().tag
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.abort('The "--jdk" option must have the tag "' + _JVMCI_JDK_TAG + '" when running a command requiring a JVMCI VM')
jdk = get_jvmci_jdk(debugLevel=debugLevel or _translateLegacyDebugLevel(vmbuild))
return jdk.run_java(args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _unittest_vm_launcher(vmArgs, mainClass, mainClassArgs):
run_vm(vmArgs + [mainClass] + mainClassArgs)
mx_unittest.set_vm_launcher('JVMCI VM launcher', _unittest_vm_launcher)
def _jvmci_gate_runner(args, tasks):
# Build release server VM now so we can run the unit tests
with Task('BuildHotSpotJVMCIHosted: release', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'release'])
# Run unit tests in hosted mode
with VM(jvmVariant='server', debugLevel='release', jvmciMode='hosted'):
with Task('JVMCI UnitTests: hosted-release', tasks) as t:
if t: unittest(['--suite', 'jvmci', '--enable-timing', '--verbose', '--fail-fast'])
# Build the other VM flavors
with Task('BuildHotSpotJVMCIOthers: fastdebug', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'fastdebug'])
with Task('CleanAndBuildIdealGraphVisualizer', tasks, disableJacoco=True) as t:
if t and platform.processor() != 'sparc':
buildxml = mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'))
mx.run(['ant', '-f', buildxml, '-q', 'clean', 'build'], env=_igvBuildEnv())
mx_gate.add_gate_runner(_suite, _jvmci_gate_runner)
mx_gate.add_gate_argument('-g', '--only-build-jvmci', action='store_false', dest='buildNonJVMCI', help='only build the JVMCI VM')
def _igvJdk():
v8u20 = mx.VersionSpec("1.8.0_20")
v8u40 = mx.VersionSpec("1.8.0_40")
v8 = mx.VersionSpec("1.8")
def _igvJdkVersionCheck(version):
return version >= v8 and (version < v8u20 or version >= v8u40)
return mx.get_jdk(_igvJdkVersionCheck, versionDescription='>= 1.8 and < 1.8.0u20 or >= 1.8.0u40', purpose="building & running IGV").home
def _igvBuildEnv():
# When the http_proxy environment variable is set, convert it to the proxy settings that ant needs
env = dict(os.environ)
proxy = os.environ.get('http_proxy')
if not (proxy is None) and len(proxy) > 0:
if '://' in proxy:
# Remove the http:// prefix (or any other protocol prefix)
proxy = proxy.split('://', 1)[1]
# Separate proxy server name and port number
proxyName, proxyPort = proxy.split(':', 1)
proxyEnv = '-DproxyHost="' + proxyName + '" -DproxyPort=' + proxyPort
env['ANT_OPTS'] = proxyEnv
env['JAVA_HOME'] = _igvJdk()
return env
def igv(args):
"""run the Ideal Graph Visualizer"""
logFile = '.ideal_graph_visualizer.log'
with open(join(_suite.dir, logFile), 'w') as fp:
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
nbplatform = join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')
# Remove NetBeans platform if it is earlier than the current supported version
if exists(nbplatform):
updateTrackingFile = join(nbplatform, 'platform', 'update_tracking', 'org-netbeans-core.xml')
if not exists(updateTrackingFile):
mx.log('Could not find \'' + updateTrackingFile + '\', removing NetBeans platform')
shutil.rmtree(nbplatform)
else:
dom = xml.dom.minidom.parse(updateTrackingFile)
currentVersion = mx.VersionSpec(dom.getElementsByTagName('module_version')[0].getAttribute('specification_version'))
supportedVersion = mx.VersionSpec('3.43.1')
if currentVersion < supportedVersion:
mx.log('Replacing NetBeans platform version ' + str(currentVersion) + ' with version ' + str(supportedVersion))
shutil.rmtree(nbplatform)
elif supportedVersion < currentVersion:
mx.log('Supported NetBeans version in igv command should be updated to ' + str(currentVersion))
if not exists(nbplatform):
mx.logv('[This execution may take a while as the NetBeans platform needs to be downloaded]')
env = _igvBuildEnv()
# make the jar for Batik 1.7 available.
env['IGV_BATIK_JAR'] = mx.library('BATIK').get_path(True)
if mx.run(['ant', '-f', mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml')), '-l', mx._cygpathU2W(fp.name), 'run'], env=env, nonZeroIsFatal=False):
mx.abort("IGV ant build & launch failed. Check '" + logFile + "'. You can also try to delete 'src/share/tools/IdealGraphVisualizer/nbplatform'.")
def c1visualizer(args):
"""run the Cl Compiler Visualizer"""
libpath = join(_suite.dir, 'lib')
if mx.get_os() == 'windows':
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer.exe')
else:
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer')
    # Check whether the current C1Visualizer installation is up to date
if exists(executable) and not exists(mx.library('C1VISUALIZER_DIST').get_path(resolve=False)):
mx.log('Updating C1Visualizer')
shutil.rmtree(join(libpath, 'c1visualizer'))
archive = mx.library('C1VISUALIZER_DIST').get_path(resolve=True)
if not exists(executable):
zf = zipfile.ZipFile(archive, 'r')
zf.extractall(libpath)
if not exists(executable):
mx.abort('C1Visualizer binary does not exist: ' + executable)
if mx.get_os() != 'windows':
        # Make sure that execution is allowed. The zip file does not always specify that correctly
os.chmod(executable, 0777)
mx.run([executable])
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
    By default it downloads the Intel syntax version; use the 'att' argument to install the AT&T syntax version."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
if mx.get_arch() == "sparcv9":
flavor = "sparcv9"
lib = mx.add_lib_suffix('hsdis-' + mx.get_arch())
path = join(_suite.dir, 'lib', lib)
sha1s = {
'att/hsdis-amd64.dll' : 'bcbd535a9568b5075ab41e96205e26a2bac64f72',
'att/hsdis-amd64.so' : '58919ba085d4ef7a513f25bae75e7e54ee73c049',
'intel/hsdis-amd64.dll' : '6a388372cdd5fe905c1a26ced614334e405d1f30',
'intel/hsdis-amd64.so' : '844ed9ffed64fe9599638f29a8450c50140e3192',
'intel/hsdis-amd64.dylib' : 'fdb13ef0d7d23d93dacaae9c98837bea0d4fc5a2',
'sparcv9/hsdis-sparcv9.so': '970640a9af0bd63641f9063c11275b371a59ee60',
}
flavoredLib = flavor + "/" + lib
if flavoredLib not in sha1s:
mx.logv("hsdis not supported on this plattform or architecture")
return
if not exists(path):
sha1 = sha1s[flavoredLib]
sha1path = path + '.sha1'
mx.download_file_with_sha1('hsdis', path, ['https://lafo.ssw.uni-linz.ac.at/pub/hsdis/' + flavoredLib], sha1, sha1path, True, True, sources=False)
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = mx.library('HCFDIS').get_path(resolve=True)
mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
def jol(args):
"""Java Object Layout"""
joljar = mx.library('JOL_INTERNALS').get_path(resolve=True)
candidates = mx.findclass(args, logToConsole=False, matcher=lambda s, classname: s == classname or classname.endswith('.' + s) or classname.endswith('$' + s))
if len(candidates) > 0:
candidates = mx.select_items(sorted(candidates))
else:
# mx.findclass can be mistaken, don't give up yet
candidates = args
run_vm(['-javaagent:' + joljar, '-cp', os.pathsep.join([mx.classpath(), joljar]), "org.openjdk.jol.MainObjectInternals"] + candidates)
class JVMCIArchiveParticipant:
def __init__(self, dist):
self.dist = dist
def __opened__(self, arc, srcArc, services):
self.services = services
self.jvmciServices = services
self.arc = arc
def __add__(self, arcname, contents):
return False
def __addsrc__(self, arcname, contents):
return False
def __closing__(self):
pass
def _get_openjdk_os():
# See: common/autoconf/platform.m4
os = mx.get_os()
if 'darwin' in os:
os = 'macosx'
elif 'linux' in os:
os = 'linux'
elif 'solaris' in os:
os = 'solaris'
elif 'cygwin' in os or 'mingw' in os:
os = 'windows'
return os
def _get_openjdk_cpu():
cpu = mx.get_arch()
if cpu == 'amd64':
cpu = 'x86_64'
elif cpu == 'sparcv9':
cpu = 'sparcv9'
return cpu
def _get_openjdk_os_cpu():
return _get_openjdk_os() + '-' + _get_openjdk_cpu()
def _get_jdk_build_dir(debugLevel=None):
"""
Gets the directory into which the JDK is built. This directory contains
the exploded JDK under jdk/ and the JDK image under images/jdk/.
"""
if debugLevel is None:
debugLevel = _vm.debugLevel
name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
return join(dirname(_suite.dir), 'build', name)
_jvmci_bootclasspath_prepends = []
def _get_hotspot_build_dir(jvmVariant=None, debugLevel=None):
"""
Gets the directory in which a particular HotSpot configuration is built
(e.g., <JDK_REPO_ROOT>/build/macosx-x86_64-normal-server-release/hotspot/bsd_amd64_compiler2)
"""
if jvmVariant is None:
jvmVariant = _vm.jvmVariant
os = mx.get_os()
if os == 'darwin':
os = 'bsd'
arch = mx.get_arch()
buildname = {'client': 'compiler1', 'server': 'compiler2'}.get(jvmVariant, jvmVariant)
name = '{}_{}_{}'.format(os, arch, buildname)
return join(_get_jdk_build_dir(debugLevel=debugLevel), 'hotspot', name)
def add_bootclasspath_prepend(dep):
assert isinstance(dep, mx.ClasspathDependency)
_jvmci_bootclasspath_prepends.append(dep)
class JVMCI9JDKConfig(mx.JDKConfig):
def __init__(self, debugLevel):
self.debugLevel = debugLevel
jdkBuildDir = _get_jdk_build_dir(debugLevel)
jdkDir = join(jdkBuildDir, 'images', 'jdk') if mx.get_opts().use_jdk_image else join(jdkBuildDir, 'jdk')
mx.JDKConfig.__init__(self, jdkDir, tag=_JVMCI_JDK_TAG)
def parseVmArgs(self, args, addDefaultArgs=True):
args = mx.expand_project_in_args(args, insitu=False)
jacocoArgs = mx_gate.get_jacoco_agent_args()
if jacocoArgs:
args = jacocoArgs + args
args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args
# Remove JVMCI jars from class path. They are only necessary when
# compiling with a javac from JDK8 or earlier.
cpIndex, cp = mx.find_classpath_arg(args)
if cp:
excluded = frozenset([dist.path for dist in _suite.dists])
cp = os.pathsep.join([e for e in cp.split(os.pathsep) if e not in excluded])
args[cpIndex] = cp
jvmciModeArgs = _jvmciModes[_vm.jvmciMode]
if jvmciModeArgs:
bcpDeps = [jdkDist.dist() for jdkDist in jdkDeployedDists]
if bcpDeps:
args = ['-Xbootclasspath/p:' + os.pathsep.join([d.classpath_repr() for d in bcpDeps])] + args
# Set the default JVMCI compiler
for jdkDist in reversed(jdkDeployedDists):
assert isinstance(jdkDist, JvmciJDKDeployedDist), jdkDist
if jdkDist._compilers:
jvmciCompiler = jdkDist._compilers[-1]
args = ['-Djvmci.compiler=' + jvmciCompiler] + args
break
if '-version' in args:
ignoredArgs = args[args.index('-version') + 1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
return self.processArgs(args, addDefaultArgs=addDefaultArgs)
# Overrides JDKConfig
def run_java(self, args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True):
if vm is None:
vm = 'server'
args = self.parseVmArgs(args, addDefaultArgs=addDefaultArgs)
jvmciModeArgs = _jvmciModes[_vm.jvmciMode]
cmd = [self.java] + ['-' + vm] + jvmciModeArgs + args
return mx.run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
"""
The dict of JVMCI JDKs indexed by debug-level names.
"""
_jvmci_jdks = {}
def get_jvmci_jdk(debugLevel=None):
"""
Gets the JVMCI JDK corresponding to 'debugLevel'.
"""
if not debugLevel:
debugLevel = _vm.debugLevel
jdk = _jvmci_jdks.get(debugLevel)
if jdk is None:
try:
jdk = JVMCI9JDKConfig(debugLevel)
except mx.JDKConfigException as e:
jdkBuildDir = _get_jdk_build_dir(debugLevel)
msg = 'Error with the JDK built into {}:\n{}\nTry (re)building it with: mx --jdk-debug-level={} make'
if mx.get_opts().use_jdk_image:
msg += ' images'
mx.abort(msg.format(jdkBuildDir, e.message, debugLevel))
_jvmci_jdks[debugLevel] = jdk
return jdk
class JVMCI9JDKFactory(mx.JDKFactory):
def getJDKConfig(self):
jdk = get_jvmci_jdk(_vm.debugLevel)
return jdk
def description(self):
return "JVMCI JDK"
mx.update_commands(_suite, {
'make': [_runmake, '[args...]', _makehelp],
'multimake': [_runmultimake, '[options]'],
'c1visualizer' : [c1visualizer, ''],
'hsdis': [hsdis, '[att]'],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jol' : [jol, ''],
'vm': [run_vm, '[-options] class [args...]'],
})
mx.add_argument('-M', '--jvmci-mode', action='store', choices=sorted(_jvmciModes.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmciMode + ')')
mx.add_argument('--jdk-jvm-variant', '--vm', action='store', choices=_jdkJvmVariants + sorted(_legacyVms.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmVariant + ')')
mx.add_argument('--jdk-debug-level', '--vmbuild', action='store', choices=_jdkDebugLevels + sorted(_legacyVmbuilds.viewkeys()), help='the JDK debug level to build/run (default: ' + _vm.debugLevel + ')')
mx.add_argument('-I', '--use-jdk-image', action='store_true', help='build/run JDK image instead of exploded JDK')
mx.addJDKFactory(_JVMCI_JDK_TAG, mx.JavaCompliance('9'), JVMCI9JDKFactory())
def mx_post_parse_cmd_line(opts):
mx.set_java_command_default_jdk_tag(_JVMCI_JDK_TAG)
jdkTag = mx.get_jdk_option().tag
jvmVariant = None
debugLevel = None
jvmciMode = None
if opts.jdk_jvm_variant is not None:
jvmVariant = opts.jdk_jvm_variant
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-jvm-variant" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jdk_debug_level is not None:
debugLevel = _translateLegacyDebugLevel(opts.jdk_debug_level)
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-debug-level" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jvmci_mode is not None:
jvmciMode = opts.jvmci_mode
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jvmci-mode" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
_vm.update(jvmVariant, debugLevel, jvmciMode)
for jdkDist in jdkDeployedDists:
jdkDist.post_parse_cmd_line()
def _update_JDK9_STUBS_library():
"""
Sets the "path" and "sha1" attributes of the "JDK9_STUBS" library.
"""
jdk9InternalLib = _suite.suiteDict['libraries']['JDK9_STUBS']
jarInputDir = join(_suite.get_output_root(), 'jdk9-stubs')
jarPath = join(_suite.get_output_root(), 'jdk9-stubs.jar')
stubs = [
('jdk.internal.misc', 'VM', """package jdk.internal.misc;
public class VM {
public static String getSavedProperty(String key) {
throw new InternalError("should not reach here");
}
}
""")
]
if not exists(jarPath):
sourceFiles = []
for (package, className, source) in stubs:
sourceFile = join(jarInputDir, package.replace('.', os.sep), className + '.java')
mx.ensure_dir_exists(os.path.dirname(sourceFile))
with open(sourceFile, 'w') as fp:
fp.write(source)
sourceFiles.append(sourceFile)
jdk = mx.get_jdk(tag='default')
mx.run([jdk.javac, '-d', jarInputDir] + sourceFiles)
mx.run([jdk.jar, 'cf', jarPath, '.'], cwd=jarInputDir)
jdk9InternalLib['path'] = jarPath
jdk9InternalLib['sha1'] = mx.sha1OfFile(jarPath)
_update_JDK9_STUBS_library()
| gpl-2.0 | 4,022,981,025,196,164,600 | 39.968375 | 202 | 0.612223 | false |
STREAM3/visisc | _visisc_modules/EventDataModel.py | 1 | 12474 | # Copyright (C) 2014, 2015, 2016 SICS Swedish ICT AB
#
# Main author: Tomas Olsson <[email protected]>
#
# License: BSD 3 clause
from numpy import array,r_, ndarray
from pyisc import AnomalyDetector, DataObject
from pyisc import P_PoissonOnesided, P_Poisson
from visisc import _EventDataModel, \
EventHierarchyElement, \
EventDataObject, \
get_global_num_of_severity_levels, \
set_global_num_of_severity_levels
__author__ = 'tol'
class EventDataModel(_EventDataModel):
class_column = None
period_column = None
root_column = None
_event_data_object = None
_anomaly_detector = None
num_of_event_columns = None
num_of_severity_levels_ = None
@staticmethod
def flat_model(event_columns, event_names=None):
'''
        Creates a flat event data model whose event structure has a single top element that is the sum of all the
        provided event columns.
:param event_columns: column indexes pointing to frequencies of all events
:param event_names: optional argument for giving names to each column
:return: an instance of EventDataModel
'''
set_global_num_of_severity_levels(1)
root_column = 2
new_event_columns = range(root_column + 1, root_column + len(event_columns) + 1)
if event_columns is not None and len(event_columns) > 0:
_event_sev2original_column_map = {}
root = EventHierarchyElement("Root")
for i in xrange(len(new_event_columns)):
event = EventHierarchyElement(event_names[i]) \
if event_names is not None else \
EventHierarchyElement("%i" % event_columns[i])
event.set_index_value(0, new_event_columns[i])
event.set_index_component(0, i+1)# Refers to the same component model as provided to the anomaly detector created in an instance method.
root.add_child(event)
_event_sev2original_column_map[(event.name, 0)] = event_columns[i]
root.set_index_value(0,root_column)
root.set_index_component(0, 0) # First component model below
num_of_event_columns = len(event_columns) + 1
model = EventDataModel(root, num_of_event_columns, 0)
model.root = root
model.root_column = root_column
model.num_of_event_columns = num_of_event_columns
model._event_sev2original_column_map = _event_sev2original_column_map
model.num_of_severity_levels_ = 1
return model
raise ValueError("No columns provided")
@staticmethod
def hierarchical_model(event_columns, get_event_path=None, get_severity_level=None, num_of_severity_levels=1, sep='.'):
'''
Creates an hierarchical event data model with types as defined by method get_event_path and with a severity
level as defined by method get_severity_level.
:param event_columns: a list or an array with original event column index.
:param get_event_path: a callable that returns a list with path elements of strings defining the path for the
event in the hierarchy. The given argument is the event column index from event_columns
        :param get_severity_level: a callable that returns the severity level as an integer from 0 (least severe)
        up to num_of_severity_levels - 1 (most severe).
:param num_of_severity_levels: an integer > 0 that specifies the number of severity levels.
:param sep: a string that is put in between the path elements to form event names.
:return:
'''
if get_severity_level is None:
set_global_num_of_severity_levels(1)
num_of_severity_levels = 1
else:
set_global_num_of_severity_levels(num_of_severity_levels)
if event_columns is not None and len(event_columns) > 0:
# Create Event hierarchy
root = EventHierarchyElement("Root")
events = {root.name:root}
num_of_event_columns = 0
for i in range(len(event_columns)):
path0 = get_event_path(event_columns[i]) if get_event_path is not None else ["%i" % event_columns[i]]
severity_level = get_severity_level(event_columns[i]) if get_severity_level is not None else 0
parent = root
if root.get_index_value(severity_level) == -1:
root.set_index_value(severity_level, 0)
num_of_event_columns += 1
for h in range(1,len(path0)+1):
path = str(sep.join([root.name]+path0[:h]))
if path in events.keys():
event = events[path]
else:
event = EventHierarchyElement(path)
events[path] = event
parent.add_child(event)
if event.get_index_value(severity_level) == -1:
event.set_index_value(severity_level, i) # index to be replaced with true column index later
num_of_event_columns += 1
parent = event
# Replace root if original root has only one child
if root.num_of_children == 1:
for sev_lev_ind in xrange(num_of_severity_levels):
if root.get_index_value(sev_lev_ind) != -1:
num_of_event_columns -= 1
new_root = root.child_
del events[root.name]
for name in events.keys():
event = events[name]
del events[name]
event.name = name[len(root.name) + 1:]
events[event.name] = event
root = new_root
root.parent_ = None
root.parent = None
# Create new data object with hierarchical structure
root_column = 2 # In _data_object we create a new data object with data from this column
# map events to column index
new_column = root_column
event = root
_event_sev2original_column_map = {}
while event is not None:
for sev_lev_ind in xrange(num_of_severity_levels):
if event.get_index_value(sev_lev_ind) != -1:
if event.num_of_children == 0:
_event_sev2original_column_map[(event.name,sev_lev_ind)] = event_columns[event.get_index_value(sev_lev_ind)] # Store original event columns
event.set_index_value(sev_lev_ind, new_column)
event.set_index_component(sev_lev_ind, new_column-root_column) # index to the component model provided to the anomaly detector
new_column += 1
event = event.next()
model = EventDataModel(root, len(events), 0)
model.root = root
model.root_column = root_column
model.num_of_event_columns = num_of_event_columns
model._event_sev2original_column_map = _event_sev2original_column_map
model.num_of_severity_levels_ = num_of_severity_levels
return model
raise ValueError("No columns provided")
def data_object(self, X, period_column, date_column, source_column, class_column=None):
'''
        Creates an EventDataObject using the event model. It only takes a single, common period for all events.
:param X: an numpy array
:param period_column: column index pointing to the column containing the period.
:param date_column: column index pointing to the date of each row, instance of datetime.date
:param source_column: column index pointing to the identifier of the source of each row: must be convertible to str.
:param class_column: column index pointing to the class of the source of each row.
:return: an instance of EventDataObject with new column structure reflecting the event data model.
'''
assert isinstance(X, ndarray)
XT = X.T
offset_columns = [col for col in [class_column, period_column] if col is not None]
X_newT = r_[
XT[offset_columns],
array([[0.0 for _ in xrange(len(X))] for _ in xrange(self.num_of_event_columns)])
]
self.class_column = 0 if class_column > -1 else None
self.period_column = 1 if self.class_column == 0 else 0
# Sum the frequency counts of sub types, starting from the leaves and up
event = self.root
while event is not None:
if event.num_of_children == 0: # If leaf
for sev_lev_ind in xrange(self.num_of_severity_levels_):
if event.get_index_value(sev_lev_ind) != -1:
current = event
while current is not None:
X_newT[current.get_index_value(sev_lev_ind)] += XT[self._event_sev2original_column_map[(event.name,sev_lev_ind)]]
current = current.parent
event = event.next()
# End creating data object
self._event_data_object = EventDataObject(X_newT.T,class_column=self.class_column)
self._event_data_object.model_ = self
self._event_data_object.dates_ = XT[date_column]
self._event_data_object.sources_ = array([str(t) for t in XT[source_column]])
return self._event_data_object
def fit_anomaly_detector(self, data_object, poisson_onesided=True):
if poisson_onesided:
anomaly_detector = AnomalyDetector([
P_PoissonOnesided(self.root_column+i, self.period_column)
for i in xrange(self.num_of_event_columns)
])
else:
anomaly_detector = AnomalyDetector([
P_Poisson(self.root_column+i, self.period_column)
for i in xrange(self.num_of_event_columns)
])
self._anomaly_detector = anomaly_detector.fit(data_object)
return anomaly_detector
def get_event_column_names(self, only_basic_events=False):
names = []
event = self.root
while event is not None:
if not only_basic_events or event.num_of_children == 0:
for sev_lev in xrange(self.num_of_severity_levels_):
if event.get_index_value(sev_lev) != -1:
names += [event.name+"/severity_"+ str(sev_lev)]
event = event.next()
return names
def get_column_names(self):
names = []
event = self.root
while event is not None:
for sev_lev in xrange(self.num_of_severity_levels_):
if event.get_index_value(sev_lev) != -1:
names += [event.name+"/severity_"+ str(sev_lev)]
event = event.next()
return ([] if self.class_column is None else ['Class']) + ['Period'] + names;
def calc_one(self, data_index):
assert isinstance(data_index, int) and \
data_index < len(self._event_data_object) and \
data_index >= 0
result = self._anomaly_detector.anomaly_score_details(self._event_data_object, index=data_index)
devs_index = 1+self._anomaly_detector.is_clustering + (self._anomaly_detector.class_column > -1)
devs = result[devs_index]
expected2 = result[devs_index+1]
min_out = result[devs_index+2]
max_out = result[devs_index+3]
severities = self.summarize_event_anomalies(devs);
expect = expected2[self._offset:]
return devs, severities, expect, min_out, max_out
def summarize_event_anomalies(self, devs):
sevs = array([0.0]*get_global_num_of_severity_levels())
ele = self.get_event_hierarchy()
while ele is not None:
for i in range(get_global_num_of_severity_levels()):
if ele.get_index_value(i) != -1:
if devs[ele.get_index_component(i)] > sevs[i]:
sevs[i] = devs[ele.get_index_component(i)]
ele = ele.event_hierarchy_next();
return sevs | bsd-3-clause | 4,326,833,483,550,511,000 | 43.713262 | 167 | 0.578884 | false |
helenwarren/pied-wagtail | wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py | 1 | 3218 | from django import template
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin import hooks
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy
from wagtail.wagtailcore.util import camelcase_to_underscore
register = template.Library()
@register.inclusion_tag('wagtailadmin/shared/explorer_nav.html')
def explorer_nav():
return {
'nodes': get_navigation_menu_items()
}
@register.inclusion_tag('wagtailadmin/shared/explorer_nav.html')
def explorer_subnav(nodes):
return {
'nodes': nodes
}
@register.inclusion_tag('wagtailadmin/shared/main_nav.html', takes_context=True)
def main_nav(context):
menu_items = [
MenuItem(_('Explorer'), '#', classnames='icon icon-folder-open-inverse dl-trigger', order=100),
MenuItem(_('Search'), urlresolvers.reverse('wagtailadmin_pages_search'), classnames='icon icon-search', order=200),
]
request = context['request']
for fn in hooks.get_hooks('construct_main_menu'):
fn(request, menu_items)
return {
'menu_items': sorted(menu_items, key=lambda i: i.order),
'request': request,
}
@register.filter("ellipsistrim")
def ellipsistrim(value, max_length):
if len(value) > max_length:
truncd_val = value[:max_length]
if not len(value) == max_length+1 and value[max_length+1] != " ":
truncd_val = truncd_val[:truncd_val.rfind(" ")]
return truncd_val + "..."
return value
@register.filter
def fieldtype(bound_field):
try:
return camelcase_to_underscore(bound_field.field.__class__.__name__)
except AttributeError:
return ""
@register.filter
def meta_description(model):
try:
return model.model_class()._meta.description
except:
return ""
@register.assignment_tag(takes_context=True)
def page_permissions(context, page):
"""
Usage: {% page_permissions page as page_perms %}
Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out
what actions the current logged-in user can perform on the given page.
"""
# Create a UserPagePermissionsProxy object to represent the user's global permissions, and
# cache it in the context for the duration of the page request, if one does not exist already
if 'user_page_permissions' not in context:
context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)
# Now retrieve a PagePermissionTester from it, specific to the given page
return context['user_page_permissions'].for_page(page)
@register.simple_tag
def hook_output(hook_name):
"""
Example: {% hook_output 'insert_editor_css' %}
Whenever we have a hook whose functions take no parameters and return a string, this tag can be used
to output the concatenation of all of those return values onto the page.
Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.
"""
snippets = [fn() for fn in hooks.get_hooks(hook_name)]
return u''.join(snippets)
| bsd-3-clause | 6,854,908,379,575,165,000 | 31.836735 | 123 | 0.69826 | false |
ppsirg/openshiftDeploy | ready_to_deploy_example/blpass/wsgi/blpass/blpass/settings.py | 1 | 5355 | """
Django settings for blpass project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$_a!^h6i^ctsa^60%vzd4p6c#1g=#9c%c$!0=udq8o@zaph-g9'
# APP ADMINISTRATORS
ADMINS = (
('pedro rivera', '[email protected]'),
)
MANAGERS = ADMINS
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = [u'localhost',
u'blpass-emdyptests.rhcloud.com',
u'.prod.rhcloud.com',
u'.prod.rhcloud.com']
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'blpass.wsgi.application'
# Application definition
INSTALLED_APPS = (
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'registers',
'citizens',
'blocks',
'keys',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'blpass.urls'
WSGI_APPLICATION = 'blpass.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Static root
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static')
#STATICFILES_STORAGE = STATIC_ROOT
# Media roots and url
#abspath = os.path.abspath(__file__)
#MEDIA_ROOT = os.sep.join(abspath.split(os.sep)[:-2] + ['media'])
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'media')
MEDIA_URL = '/media/'
# Admin module's settings
# GRAPPELLI SETTINGS
if 'grappelli' in INSTALLED_APPS:
    TEMPLATE_CONTEXT_PROCESSORS = (
        'django.core.context_processors.request',)
GRAPPELLI_ADMIN_TITLE = 'Coinfest Bogota 2015 - Emdyp'
# BOOTSTRAP ADMIN SETTINGS
elif 'bootstrap_admin' in INSTALLED_APPS:
from django.conf import global_settings
temp_contest_processors = global_settings.TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS = temp_contest_processors + (
'django.core.context_processors.request',
)
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
# OPENSHIFT CONFIGS
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__),'templates').replace('\\','/'),
os.path.join(os.path.dirname(__file__),'..','registers','templates').replace('\\','/'),
os.path.join(os.path.dirname(__file__),'..','citizens','templates').replace('\\','/'),
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
| gpl-2.0 | 4,071,179,701,920,582,000 | 27.036649 | 91 | 0.682726 | false |
colour-science/colour | colour/contrast/barten1999.py | 1 | 17067 | # -*- coding: utf-8 -*-
"""
Barten (1999) Contrast Sensitivity Function
===========================================
Defines the *Barten (1999)* contrast sensitivity function:
- :func:`colour.contrast.contrast_sensitivity_function_Barten1999`
References
----------
- :cite:`Barten1999` : Barten, P. G. (1999). Contrast Sensitivity of the
Human Eye and Its Effects on Image Quality. SPIE. doi:10.1117/3.353254
- :cite:`Barten2003` : Barten, P. G. J. (2003). Formula for the contrast
sensitivity of the human eye. In Y. Miyake & D. R. Rasmussen (Eds.),
Proceedings of SPIE (Vol. 5294, pp. 231-238). doi:10.1117/12.537476
- :cite:`Cowan2004` : Cowan, M., Kennel, G., Maier, T., & Walker, B. (2004).
Contrast Sensitivity Experiment to Determine the Bit Depth for Digital
Cinema. SMPTE Motion Imaging Journal, 113(9), 281-292. doi:10.5594/j11549
- :cite:`InternationalTelecommunicationUnion2015` : International
Telecommunication Union. (2015). Report ITU-R BT.2246-4 - The present
state of ultra-high definition television BT Series Broadcasting service
(Vol. 5, pp. 1-92).
https://www.itu.int/dms_pub/itu-r/opb/rep/R-REP-BT.2246-4-2015-PDF-E.pdf
"""
import numpy as np
from colour.utilities import as_float_array, as_float
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'optical_MTF_Barten1999', 'pupil_diameter_Barten1999', 'sigma_Barten1999',
'retinal_illuminance_Barten1999', 'maximum_angular_size_Barten1999',
'contrast_sensitivity_function_Barten1999'
]
def optical_MTF_Barten1999(u, sigma=0.01):
"""
Returns the optical modulation transfer function (MTF) :math:`M_{opt}` of
the eye using *Barten (1999)* method.
Parameters
----------
u : numeric or array_like
Spatial frequency :math:`u`, the cycles per degree.
sigma : numeric or array_like, optional
Standard deviation :math:`\\sigma` of the line-spread function
resulting from the convolution of the different elements of the
convolution process.
Returns
-------
numeric or array_like
Optical modulation transfer function (MTF) :math:`M_{opt}` of the eye.
References
----------
:cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,
:cite:`InternationalTelecommunicationUnion2015`,
Examples
--------
>>> optical_MTF_Barten1999(4, 0.01) # doctest: +ELLIPSIS
0.9689107...
"""
u = as_float_array(u)
sigma = as_float_array(sigma)
return as_float(np.exp(-2 * np.pi ** 2 * sigma ** 2 * u ** 2))
def pupil_diameter_Barten1999(L, X_0=60, Y_0=None):
"""
Returns the pupil diameter for given luminance and object or stimulus
angular size using *Barten (1999)* method.
Parameters
----------
L : numeric or array_like
Average luminance :math:`L` in :math:`cd/m^2`.
X_0 : numeric or array_like, optional
Angular size of the object :math:`X_0` in degrees in the x direction.
Y_0 : numeric or array_like, optional
Angular size of the object :math:`X_0` in degrees in the y direction.
References
----------
:cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,
:cite:`InternationalTelecommunicationUnion2015`,
Examples
--------
>>> pupil_diameter_Barten1999(100, 60, 60) # doctest: +ELLIPSIS
2.0777571...
"""
L = as_float_array(L)
X_0 = as_float_array(X_0)
Y_0 = X_0 if Y_0 is None else as_float_array(Y_0)
return as_float(5 - 3 * np.tanh(0.4 * np.log(L * X_0 * Y_0 / 40 ** 2)))
def sigma_Barten1999(sigma_0=0.5 / 60, C_ab=0.08 / 60, d=2.1):
"""
Returns the standard deviation :math:`\\sigma` of the line-spread function
resulting from the convolution of the different elements of the convolution
process using *Barten (1999)* method.
The :math:`\\sigma` quantity depends on the pupil diameter :math:`d` of the
eye lens. For very small pupil diameters, :math:`\\sigma` increases
inversely proportionally with pupil size because of diffraction, and for
large pupil diameters, :math:`\\sigma` increases about linearly with pupil
    size because of chromatic aberration and other aberrations.
Parameters
----------
sigma_0 : numeric or array_like, optional
Constant :math:`\\sigma_{0}` in degrees.
C_ab : numeric or array_like, optional
Spherical aberration of the eye :math:`C_{ab}` in
:math:`degrees\\div mm`.
d : numeric or array_like, optional
Pupil diameter :math:`d` in millimeters.
Returns
-------
ndarray
Standard deviation :math:`\\sigma` of the line-spread function
resulting from the convolution of the different elements of the
convolution process.
Warnings
--------
This definition expects :math:`\\sigma_{0}` and :math:`C_{ab}` to be given
in degrees and :math:`degrees\\div mm` respectively. However, in the
literature, the values for :math:`\\sigma_{0}` and
:math:`C_{ab}` are usually given in :math:`arc min` and
:math:`arc min\\div mm` respectively, thus they need to be divided by 60.
References
----------
:cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,
:cite:`InternationalTelecommunicationUnion2015`,
Examples
--------
>>> sigma_Barten1999(0.5 / 60, 0.08 / 60, 2.1) # doctest: +ELLIPSIS
0.0087911...
"""
sigma_0 = as_float_array(sigma_0)
C_ab = as_float_array(C_ab)
d = as_float_array(d)
return as_float(np.sqrt(sigma_0 ** 2 + (C_ab * d) ** 2))
def retinal_illuminance_Barten1999(
L, d=2.1, apply_stiles_crawford_effect_correction=True):
"""
Returns the retinal illuminance :math:`E` in Trolands for given average
luminance :math:`L` and pupil diameter :math:`d` using *Barten (1999)*
method.
Parameters
----------
L : numeric or array_like
Average luminance :math:`L` in :math:`cd/m^2`.
d : numeric or array_like, optional
Pupil diameter :math:`d` in millimeters.
apply_stiles_crawford_effect_correction : bool, optional
Whether to apply the correction for *Stiles-Crawford* effect.
Returns
-------
ndarray
Retinal illuminance :math:`E` in Trolands.
Notes
-----
- This definition is for use with photopic viewing conditions and thus
corrects for the Stiles-Crawford effect by default, i.e. directional
sensitivity of the cone cells with lower response of cone cells
receiving light from the edge of the pupil.
References
----------
:cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,
:cite:`InternationalTelecommunicationUnion2015`,
Examples
--------
>>> retinal_illuminance_Barten1999(100, 2.1) # doctest: +ELLIPSIS
330.4115803...
>>> retinal_illuminance_Barten1999(100, 2.1, False) # doctest: +ELLIPSIS
346.3605900...
"""
d = as_float_array(d)
L = as_float_array(L)
E = (np.pi * d ** 2) / 4 * L
if apply_stiles_crawford_effect_correction:
E *= (1 - (d / 9.7) ** 2 + (d / 12.4) ** 4)
return E
def maximum_angular_size_Barten1999(u, X_0=60, X_max=12, N_max=15):
"""
Returns the maximum angular size :math:`X` of the object considered using
*Barten (1999)* method.
Parameters
----------
u : numeric
Spatial frequency :math:`u`, the cycles per degree.
X_0 : numeric or array_like, optional
Angular size :math:`X_0` in degrees of the object in the x direction.
X_max : numeric or array_like, optional
Maximum angular size :math:`X_{max}` in degrees of the integration
area in the x direction.
N_max : numeric or array_like, optional
Maximum number of cycles :math:`N_{max}` over which the eye can
integrate the information.
Returns
-------
numeric or ndarray
Maximum angular size :math:`X` of the object considered.
References
----------
:cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,
:cite:`InternationalTelecommunicationUnion2015`,
Examples
--------
>>> maximum_angular_size_Barten1999(4) # doctest: +ELLIPSIS
3.5729480...
"""
u = as_float_array(u)
X_0 = as_float_array(X_0)
X_max = as_float_array(X_max)
N_max = as_float_array(N_max)
return (1 / X_0 ** 2 + 1 / X_max ** 2 + u ** 2 / N_max ** 2) ** -0.5
def contrast_sensitivity_function_Barten1999(u,
sigma=sigma_Barten1999(
0.5 / 60, 0.08 / 60, 2.1),
k=3.0,
T=0.1,
X_0=60,
Y_0=None,
X_max=12,
Y_max=None,
N_max=15,
n=0.03,
p=1.2274 * 10 ** 6,
E=retinal_illuminance_Barten1999(
20, 2.1),
phi_0=3 * 10 ** -8,
u_0=7):
"""
Returns the contrast sensitivity :math:`S` of the human eye according to
the contrast sensitivity function (CSF) described by *Barten (1999)*.
Contrast sensitivity is defined as the inverse of the modulation threshold
of a sinusoidal luminance pattern. The modulation threshold of this pattern
is generally defined by 50% probability of detection. The contrast
sensitivity function or CSF gives the contrast sensitivity as a function of
spatial frequency. In the CSF, the spatial frequency is expressed in
angular units with respect to the eye. It reaches a maximum between 1 and
10 cycles per degree with a fall off at higher and lower spatial
frequencies.
Parameters
----------
u : numeric
Spatial frequency :math:`u`, the cycles per degree.
sigma : numeric or array_like, optional
Standard deviation :math:`\\sigma` of the line-spread function
resulting from the convolution of the different elements of the
convolution process.
k : numeric or array_like, optional
Signal-to-noise (SNR) ratio :math:`k`.
T : numeric or array_like, optional
Integration time :math:`T` in seconds of the eye.
X_0 : numeric or array_like, optional
Angular size :math:`X_0` in degrees of the object in the x direction.
Y_0 : numeric or array_like, optional
Angular size :math:`Y_0` in degrees of the object in the y direction.
X_max : numeric or array_like, optional
Maximum angular size :math:`X_{max}` in degrees of the integration
area in the x direction.
Y_max : numeric or array_like, optional
Maximum angular size :math:`Y_{max}` in degrees of the integration
area in the y direction.
N_max : numeric or array_like, optional
Maximum number of cycles :math:`N_{max}` over which the eye can
integrate the information.
n : numeric or array_like, optional
Quantum efficiency of the eye :math:`n`.
p : numeric or array_like, optional
Photon conversion factor :math:`p` in
:math:`photons\\div seconds\\div degrees^2\\div Trolands` that
depends on the light source.
E : numeric or array_like, optional
Retinal illuminance :math:`E` in Trolands.
phi_0 : numeric or array_like, optional
Spectral density :math:`\\phi_0` in :math:`seconds degrees^2` of the
neural noise.
u_0 : numeric or array_like, optional
Spatial frequency :math:`u_0` in :math:`cycles\\div degrees` above
which the lateral inhibition ceases.
Returns
-------
ndarray
Contrast sensitivity :math:`S`.
Warnings
--------
This definition expects :math:`\\sigma_{0}` and :math:`C_{ab}` used in the
computation of :math:`\\sigma` to be given in degrees and
:math:`degrees\\div mm` respectively. However, in the literature, the
values for :math:`\\sigma_{0}` and :math:`C_{ab}` are usually given in
:math:`arc min` and :math:`arc min\\div mm` respectively, thus they need to
be divided by 60.
Notes
-----
- The formula holds for bilateral viewing and for equal dimensions of
the object in x and y direction. For monocular vision, the contrast
sensitivity is a factor :math:`\\sqrt{2}` smaller.
- *Barten (1999)* CSF default values for the :math:`k`,
:math:`\\sigma_{0}`, :math:`C_{ab}`, :math:`T`, :math:`X_{max}`,
:math:`N_{max}`, :math:`n`, :math:`\\phi_{0}` and :math:`u_0` constants
are valid for a standard observer with good vision and with an age
between 20 and 30 years.
- The other constants have been filled using reference data from
*Figure 31* in :cite:`InternationalTelecommunicationUnion2015` but
must be adapted to the current use case.
- The product of :math:`u`, the cycles per degree, and :math:`X_0`,
the number of degrees, gives the number of cycles :math:`P_c` in a
pattern. Therefore, :math:`X_0` can be made a variable dependent on
:math:`u` such as :math:`X_0 = P_c / u`.
References
----------
:cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,
:cite:`InternationalTelecommunicationUnion2015`,
Examples
--------
>>> contrast_sensitivity_function_Barten1999(4) # doctest: +ELLIPSIS
360.8691122...
Reproducing *Figure 31* in \
:cite:`InternationalTelecommunicationUnion2015` illustrating the minimum
detectable contrast according to *Barten (1999)* model with the assumed
conditions for UHDTV applications. The minimum detectable contrast
:math:`MDC` is then defined as follows::
:math:`MDC = 1 / CSF * 2 * (1 / 1.27)`
where :math:`2` is used for the conversion from modulation to contrast and
:math:`1 / 1.27` is used for the conversion from sinusoidal to rectangular
waves.
>>> from scipy.optimize import fmin
>>> settings_BT2246 = {
... 'k': 3.0,
... 'T': 0.1,
... 'X_max': 12,
... 'N_max': 15,
... 'n': 0.03,
... 'p': 1.2274 * 10 ** 6,
... 'phi_0': 3 * 10 ** -8,
... 'u_0': 7,
... }
>>>
>>> def maximise_spatial_frequency(L):
... maximised_spatial_frequency = []
... for L_v in L:
... X_0 = 60
... d = pupil_diameter_Barten1999(L_v, X_0)
... sigma = sigma_Barten1999(0.5 / 60, 0.08 / 60, d)
... E = retinal_illuminance_Barten1999(L_v, d, True)
... maximised_spatial_frequency.append(
... fmin(lambda x: (
... -contrast_sensitivity_function_Barten1999(
... u=x,
... sigma=sigma,
... X_0=X_0,
... E=E,
... **settings_BT2246)
... ), 0, disp=False)[0])
... return as_float(np.array(maximised_spatial_frequency))
>>>
>>> L = np.logspace(np.log10(0.01), np.log10(100), 10)
>>> X_0 = Y_0 = 60
>>> d = pupil_diameter_Barten1999(L, X_0, Y_0)
>>> sigma = sigma_Barten1999(0.5 / 60, 0.08 / 60, d)
>>> E = retinal_illuminance_Barten1999(L, d)
>>> u = maximise_spatial_frequency(L)
>>> (1 / contrast_sensitivity_function_Barten1999(
... u=u, sigma=sigma, E=E, X_0=X_0, Y_0=Y_0, **settings_BT2246)
... * 2 * (1/ 1.27))
... # doctest: +ELLIPSIS
array([ 0.0207396..., 0.0133019..., 0.0089256..., 0.0064202..., \
0.0050275...,
0.0041933..., 0.0035573..., 0.0030095..., 0.0025803..., \
0.0022897...])
"""
u = as_float_array(u)
k = as_float_array(k)
T = as_float_array(T)
X_0 = as_float_array(X_0)
Y_0 = X_0 if Y_0 is None else as_float_array(Y_0)
X_max = as_float_array(X_max)
Y_max = X_max if Y_max is None else as_float_array(Y_max)
N_max = as_float_array(N_max)
n = as_float_array(n)
p = as_float_array(p)
E = as_float_array(E)
phi_0 = as_float_array(phi_0)
u_0 = as_float_array(u_0)
M_opt = optical_MTF_Barten1999(u, sigma)
M_as = 1 / (maximum_angular_size_Barten1999(u, X_0, X_max, N_max) *
maximum_angular_size_Barten1999(u, Y_0, Y_max, N_max))
S = (M_opt / k) / np.sqrt(2 / T * M_as * (1 / (n * p * E) + phi_0 /
(1 - np.exp(-(u / u_0) ** 2))))
return as_float(S)
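# Illustrative sketch (not part of the colour-science API): as mentioned in the
# Notes of the docstring above, the angular size can be tied to the spatial
# frequency so that the number of cycles P_c stays constant, i.e. X_0 = P_c / u.
# Assuming NumPy arrays of matching shape:
#
#     >>> import numpy as np
#     >>> P_c = 5.0
#     >>> u = np.array([1.0, 2.0, 4.0, 8.0])
#     >>> S = contrast_sensitivity_function_Barten1999(u=u, X_0=P_c / u)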
| bsd-3-clause | 3,376,031,520,470,536,700 | 36.675497 | 79 | 0.587918 | false |
chetan/cherokee | admin/wizards/uwsgi.py | 1 | 14584 | # -*- coding: utf-8 -*-
#
# Cherokee-admin's uWSGI Wizard
#
# Authors:
# Taher Shihadeh <[email protected]>
# Alvaro Lopez Ortega <[email protected]>
#
# Copyright (C) 2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
#
# Tested:
# 2009/10/xx: uWSGI Version 0.9.3 / Cherokee 0.99.36b
# 2010/04/15: uWSGI Version 0.9.3 / Cherokee 0.99.41
# 2010/04/22: uWSGI Version 0.9.4.3 / Cherokee 0.99.45b
# 2010/12/01: uWSGI Version 0.9.6.5 + 0.9.7-dev / Cherokee SVN: r5847
import os
import sys
import socket
import re
import CTK
import Wizard
import validations
from xml.dom import minidom
import ConfigParser
try:
import yaml
UWSGI_HAS_YAML=True
except:
UWSGI_HAS_YAML=False
from util import *
NOTE_WELCOME_H1 = N_("Welcome to the uWSGI Wizard")
NOTE_WELCOME_P1 = N_('<a target="_blank" href="http://projects.unbit.it/uwsgi/">uWSGI</a> is a fast (pure C), self-healing, developer-friendly application container, aimed for professional webapps deployment and development.')
NOTE_WELCOME_P2 = N_('It includes a complete stack for networked/clustered applications, implementing message/object passing and process management. It uses the uwsgi (all lowercase) protocol for all the networking/interprocess communications.')
NOTE_LOCAL_H1 = N_("uWSGI")
NOTE_UWSGI_CONFIG = N_("Path to the uWSGI configuration file (XML, INI, YAML or .wsgi, .py, .psgi, .pl, .lua, .ws, .ru, .rb). Its mountpoint will be used.")
NOTE_UWSGI_BINARY = N_("Location of the uWSGI binary")
ERROR_NO_CONFIG = N_("It does not look like a uWSGI configuration file.")
NOTE_HOST_H1 = N_("New Virtual Server Details")
NOTE_HOST = N_("Host name of the virtual server that is about to be created.")
NOTE_DROOT = N_("Path to use as document root for the new virtual server.")
NOTE_WEBDIR = N_("Public web directory to access the project.")
NOTE_WEBDIR_H1 = N_("Public Web Directory")
NOTE_WEBDIR_P1 = N_("The default value is extracted from the configuration file. Change it at your own risk.")
PREFIX = 'tmp!wizard!uwsgi'
URL_APPLY = r'/wizard/vserver/uwsgi/apply'
UWSGI_CMDLINE_AUTOMAGIC = "-M -p %(CPU_num)d -z %(timeout)s -L -l %(SOMAXCONN)d %(filename)s"
UWSGI_DEFAULT_CONFS = ('.xml', '.ini', '.yml',)
UWSGI_MAGIC_CONFS = ('.wsgi', '.py', '.psgi', '.pl', '.lua', '.ws', '.ru', '.rb',)
SOURCE = """
source!%(src_num)d!env_inherited = 1
source!%(src_num)d!type = interpreter
source!%(src_num)d!nick = uWSGI %(src_num)d
source!%(src_num)d!host = %(src_addr)s
source!%(src_num)d!interpreter = %(uwsgi_binary)s %(uwsgi_extra)s
"""
SINGLE_DIRECTORY = """
%(vsrv_pre)s!rule!%(rule_id)d!match = directory
%(vsrv_pre)s!rule!%(rule_id)d!match!directory = %(webdir)s
%(vsrv_pre)s!rule!%(rule_id)d!handler = uwsgi
%(vsrv_pre)s!rule!%(rule_id)d!handler!error_handler = 1
%(vsrv_pre)s!rule!%(rule_id)d!handler!check_file = 0
%(vsrv_pre)s!rule!%(rule_id)d!handler!pass_req_headers = 1
%(vsrv_pre)s!rule!%(rule_id)d!handler!balancer = round_robin
%(vsrv_pre)s!rule!%(rule_id)d!handler!modifier1 = %(modifier1)d
%(vsrv_pre)s!rule!%(rule_id)d!handler!modifier2 = 0
%(vsrv_pre)s!rule!%(rule_id)d!handler!balancer!source!1 = %(src_num)d
"""
CONFIG_VSERVER = SOURCE + """
%(vsrv_pre)s!nick = %(new_host)s
%(vsrv_pre)s!document_root = %(document_root)s
"""
DEFAULT_DIRECTORY = """
%(vsrv_pre)s!rule!1!match = default
%(vsrv_pre)s!rule!1!handler = common
%(vsrv_pre)s!rule!1!handler!iocache = 0
"""
CONFIG_DIR = SOURCE + """
%(rule_pre)s!match = directory
%(rule_pre)s!match!directory = %(webdir)s
%(rule_pre)s!handler = uwsgi
%(rule_pre)s!handler!error_handler = 1
%(rule_pre)s!handler!check_file = 0
%(rule_pre)s!handler!pass_req_headers = 1
%(rule_pre)s!handler!balancer = round_robin
%(rule_pre)s!handler!modifier1 = %(modifier1)d
%(rule_pre)s!handler!modifier2 = 0
%(rule_pre)s!handler!balancer!source!1 = %(src_num)d
"""
DEFAULT_BINS = ['uwsgi','uwsgi26','uwsgi25']
DEFAULT_PATHS = ['/usr/bin',
'/usr/sbin',
'/usr/local/bin',
'/usr/local/sbin',
'/usr/gnu/bin',
'/opt/local/sbin',
'/opt/local/bin']
def figure_CPU_num():
if 'SC_NPROCESSORS_ONLN'in os.sysconf_names:
return os.sysconf('SC_NPROCESSORS_ONLN')
proc = subprocess.Popen("sysctl -n hw.ncpu", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
if len(stderr_results) == 0:
return int(stdout_results)
return 1
class Commit:
def Commit_VServer (self):
# Incoming info
document_root = CTK.cfg.get_val('%s!document_root'%(PREFIX))
uwsgi_cfg = CTK.cfg.get_val('%s!uwsgi_cfg'%(PREFIX))
new_host = CTK.cfg.get_val('%s!new_host'%(PREFIX))
# Create the new Virtual Server
vsrv_pre = CTK.cfg.get_next_entry_prefix('vserver')
CTK.cfg['%s!nick'%(vsrv_pre)] = new_host
Wizard.CloneLogsCfg_Apply ('%s!logs_as_vsrv'%(PREFIX), vsrv_pre)
uwsgi_binary = find_uwsgi_binary()
if not uwsgi_binary:
uwsgi_binary = CTK.cfg.get_val('%s!uwsgi_binary' %(PREFIX))
# Locals
src_num, pre = cfg_source_get_next ()
uwsgi_extra = uwsgi_get_extra(uwsgi_cfg)
modifier1 = uwsgi_get_modifier(uwsgi_cfg)
src_addr = uwsgi_get_socket(uwsgi_cfg)
if not src_addr:
src_addr = "127.0.0.1:%d" % cfg_source_find_free_port ()
uwsgi_extra = "-s %s %s" % (src_addr, uwsgi_extra)
else:
if src_addr.startswith(':'):
src_addr = "127.0.0.1%s" % src_addr
# Build the config
cvs = CONFIG_VSERVER
webdirs = uwsgi_find_mountpoint(uwsgi_cfg)
rule_id = 2
for webdir in webdirs:
cvs += SINGLE_DIRECTORY %(locals())
rule_id = rule_id + 1
cvs += DEFAULT_DIRECTORY
# Add the new rules
config = cvs %(locals())
CTK.cfg.apply_chunk (config)
# Clean up
CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
CTK.cfg.normalize ('vserver')
del (CTK.cfg[PREFIX])
return CTK.cfg_reply_ajax_ok()
def Commit_Rule (self):
vsrv_num = CTK.cfg.get_val('%s!vsrv_num' %(PREFIX))
vsrv_pre = 'vserver!%s' %(vsrv_num)
# Incoming info
uwsgi_cfg = CTK.cfg.get_val('%s!uwsgi_cfg' %(PREFIX))
uwsgi_binary = find_uwsgi_binary()
if not uwsgi_binary:
uwsgi_binary = CTK.cfg.get_val('%s!uwsgi_binary' %(PREFIX))
# Locals
rule_pre = CTK.cfg.get_next_entry_prefix ('%s!rule' %(vsrv_pre))
src_num, src_pre = cfg_source_get_next ()
uwsgi_extra = uwsgi_get_extra(uwsgi_cfg)
modifier1 = uwsgi_get_modifier(uwsgi_cfg)
src_addr = uwsgi_get_socket(uwsgi_cfg)
if not src_addr:
src_addr = "127.0.0.1:%d" % cfg_source_find_free_port ()
uwsgi_extra = "-s %s %s" % (src_addr, uwsgi_extra)
else:
if src_addr.startswith(':'):
src_addr = "127.0.0.1%s" % src_addr
# Add the new rules
webdir = uwsgi_find_mountpoint(uwsgi_cfg)[0]
config = CONFIG_DIR %(locals())
CTK.cfg.apply_chunk (config)
# Clean up
CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
del (CTK.cfg[PREFIX])
return CTK.cfg_reply_ajax_ok()
def __call__ (self):
if CTK.post.pop('final'):
# Apply POST
CTK.cfg_apply_post()
# VServer or Rule?
if CTK.cfg.get_val ('%s!vsrv_num' %(PREFIX)):
return self.Commit_Rule()
return self.Commit_VServer()
return CTK.cfg_apply_post()
class WebDirectory:
def __call__ (self):
uwsgi_cfg = CTK.cfg.get_val('%s!uwsgi_cfg' %(PREFIX))
webdir = uwsgi_find_mountpoint(uwsgi_cfg)[0]
table = CTK.PropsTable()
table.Add (_('Web Directory'), CTK.TextCfg ('%s!webdir'%(PREFIX), False, {'value': webdir, 'class': 'noauto'}), _(NOTE_WEBDIR))
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('final', '1')
submit += table
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_WEBDIR_H1)))
cont += submit
cont += CTK.Notice('warning', CTK.RawHTML(_(NOTE_WEBDIR_P1)))
cont += CTK.DruidButtonsPanel_PrevCreate_Auto()
return cont.Render().toStr()
class Host:
def __call__ (self):
table = CTK.PropsTable()
table.Add (_('New Host Name'), CTK.TextCfg ('%s!new_host'%(PREFIX), False, {'value': 'www.example.com', 'class': 'noauto'}), _(NOTE_HOST))
table.Add (_('Document Root'), CTK.TextCfg ('%s!document_root'%(PREFIX), False, {'value': os_get_document_root(), 'class': 'noauto'}), _(NOTE_DROOT))
table.Add (_('Use Same Logs as'), Wizard.CloneLogsCfg('%s!logs_as_vsrv'%(PREFIX)), _(Wizard.CloneLogsCfg.NOTE))
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('final', '1')
submit += table
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_HOST_H1)))
cont += submit
cont += CTK.DruidButtonsPanel_PrevCreate_Auto()
return cont.Render().toStr()
class LocalSource:
def __call__ (self):
uwsgi_binary = find_uwsgi_binary()
table = CTK.PropsTable()
if not uwsgi_binary:
table.Add (_('uWSGI binary'), CTK.TextCfg ('%s!uwsgi_binary'%(PREFIX), False), _(NOTE_UWSGI_BINARY))
table.Add (_('Configuration File'), CTK.TextCfg ('%s!uwsgi_cfg' %(PREFIX), False), _(NOTE_UWSGI_CONFIG))
submit = CTK.Submitter (URL_APPLY)
submit += table
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_LOCAL_H1)))
cont += submit
cont += CTK.DruidButtonsPanel_PrevNext_Auto()
return cont.Render().toStr()
class Welcome:
def __call__ (self):
box = CTK.Box ({'class': 'wizard-welcome'})
box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P1)))
box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P2)))
box += Wizard.CookBookBox ('cookbook_uwsgi')
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_WELCOME_H1)))
cont += Wizard.Icon ('uwsgi', {'class': 'wizard-descr'})
cont += box
# Send the VServer num if it is a Rule
tmp = re.findall (r'^/wizard/vserver/(\d+)/', CTK.request.url)
if tmp:
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('%s!vsrv_num'%(PREFIX), tmp[0])
cont += submit
cont += CTK.DruidButtonsPanel_Next_Auto()
return cont.Render().toStr()
#
# Utility functions
#
def is_uwsgi_cfg (filename):
    filename = validations.is_local_file_exists (filename)
    for k in UWSGI_DEFAULT_CONFS:
        if filename.endswith(k):
            return filename
    for k in UWSGI_MAGIC_CONFS:
        if filename.endswith(k):
            return filename
    raise ValueError(_(ERROR_NO_CONFIG))
def uwsgi_get_extra(filename):
if filename.endswith('.xml'):
return "-x %s" % filename
elif filename.endswith('.ini'):
return "--ini %s" % filename
elif filename.endswith('.yml'):
return "--yaml %s" % filename
CPU_num = figure_CPU_num() * 2
timeout = CTK.cfg.get_val('server!timeout', '15')
SOMAXCONN = socket.SOMAXCONN
return UWSGI_CMDLINE_AUTOMAGIC %(locals())
def uwsgi_get_modifier(filename):
if filename.endswith('.psgi') or filename.endswith('.pl'):
return 5
if filename.endswith('.lua') or filename.endswith('.ws'):
return 6
if filename.endswith('.ru') or filename.endswith('.rb'):
return 7
return 0
def uwsgi_get_socket(filename):
s = None
if filename.endswith('.xml'):
try:
udom = minidom.parse(filename)
uroot = udom.getElementsByTagName('uwsgi')[0]
s = uroot.getElementsByTagName('socket')[0].childNodes[0].data
except:
pass
elif filename.endswith('.ini'):
try:
c = ConfigParser.ConfigParser()
c.read(filename)
s = c.get('uwsgi', 'socket')
except:
pass
elif filename.endswith('.yml') and UWSGI_HAS_YAML:
try:
fd = open(filename, 'r')
y = yaml.load(fd)
s = y['uwsgi']['socket']
fd.close()
except:
pass
return s
def uwsgi_find_mountpoint(filename):
mp = []
found_mp = False
if filename.endswith('.xml'):
try:
udom = minidom.parse(filename)
uroot = udom.getElementsByTagName('uwsgi')[0]
for m in uroot.getElementsByTagName('app'):
try:
mp.append(m.attributes['mountpoint'].value)
found_mp = True
except:
pass
except:
pass
if found_mp:
return mp
return ['/']
def find_uwsgi_binary():
return path_find_binary (DEFAULT_BINS, extra_dirs = DEFAULT_PATHS)
#
# Data Validation
#
VALS = [
("%s!uwsgi_binary" %(PREFIX), validations.is_not_empty),
("%s!uwsgi_cfg" %(PREFIX), validations.is_not_empty),
("%s!new_host" %(PREFIX), validations.is_not_empty),
("%s!document_root"%(PREFIX), validations.is_not_empty),
("%s!new_webdir" %(PREFIX), validations.is_not_empty),
("%s!uwsgi_binary" %(PREFIX), validations.is_local_file_exists),
("%s!uwsgi_cfg" %(PREFIX), is_uwsgi_cfg),
("%s!new_host" %(PREFIX), validations.is_new_vserver_nick),
("%s!document_root"%(PREFIX), validations.is_local_dir_exists),
("%s!new_webdir" %(PREFIX), validations.is_dir_formatted),
]
# VServer
CTK.publish ('^/wizard/vserver/uwsgi$', Welcome)
CTK.publish ('^/wizard/vserver/uwsgi/2$', LocalSource)
CTK.publish ('^/wizard/vserver/uwsgi/3$', Host)
# Rule
CTK.publish ('^/wizard/vserver/(\d+)/uwsgi$', Welcome)
CTK.publish ('^/wizard/vserver/(\d+)/uwsgi/2$', LocalSource)
CTK.publish ('^/wizard/vserver/(\d+)/uwsgi/3$', WebDirectory)
# Common
CTK.publish (r'^%s$'%(URL_APPLY), Commit, method="POST", validation=VALS)
| gpl-2.0 | -1,414,187,419,765,483,000 | 31.408889 | 247 | 0.610326 | false |
garyd203/flying-circus | src/flyingcircus/_raw/codecommit.py | 1 | 1270 | """Raw representations of every data type in the AWS CodeCommit service.
See Also:
`AWS developer guide for CodeCommit
<https://docs.aws.amazon.com/codecommit/latest/userguide/index.html>`_
This file is automatically generated, and should not be directly edited.
"""
from attr import attrib
from attr import attrs
from ..core import ATTRSCONFIG
from ..core import Resource
from ..core import ResourceProperties
from ..core import create_object_converter
__all__ = ["Repository", "RepositoryProperties"]
@attrs(**ATTRSCONFIG)
class RepositoryProperties(ResourceProperties):
Code = attrib(default=None)
RepositoryDescription = attrib(default=None)
RepositoryName = attrib(default=None)
Tags = attrib(default=None)
Triggers = attrib(default=None)
@attrs(**ATTRSCONFIG)
class Repository(Resource):
"""A Repository for CodeCommit.
See Also:
`AWS Cloud Formation documentation for Repository
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codecommit-repository.html>`_
"""
RESOURCE_TYPE = "AWS::CodeCommit::Repository"
Properties: RepositoryProperties = attrib(
factory=RepositoryProperties,
converter=create_object_converter(RepositoryProperties),
)
| lgpl-3.0 | 4,461,252,906,516,049,400 | 27.863636 | 113 | 0.73937 | false |
PaulSalden/weechat-scripts | urlwrap.py | 1 | 3456 | # -*- coding: utf-8 -*-
import weechat
from time import strftime
SCRIPT_NAME = "urlwrap"
SCRIPT_AUTHOR = "Paul Salden <[email protected]>"
SCRIPT_VERSION = "0.99"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Prevents alignment of multiline messages containing an url."
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, "shutdown_cb", "")
infolist = weechat.infolist_get("filter", "", "")
filters = []
while weechat.infolist_next(infolist):
filters.append(weechat.infolist_string(infolist, "name"))
weechat.infolist_free(infolist)
filtername = "urlwrap_filter"
while filtername in filters: filtername = "".join((filtername, "_"))
weechat.command("", "/filter add {} * urlwrap_filter_tag *".format(filtername))
def _get_buffer(server, channel):
return weechat.info_get("irc_buffer", ",".join((server, channel)))
def _s(option):
# return the string value of option
return weechat.config_string(weechat.config_get(option))
def _c(option, bgoption=False):
# return a color character with numbers based on options
if bgoption:
return weechat.color(",".join((weechat.config_color(weechat.config_get(option)),
weechat.config_color(weechat.config_get(bgoption)))))
return weechat.color(weechat.config_color(weechat.config_get(option)))
def _reconstruct_print(prefix, message, highlighted):
# as printing without alignment also strips timestamp and delimiter,
# they must be reconstructed
timestamp = strftime(_s("weechat.look.buffer_time_format"))
timestamp = _c("weechat.color.chat_time") + "".join([
l if l.isdigit() else _c("weechat.color.chat_time_delimiters")+l+_c("weechat.color.chat_time")
for l in timestamp])
if highlighted:
prefix = "".join((_c("weechat.color.chat_highlight", "weechat.color.chat_highlight_bg"),
weechat.string_remove_color(prefix, "")))
prefix = "".join((_c("weechat.color.chat_nick_prefix"), _s("weechat.look.nick_prefix"),
prefix, _c("weechat.color.chat_nick_suffix"), _s("weechat.look.nick_suffix"),
weechat.color("reset")))
delimiter = "".join((" ", _c("weechat.color.chat_delimiters"), _s("weechat.look.prefix_suffix"),
weechat.color("reset"), " ")) if _s("weechat.look.prefix_align") != "none" else " "
return "{} {}{}{}".format(timestamp, prefix, delimiter, message)
def modifier_cb(data, modifier, modifier_data, string):
if "irc_privmsg" in modifier_data and ("http://" in string or "https://" in string):
buffer = weechat.buffer_search("irc", modifier_data.split(";")[1])
mynick = weechat.buffer_get_string(buffer, "localvar_nick")
        taglist = modifier_data.split(";")[2].split(",")
        nick = mynick  # fall back to the local nick if no nick_ tag is present
        for tag in taglist:
if tag[:5] == "nick_":
nick = tag[5:]
break
prefix, message = string.split("\t", 1)
highlighted = nick != mynick and weechat.string_has_highlight(message, mynick)
weechat.prnt_date_tags(buffer, 0, "urlwrap_filter_tag", string)
weechat.prnt(buffer, "\t\t{}".format(_reconstruct_print(prefix, message, highlighted)))
return ""
return string
def shutdown_cb():
weechat.command("", "/filter del {}".format(filtername))
return weechat.WEECHAT_RC_OK
weechat.hook_modifier("weechat_print", "modifier_cb", "")
| gpl-3.0 | 3,089,040,915,440,459,300 | 40.146341 | 102 | 0.642361 | false |
jrichte43/ProjectEuler | Problem-0186/solutions.py | 1 | 1811 |
__problem_title__ = "Connectedness of a network"
__problem_url___ = "https://projecteuler.net/problem=186"
__problem_description__ = "Here are the records from a busy telephone system with one million " \
"users: The telephone number of the caller and the called number in " \
"record n are Caller(n) = S and Called(n) = S where S come from the " \
""Lagged Fibonacci Generator": For 1 ≤ k ≤ 55, S = [100003 - 200003k + " \
"300007k ] (modulo 1000000) For 56 ≤ k, S = [S + S ] (modulo 1000000) " \
"If Caller(n) = Called(n) then the user is assumed to have misdialled " \
"and the call fails; otherwise the call is successful. From the start " \
"of the records, we say that any pair of users X and Y are friends if " \
"X calls Y or vice-versa. Similarly, X is a friend of a friend of Z if " \
"X is a friend of Y and Y is a friend of Z; and so on for longer " \
"chains. The Prime Minister's phone number is 524287. After how many " \
"successful calls, not counting misdials, will 99% of the users " \
"(including the PM) be a friend, or a friend of a friend etc., of the " \
"Prime Minister?"
import timeit
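# A minimal sketch (added for illustration, not part of the original solution
# stub) of the "Lagged Fibonacci Generator" described above, assuming the
# standard Project Euler 186 recurrence:
#   S_k = [100003 - 200003*k + 300007*k^3] (modulo 1000000)   for 1 <= k <= 55
#   S_k = [S_(k-24) + S_(k-55)]            (modulo 1000000)   for 56 <= k
def lagged_fibonacci_generator():
    history = []  # the most recent 55 terms, enough for the k-24 and k-55 lags
    k = 1
    while True:
        if k <= 55:
            value = (100003 - 200003 * k + 300007 * k ** 3) % 1000000
        else:
            value = (history[-24] + history[-55]) % 1000000
        history.append(value)
        if len(history) > 55:
            history.pop(0)
        yield value  # successive values are S_1, S_2, S_3, ...
        k += 1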
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | 8,526,298,695,389,920,000 | 46.5 | 100 | 0.538504 | false |
dpaiton/OpenPV | pv-core/analysis/python/plot_proximity_2nd_exclusive.py | 1 | 4186 | """
Plots the Histogram
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
import random
if len(sys.argv) < 3:
  print "usage: plot_proximity_2nd_exclusive filename filename2"
print len(sys.argv)
sys.exit()
w = rw.PVReadWeights(sys.argv[1])
w2 = rw.PVReadWeights(sys.argv[2])
space = 1
d = np.zeros((5,5))
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
nf = w.nf
margin = 50
marginstart = margin
marginend = nx - margin
acount = 0
patchposition = []
supereasytest = 1
coord = 1
coord = int(coord)
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
where = []
zep = []
for k in range(numpat):
kx = conv.kxPos(k, nx, ny, nf)
ky = conv.kyPos(k, nx, ny, nf)
p = w.next_patch()
if len(p) != nxp * nyp:
continue
if marginstart < kx < marginend:
if marginstart < ky < marginend:
where.append([p])
else:
where.append([p])
else:
where.append([p])
wherebox = where
wherebox = np.reshape(wherebox, (nx,ny, 25))
print "shape = ", np.shape(wherebox)
prefinal = []
prefinal = np.array(prefinal)
prefinal2 = []
tprefinal2 = np.array(prefinal2)
count2 = 0
qi = np.zeros((1,26))
for k in range(numpat):
kx = conv.kxPos(k, nx, ny, nf)
ky = conv.kyPos(k, nx, ny, nf)
if marginstart < kx < marginend:
if marginstart < ky < marginend:
howmany = [1]
w = [0, 1]
a = np.matrix(wherebox[kx, ky])
if np.sum(a) > 4.0:
for i in range(25):
i+=1
box = wherebox[((ky-i)):((ky+1+i)), ((kx-i)):((kx+1+i))]
count = 0
qq = []
countw = 0
bleng = len(box)-2
for g in range(bleng):
b = np.matrix(box[g+1,0])
q = (a * np.transpose(b)) / (math.sqrt(a*np.transpose(a))*math.sqrt(b*np.transpose(b)))
if countw == 0:
qq = np.append(qq, q)
else:
qq = np.add(qq, q)
countw+=1
b = np.matrix(box[g+1, (len(box)-1)])
q = (a * np.transpose(b)) / (math.sqrt(a*np.transpose(a))*math.sqrt(b*np.transpose(b)))
qq = np.add(qq, q)
countw+=1
for h in range(len(box)):
b = np.matrix(box[0, h])
q = (a * np.transpose(b)) / (math.sqrt(a*np.transpose(a))*math.sqrt(b*np.transpose(b)))
qq = np.add(qq, q)
countw+=1
b = np.matrix(box[(len(box)-1), h])
q = (a * np.transpose(b)) / (math.sqrt(a*np.transpose(a))*math.sqrt(b*np.transpose(b)))
qq = np.add(qq, q)
countw+=1
qq = qq / countw
howmany = np.append(howmany, qq)
count2 += 1.0
qi = np.add(qi, howmany)
print
print "pre qi = ", qi
qi = qi / count2
print "count2 = ", count2
print
print
qi = np.reshape(qi,(np.shape(qi)[1], 1))
print "qi = ", qi
print "qi shape = ", np.shape(qi)
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='darkslategray')
ax.set_xlabel('Distance\n with-inhib=Yellow without-inhib=Red')
ax.set_ylabel('Number of Shared Features')
ax.set_title('proximity')
ax.plot((np.arange(len(qi))+1), qi, "-o", color='y')
#ax.plot((np.arange(len(postfinal2))+1), postfinal2, "-o", color='r')
#ax.plot(np.arange(len(prefinal[2])), prefinal[2], "-o", color=cm.spectral(0.4))
#ax.plot(np.arange(len(prefinal[3])), prefinal[3], "-o", color=cm.spectral(0.5))
#ax.plot(np.arange(len(prefinal[4])), prefinal[4], "-o", color=cm.spectral(0.6))
#ax.plot(np.arange(len(prefinal[5])), prefinal[5], "-o", color=cm.spectral(0.7))
#ax.plot(np.arange(len(prefinal[6])), prefinal[6], "-o", color=cm.spectral(0.8))
#ax.plot(np.arange(len(prefinal[7])), prefinal[7], "-o", color=cm.spectral(0.9))
ax.set_ylim(0.0, 1.0)
plt.show()
#end fig loop
| epl-1.0 | -7,084,743,835,861,948,000 | 23.057471 | 105 | 0.54085 | false |