repo_name | ref | path | copies | content
---|---|---|---|---|
vinhlh/bite-project | refs/heads/master | deps/gdata-python-client/src/gdata/contentforshopping/client.py | 29 | #!/usr/bin/python
#
# Copyright (C) 2010-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extend the gdata client for the Content API for Shopping."""
__author__ = 'afshar (Ali Afshar), dhermes (Daniel Hermes)'
import urllib
import atom.data
import gdata.client
from gdata.contentforshopping.data import ClientAccount
from gdata.contentforshopping.data import ClientAccountFeed
from gdata.contentforshopping.data import DatafeedEntry
from gdata.contentforshopping.data import DatafeedFeed
from gdata.contentforshopping.data import DataQualityEntry
from gdata.contentforshopping.data import DataQualityFeed
from gdata.contentforshopping.data import InventoryFeed
from gdata.contentforshopping.data import ProductEntry
from gdata.contentforshopping.data import ProductFeed
from gdata.contentforshopping.data import UsersEntry
from gdata.contentforshopping.data import UsersFeed
CFS_VERSION = 'v1'
CFS_HOST = 'content.googleapis.com'
CFS_URI = 'https://%s/content' % CFS_HOST
CFS_PROJECTION = 'schema'
class ContentForShoppingClient(gdata.client.GDClient):
"""Client for Content for Shopping API.
:param account_id: Merchant account ID. This value will be used by default
for all requests, but may be overridden on a
request-by-request basis.
:param api_version: The version of the API to target. Default value: 'v1'.
:param **kwargs: Pass all additional keywords to the GDClient constructor.
"""
api_version = '1.0'
def __init__(self, account_id=None, api_version=CFS_VERSION,
cfs_uri=CFS_URI, **kwargs):
self.cfs_account_id = account_id
self.cfs_api_version = api_version
self.cfs_uri = cfs_uri
gdata.client.GDClient.__init__(self, **kwargs)
def _create_uri(self, account_id, resource, path=(), use_projection=True,
dry_run=False, warnings=False, max_results=None,
start_token=None, start_index=None,
performance_start=None, performance_end=None):
"""Create a request uri from the given arguments.
If arguments are None, use the default client attributes.
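Example (an illustrative sketch, not verified output; the account ID
'1234' and the product-ID path segment are hypothetical)::

  >>> client = ContentForShoppingClient('1234')
  >>> client._create_uri(None, 'items/products',
  ...                    path=['online:en:US:sku1'], max_results=25)
  'https://content.googleapis.com/content/v1/1234/items/products/schema/online%3Aen%3AUS%3Asku1?max-results=25'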
"""
account_id = account_id or self.cfs_account_id
if account_id is None:
raise ValueError('No Account ID set. '
'Either set for the client, or per request')
segments = [self.cfs_uri, self.cfs_api_version, account_id, resource]
if use_projection:
segments.append(CFS_PROJECTION)
segments.extend(urllib.quote(value) for value in path)
result = '/'.join(segments)
request_params = []
if dry_run:
request_params.append('dry-run')
if warnings:
request_params.append('warnings')
if max_results is not None:
request_params.append('max-results=%s' % max_results)
if start_token is not None:
request_params.append('start-token=%s' % start_token)
if start_index is not None:
request_params.append('start-index=%s' % start_index)
if performance_start is not None:
request_params.append('performance.start=%s' % performance_start)
if performance_end is not None:
request_params.append('performance.end=%s' % performance_end)
request_params = '&'.join(request_params)
if request_params:
result = '%s?%s' % (result, request_params)
return result
def _create_product_id(self, id, country, language, channel='online'):
return '%s:%s:%s:%s' % (channel, language, country, id)
def _create_batch_feed(self, entries, operation, feed=None,
feed_class=ProductFeed):
if feed is None:
feed = feed_class()
for entry in entries:
entry.batch_operation = gdata.data.BatchOperation(type=operation)
feed.entry.append(entry)
return feed
# Operations on a single product
def get_product(self, id, country, language, account_id=None,
auth_token=None):
"""Get a product by id, country and language.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
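Example (an illustrative sketch; the product ID and locale values are
hypothetical)::

  >>> entry = client.get_product('sku123', 'US', 'en')
  >>> print entry.product_id.text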
"""
pid = self._create_product_id(id, country, language)
uri = self._create_uri(account_id, 'items/products', path=[pid])
return self.get_entry(uri, desired_class=ProductEntry,
auth_token=auth_token)
GetProduct = get_product
def insert_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Create a new product, by posting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
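Example (a sketch; the exact fields a ProductEntry needs are not shown,
and the population step is only hinted at)::

  >>> from gdata.contentforshopping.data import ProductEntry
  >>> product = ProductEntry()
  >>> # ... populate product_id, target_country, content_language, etc.
  >>> client.insert_product(product, dry_run=True)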
"""
uri = self._create_uri(account_id, 'items/products',
dry_run=dry_run, warnings=warnings)
return self.post(product, uri=uri, auth_token=auth_token)
InsertProduct = insert_product
def update_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update a product, by putting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False
by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.update(product, uri=uri, auth_token=auth_token)
UpdateProduct = update_product
def delete_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete a product
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
DeleteProduct = delete_product
# Operations on multiple products
def get_products(self, max_results=None, start_token=None, start_index=None,
performance_start=None, performance_end=None,
account_id=None, auth_token=None):
"""Get a feed of products for the account.
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_token: The start token of the feed provided by the API.
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param performance_start: The start date (inclusive) of click data returned.
Should be represented as YYYY-MM-DD; not appended
if left as None.
:param performance_end: The end date (inclusive) of click data returned.
Should be represented as YYYY-MM-DD; not appended
if left as None.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
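Example (an illustrative sketch; the dates are placeholders)::

  >>> feed = client.get_products(max_results=50,
  ...                            performance_start='2011-01-01',
  ...                            performance_end='2011-01-31')
  >>> len(feed.entry)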
"""
uri = self._create_uri(account_id, 'items/products',
max_results=max_results,
start_token=start_token,
start_index=start_index,
performance_start=performance_start,
performance_end=performance_end)
return self.get_feed(uri, auth_token=auth_token,
desired_class=ProductFeed)
GetProducts = get_products
def batch(self, feed, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Send a batch request.
:param feed: The feed of batch entries to send.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
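Example (a sketch; ``entries`` is a hypothetical list of ProductEntry
objects)::

  >>> feed = client._create_batch_feed(entries, 'insert')
  >>> result = client.batch(feed, dry_run=True)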
"""
uri = self._create_uri(account_id, 'items/products', path=['batch'],
dry_run=dry_run, warnings=warnings)
return self.post(feed, uri=uri, auth_token=auth_token,
desired_class=ProductFeed)
Batch = batch
def insert_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
feed = self._create_batch_feed(products, 'insert')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
InsertProducts = insert_products
def update_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
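Example (an illustrative sketch; assumes the entries were previously
fetched from the API, so their atom:id is already set)::

  >>> feed = client.get_products()
  >>> # ... modify the returned entries in place ...
  >>> client.update_products(feed.entry)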
"""
feed = self._create_batch_feed(products, 'update')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
UpdateProducts = update_products
def delete_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete the products using a batch request.
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'delete')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
DeleteProducts = delete_products
# Operations on datafeeds
def get_datafeeds(self, account_id=None):
"""Get the feed of datafeeds.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False)
return self.get_feed(uri, desired_class=DatafeedFeed)
GetDatafeeds = get_datafeeds
# Operations on a single datafeed
def get_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Get the feed of a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DatafeedEntry)
GetDatafeed = get_datafeed
def insert_datafeed(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a datafeed.
:param entry: XML Content of post request required for registering a
datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertDatafeed = insert_datafeed
def update_datafeed(self, entry, feed_id, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the feed of a single datafeed.
:param entry: XML Content of put request required for updating a
datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.update(entry, auth_token=auth_token, uri=uri)
UpdateDatafeed = update_datafeed
def delete_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Delete a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.delete(uri, auth_token=auth_token)
DeleteDatafeed = delete_datafeed
# Operations on client accounts
def get_client_accounts(self, max_results=None, start_index=None,
account_id=None, auth_token=None):
"""Get the feed of managed accounts
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts',
max_results=max_results, start_index=start_index,
use_projection=False)
return self.get_feed(uri, desired_class=ClientAccountFeed,
auth_token=auth_token)
GetClientAccounts = get_client_accounts
def get_client_account(self, client_account_id,
account_id=None, auth_token=None):
"""Get a managed account.
:param client_account_id: The Account ID of the subaccount being retrieved.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False)
return self.get_entry(uri, desired_class=ClientAccount,
auth_token=auth_token)
GetClientAccount = get_client_account
def insert_client_account(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a client account entry
:param entry: An entry of type ClientAccount
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertClientAccount = insert_client_account
def update_client_account(self, entry, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Update a client account
:param entry: An entry of type ClientAccount to update to
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.update(entry, uri=uri, auth_token=auth_token)
UpdateClientAccount = update_client_account
def delete_client_account(self, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Delete a client account
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
DeleteClientAccount = delete_client_account
def get_users_feed(self, account_id=None, auth_token=None):
"""Get the users feed for an account.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'users', use_projection=False)
return self.get_feed(uri, auth_token=auth_token, desired_class=UsersFeed)
GetUsersFeed = get_users_feed
def get_users_entry(self, user_email, account_id=None, auth_token=None):
"""Get a users feed entry for an account.
:param user_email: Email of the user entry to be retrieved.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.get_entry(uri, auth_token=auth_token, desired_class=UsersEntry)
GetUsersEntry = get_users_entry
def insert_users_entry(self, entry, account_id=None, auth_token=None):
"""Insert a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'users', use_projection=False)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertUsersEntry = insert_users_entry
def update_users_entry(self, entry, account_id=None, auth_token=None):
"""Update a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
# Could also use entry.find_edit_link() but that is inconsistent
# with the rest of the module
user_email = entry.title.text
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.update(entry, uri=uri, auth_token=auth_token)
UpdateUsersEntry = update_users_entry
def delete_users_entry(self, entry, account_id=None, auth_token=None):
"""Delete a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
# Could also use entry.find_edit_link() but that is inconsistent
# with the rest of the module
user_email = entry.title.text
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.delete(uri, auth_token=auth_token)
DeleteUsersEntry = delete_users_entry
def get_data_quality_feed(self, account_id=None, auth_token=None,
max_results=None, start_index=None):
"""Get the data quality feed for an account.
:param max_results: The maximum number of results to return (default 25,
max 100).
:param start_index: The starting index of the feed to return.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'dataquality', use_projection=False,
max_results=max_results, start_index=start_index)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DataQualityFeed)
GetDataQualityFeed = get_data_quality_feed
def get_data_quality_entry(self, secondary_account_id=None,
account_id=None, auth_token=None):
"""Get the data quality feed entry for an account.
:param secondary_account_id: The Account ID of the secondary account. If
omitted, the value of account_id is used.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
if secondary_account_id is None:
secondary_account_id = account_id or self.cfs_account_id
uri = self._create_uri(account_id, 'dataquality',
path=[secondary_account_id],
use_projection=False)
return self.get_entry(uri, auth_token=auth_token,
desired_class=DataQualityEntry)
GetDataQualityEntry = get_data_quality_entry
def update_inventory_entry(self, product, id, country, language, store_code,
account_id=None, auth_token=None):
"""Make a local product update, by putting the inventory entry.
:param product: A :class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param store_code: The code for the store where this local product will
be updated.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
pid = self._create_product_id(id, country, language, channel='local')
uri = self._create_uri(account_id, 'inventory',
path=[store_code, 'items', pid],
use_projection=False)
return self.update(product, uri=uri, auth_token=auth_token)
UpdateInventoryEntry = update_inventory_entry
def add_local_id(self, product, id, country, language,
store_code, account_id=None):
"""Add an atom id to a local product with a local store specific URI.
:param product: A :class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param store_code: The code for the store where this local product will
be updated.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
"""
pid = self._create_product_id(id, country, language, channel='local')
uri = self._create_uri(account_id, 'inventory',
path=[store_code, 'items', pid],
use_projection=False)
product.id = atom.data.Id(uri)
return product
AddLocalId = add_local_id
def update_inventory_feed(self, products, account_id=None, auth_token=None):
"""Update a batch of local products, by putting the product entry feed.
:param products: A list containing entries of
:class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
.. note:: Entries must have the atom:id element set. You can use
add_local_id to set this attribute using the store_code, product
id, country and language.
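Example (a sketch; the store code, product IDs and locale are
hypothetical)::

  >>> entries = [client.add_local_id(p, pid, 'US', 'en', 'store1')
  ...            for p, pid in zip(inventory_entries, product_ids)]
  >>> client.update_inventory_feed(entries)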
"""
feed = self._create_batch_feed(products, 'update',
feed_class=InventoryFeed)
uri = self._create_uri(account_id, 'inventory', path=['batch'],
use_projection=False)
return self.post(feed, uri=uri, auth_token=auth_token)
UpdateInventoryFeed = update_inventory_feed
|
saurabh6790/medapp | refs/heads/master | hr/doctype/job_applicant/job_applicant.py | 30 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from utilities.transaction_base import TransactionBase
from webnotes.utils import extract_email_id
class DocType(TransactionBase):
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def get_sender(self, comm):
return webnotes.conn.get_value('Jobs Email Settings',None,'email_id')
def validate(self):
self.set_status() |
eddyb/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/py/testing/path/test_svnauth.py | 163 | import py
import svntestbase
from py.path import SvnAuth
import time
import sys
svnbin = py.path.local.sysfind('svn')
def make_repo_auth(repo, userdata):
""" write config to repo
user information in userdata is used for auth
userdata has user names as keys, and a tuple (password, readwrite) as
values, where 'readwrite' is either 'r' or 'rw'
"""
confdir = py.path.local(repo).join('conf')
confdir.join('svnserve.conf').write('''\
[general]
anon-access = none
password-db = passwd
authz-db = authz
realm = TestRepo
''')
authzdata = '[/]\n'
passwddata = '[users]\n'
for user in userdata:
authzdata += '%s = %s\n' % (user, userdata[user][1])
passwddata += '%s = %s\n' % (user, userdata[user][0])
confdir.join('authz').write(authzdata)
confdir.join('passwd').write(passwddata)
def serve_bg(repopath):
pidfile = py.path.local(repopath).join('pid')
port = 10000
e = None
while port < 10010:
cmd = 'svnserve -d -T --listen-port=%d --pid-file=%s -r %s' % (
port, pidfile, repopath)
print(cmd)
try:
py.process.cmdexec(cmd)
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
else:
# XXX we assume here that the pid file gets written somewhere, I
# guess this should be relatively safe... (I hope, at least?)
counter = pid = 0
while counter < 10:
counter += 1
try:
pid = pidfile.read()
except py.error.ENOENT:
pass
if pid:
break
time.sleep(0.2)
return port, int(pid)
port += 1
raise IOError('could not start svnserve: %s' % (e,))
class TestSvnAuth(object):
def test_basic(self):
auth = SvnAuth('foo', 'bar')
assert auth.username == 'foo'
assert auth.password == 'bar'
assert str(auth)
def test_makecmdoptions_uname_pw_makestr(self):
auth = SvnAuth('foo', 'bar')
assert auth.makecmdoptions() == '--username="foo" --password="bar"'
def test_makecmdoptions_quote_escape(self):
auth = SvnAuth('fo"o', '"ba\'r"')
assert auth.makecmdoptions() == '--username="fo\\"o" --password="\\"ba\'r\\""'
def test_makecmdoptions_no_cache_auth(self):
auth = SvnAuth('foo', 'bar', cache_auth=False)
assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
'--no-auth-cache')
def test_makecmdoptions_no_interactive(self):
auth = SvnAuth('foo', 'bar', interactive=False)
assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
'--non-interactive')
def test_makecmdoptions_no_interactive_no_cache_auth(self):
auth = SvnAuth('foo', 'bar', cache_auth=False,
interactive=False)
assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
'--no-auth-cache --non-interactive')
class svnwc_no_svn(py.path.svnwc):
def __new__(cls, *args, **kwargs):
self = super(svnwc_no_svn, cls).__new__(cls, *args, **kwargs)
self.commands = []
return self
def _svn(self, *args):
self.commands.append(args)
class TestSvnWCAuth(object):
def setup_method(self, meth):
if not svnbin:
py.test.skip("svn binary required")
self.auth = SvnAuth('user', 'pass', cache_auth=False)
def test_checkout(self):
wc = svnwc_no_svn('foo', auth=self.auth)
wc.checkout('url')
assert wc.commands[0][-1] == ('--username="user" --password="pass" '
'--no-auth-cache')
def test_commit(self):
wc = svnwc_no_svn('foo', auth=self.auth)
wc.commit('msg')
assert wc.commands[0][-1] == ('--username="user" --password="pass" '
'--no-auth-cache')
def test_checkout_no_cache_auth(self):
wc = svnwc_no_svn('foo', auth=self.auth)
wc.checkout('url')
assert wc.commands[0][-1] == ('--username="user" --password="pass" '
'--no-auth-cache')
def test_checkout_auth_from_constructor(self):
wc = svnwc_no_svn('foo', auth=self.auth)
wc.checkout('url')
assert wc.commands[0][-1] == ('--username="user" --password="pass" '
'--no-auth-cache')
class svnurl_no_svn(py.path.svnurl):
cmdexec_output = 'test'
popen_output = 'test'
def __new__(cls, *args, **kwargs):
self = super(svnurl_no_svn, cls).__new__(cls, *args, **kwargs)
self.commands = []
return self
def _cmdexec(self, cmd):
self.commands.append(cmd)
return self.cmdexec_output
def _popen(self, cmd):
self.commands.append(cmd)
return self.popen_output
class TestSvnURLAuth(object):
def setup_method(self, meth):
self.auth = SvnAuth('foo', 'bar')
def test_init(self):
u = svnurl_no_svn('http://foo.bar/svn')
assert u.auth is None
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
assert u.auth is self.auth
def test_new(self):
u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
new = u.new(basename='bar')
assert new.auth is self.auth
assert new.url == 'http://foo.bar/svn/bar'
def test_join(self):
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
new = u.join('foo')
assert new.auth is self.auth
assert new.url == 'http://foo.bar/svn/foo'
def test_listdir(self):
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
u.cmdexec_output = '''\
1717 johnny 1529 Nov 04 14:32 LICENSE.txt
1716 johnny 5352 Nov 04 14:28 README.txt
'''
paths = u.listdir()
assert paths[0].auth is self.auth
assert paths[1].auth is self.auth
assert paths[0].basename == 'LICENSE.txt'
def test_info(self):
u = svnurl_no_svn('http://foo.bar/svn/LICENSE.txt', auth=self.auth)
def dirpath(self):
return self
u.cmdexec_output = '''\
1717 johnny 1529 Nov 04 14:32 LICENSE.txt
1716 johnny 5352 Nov 04 14:28 README.txt
'''
org_dp = u.__class__.dirpath
u.__class__.dirpath = dirpath
try:
info = u.info()
finally:
u.dirpath = org_dp
assert info.size == 1529
def test_open(self):
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
foo = u.join('foo')
foo.check = lambda *args, **kwargs: True
ret = foo.open()
assert ret == 'test'
assert '--username="foo" --password="bar"' in foo.commands[0]
def test_dirpath(self):
u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
parent = u.dirpath()
assert parent.auth is self.auth
def test_mkdir(self):
u = svnurl_no_svn('http://foo.bar/svn/qweqwe', auth=self.auth)
assert not u.commands
u.mkdir(msg='created dir foo')
assert u.commands
assert '--username="foo" --password="bar"' in u.commands[0]
def test_copy(self):
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
u2 = svnurl_no_svn('http://foo.bar/svn2')
u.copy(u2, 'copied dir')
assert '--username="foo" --password="bar"' in u.commands[0]
def test_rename(self):
u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
u.rename('http://foo.bar/svn/bar', 'moved foo to bar')
assert '--username="foo" --password="bar"' in u.commands[0]
def test_remove(self):
u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
u.remove(msg='removing foo')
assert '--username="foo" --password="bar"' in u.commands[0]
def test_export(self):
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
target = py.path.local('/foo')
u.export(target)
assert '--username="foo" --password="bar"' in u.commands[0]
def test_log(self):
u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
u.popen_output = py.io.TextIO(py.builtin._totext('''\
<?xml version="1.0"?>
<log>
<logentry revision="51381">
<author>guido</author>
<date>2008-02-11T12:12:18.476481Z</date>
<msg>Creating branch to work on auth support for py.path.svn*.
</msg>
</logentry>
</log>
''', 'ascii'))
u.check = lambda *args, **kwargs: True
ret = u.log(10, 20, verbose=True)
assert '--username="foo" --password="bar"' in u.commands[0]
assert len(ret) == 1
assert int(ret[0].rev) == 51381
assert ret[0].author == 'guido'
def test_propget(self):
u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
u.propget('foo')
assert '--username="foo" --password="bar"' in u.commands[0]
def pytest_funcarg__setup(request):
return Setup(request)
class Setup:
def __init__(self, request):
if not svnbin:
py.test.skip("svn binary required")
if not request.config.option.runslowtests:
py.test.skip('use --runslowtests to run these tests')
tmpdir = request.getfuncargvalue("tmpdir")
repodir = tmpdir.join("repo")
py.process.cmdexec('svnadmin create %s' % repodir)
if sys.platform == 'win32':
repodir = '/' + str(repodir).replace('\\', '/')
self.repo = py.path.svnurl("file://%s" % repodir)
if py.std.sys.platform == 'win32':
# remove trailing slash...
repodir = repodir[1:]
self.repopath = py.path.local(repodir)
self.temppath = tmpdir.mkdir("temppath")
self.auth = SvnAuth('johnny', 'foo', cache_auth=False,
interactive=False)
make_repo_auth(self.repopath, {'johnny': ('foo', 'rw')})
self.port, self.pid = serve_bg(self.repopath.dirpath())
# XXX caching is too global
py.path.svnurl._lsnorevcache._dict.clear()
request.addfinalizer(lambda: py.process.kill(self.pid))
class TestSvnWCAuthFunctional:
def test_checkout_constructor_arg(self, setup):
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
assert wc.join('.svn').check()
def test_checkout_function_arg(self, setup):
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
assert wc.join('.svn').check()
def test_checkout_failing_non_interactive(self, setup):
auth = SvnAuth('johnny', 'bar', cache_auth=False,
interactive=False)
wc = py.path.svnwc(setup.temppath, auth)
py.test.raises(Exception,
("wc.checkout('svn://localhost:%(port)s/%(repopath)s')" %
setup.__dict__))
def test_log(self, setup):
wc = py.path.svnwc(setup.temppath, setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
foo = wc.ensure('foo.txt')
wc.commit('added foo.txt')
log = foo.log()
assert len(log) == 1
assert log[0].msg == 'added foo.txt'
def test_switch(self, setup):
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
svnurl = 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename)
wc.checkout(svnurl)
wc.ensure('foo', dir=True).ensure('foo.txt').write('foo')
wc.commit('added foo dir with foo.txt file')
wc.ensure('bar', dir=True)
wc.commit('added bar dir')
bar = wc.join('bar')
bar.switch(svnurl + '/foo')
assert bar.join('foo.txt')
def test_update(self, setup):
wc1 = py.path.svnwc(setup.temppath.ensure('wc1', dir=True),
auth=setup.auth)
wc2 = py.path.svnwc(setup.temppath.ensure('wc2', dir=True),
auth=setup.auth)
wc1.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
wc2.checkout(
'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
wc1.ensure('foo', dir=True)
wc1.commit('added foo dir')
wc2.update()
assert wc2.join('foo').check()
auth = SvnAuth('unknown', 'unknown', interactive=False)
wc2.auth = auth
py.test.raises(Exception, 'wc2.update()')
def test_lock_unlock_status(self, setup):
port = setup.port
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
wc.ensure('foo', file=True)
wc.commit('added foo file')
foo = wc.join('foo')
foo.lock()
status = foo.status()
assert status.locked
foo.unlock()
status = foo.status()
assert not status.locked
auth = SvnAuth('unknown', 'unknown', interactive=False)
foo.auth = auth
py.test.raises(Exception, 'foo.lock()')
py.test.raises(Exception, 'foo.unlock()')
def test_diff(self, setup):
port = setup.port
wc = py.path.svnwc(setup.temppath, auth=setup.auth)
wc.checkout(
'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
wc.ensure('foo', file=True)
wc.commit('added foo file')
wc.update()
rev = int(wc.status().rev)
foo = wc.join('foo')
foo.write('bar')
diff = foo.diff()
assert '\n+bar\n' in diff
foo.commit('added some content')
diff = foo.diff()
assert not diff
diff = foo.diff(rev=rev)
assert '\n+bar\n' in diff
auth = SvnAuth('unknown', 'unknown', interactive=False)
foo.auth = auth
py.test.raises(Exception, 'foo.diff(rev=rev)')
class TestSvnURLAuthFunctional:
def test_listdir(self, setup):
port = setup.port
u = py.path.svnurl(
'svn://localhost:%s/%s' % (port, setup.repopath.basename),
auth=setup.auth)
u.ensure('foo')
paths = u.listdir()
assert len(paths) == 1
assert paths[0].auth is setup.auth
auth = SvnAuth('foo', 'bar', interactive=False)
u = py.path.svnurl(
'svn://localhost:%s/%s' % (port, setup.repopath.basename),
auth=auth)
py.test.raises(Exception, 'u.listdir()')
def test_copy(self, setup):
port = setup.port
u = py.path.svnurl(
'svn://localhost:%s/%s' % (port, setup.repopath.basename),
auth=setup.auth)
foo = u.mkdir('foo')
assert foo.check()
bar = u.join('bar')
foo.copy(bar)
assert bar.check()
assert bar.auth is setup.auth
auth = SvnAuth('foo', 'bar', interactive=False)
u = py.path.svnurl(
'svn://localhost:%s/%s' % (port, setup.repopath.basename),
auth=auth)
foo = u.join('foo')
bar = u.join('bar')
py.test.raises(Exception, 'foo.copy(bar)')
def test_write_read(self, setup):
port = setup.port
u = py.path.svnurl(
'svn://localhost:%s/%s' % (port, setup.repopath.basename),
auth=setup.auth)
foo = u.ensure('foo')
fp = foo.open()
try:
data = fp.read()
finally:
fp.close()
assert data == ''
auth = SvnAuth('foo', 'bar', interactive=False)
u = py.path.svnurl(
'svn://localhost:%s/%s' % (port, setup.repopath.basename),
auth=auth)
foo = u.join('foo')
py.test.raises(Exception, 'foo.open()')
# XXX rinse, repeat... :|
|
kiwicopple/MyMDb | refs/heads/master | venv/Lib/site-packages/sphinx/util/texescape.py | 11 | # -*- coding: utf-8 -*-
"""
sphinx.util.texescape
~~~~~~~~~~~~~~~~~~~~~
TeX escaping helper.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
tex_replacements = [
# map TeX special chars
(u'$', ur'\$'),
(u'%', ur'\%'),
(u'&', ur'\&'),
(u'#', ur'\#'),
(u'_', ur'\_'),
(u'{', ur'\{'),
(u'}', ur'\}'),
(u'[', ur'{[}'),
(u']', ur'{]}'),
(u'`', ur'{}`'),
(u'\\',ur'\textbackslash{}'),
(u'~', ur'\textasciitilde{}'),
(u'<', ur'\textless{}'),
(u'>', ur'\textgreater{}'),
(u'^', ur'\textasciicircum{}'),
# map special Unicode characters to TeX commands
(u'¶', ur'\P{}'),
(u'§', ur'\S{}'),
(u'€', ur'\texteuro{}'),
(u'∞', ur'\(\infty\)'),
(u'±', ur'\(\pm\)'),
(u'→', ur'\(\rightarrow\)'),
(u'‣', ur'\(\rightarrow\)'),
# used to separate -- in options
(u'', ur'{}'),
# map some special Unicode characters to similar ASCII ones
(u'─', ur'-'),
(u'⎽', ur'\_'),
(u'╲', ur'\textbackslash{}'),
(u'|', ur'\textbar{}'),
(u'│', ur'\textbar{}'),
(u'ℯ', ur'e'),
(u'ⅈ', ur'i'),
(u'₁', ur'1'),
(u'₂', ur'2'),
# map Greek alphabet
(u'α', ur'\(\alpha\)'),
(u'β', ur'\(\beta\)'),
(u'γ', ur'\(\gamma\)'),
(u'δ', ur'\(\delta\)'),
(u'ε', ur'\(\epsilon\)'),
(u'ζ', ur'\(\zeta\)'),
(u'η', ur'\(\eta\)'),
(u'θ', ur'\(\theta\)'),
(u'ι', ur'\(\iota\)'),
(u'κ', ur'\(\kappa\)'),
(u'λ', ur'\(\lambda\)'),
(u'μ', ur'\(\mu\)'),
(u'ν', ur'\(\nu\)'),
(u'ξ', ur'\(\xi\)'),
(u'ο', ur'o'),
(u'π', ur'\(\pi\)'),
(u'ρ', ur'\(\rho\)'),
(u'σ', ur'\(\sigma\)'),
(u'τ', ur'\(\tau\)'),
(u'υ', u'\\(\\upsilon\\)'),
(u'φ', ur'\(\phi\)'),
(u'χ', ur'\(\chi\)'),
(u'ψ', ur'\(\psi\)'),
(u'ω', ur'\(\omega\)'),
(u'Α', ur'A'),
(u'Β', ur'B'),
(u'Γ', ur'\(\Gamma\)'),
(u'Δ', ur'\(\Delta\)'),
(u'Ε', ur'E'),
(u'Ζ', ur'Z'),
(u'Η', ur'H'),
(u'Θ', ur'\(\Theta\)'),
(u'Ι', ur'I'),
(u'Κ', ur'K'),
(u'Λ', ur'\(\Lambda\)'),
(u'Μ', ur'M'),
(u'Ν', ur'N'),
(u'Ξ', ur'\(\Xi\)'),
(u'Ο', ur'O'),
(u'Π', ur'\(\Pi\)'),
(u'Ρ', ur'P'),
(u'Σ', ur'\(\Sigma\)'),
(u'Τ', ur'T'),
(u'Υ', u'\\(\\Upsilon\\)'),
(u'Φ', ur'\(\Phi\)'),
(u'Χ', ur'X'),
(u'Ψ', ur'\(\Psi\)'),
(u'Ω', ur'\(\Omega\)'),
(u'Ω', ur'\(\Omega\)'),
]
tex_escape_map = {}
tex_replace_map = {}
tex_hl_escape_map_new = {}
def init():
for a, b in tex_replacements:
tex_escape_map[ord(a)] = b
tex_replace_map[ord(a)] = u'_'
for a, b in tex_replacements:
if a in u'[]{}\\': continue
tex_hl_escape_map_new[ord(a)] = b
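# Illustrative usage (a sketch, not part of the original module): once
# init() has populated the translation maps, unicode.translate() applies
# them directly.  The expected output below is an assumption based on the
# replacement table above.
if __name__ == '__main__':
    init()
    print(u'100% of $5'.translate(tex_escape_map))  # -> 100\% of \$5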
|
rcos/Observatory | refs/heads/master | observatory/dashboard/migrations/0004_set_mentor.py | 1 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
User = orm['auth.User']
UserInfo = orm.UserInfo
for user in User.objects.all():
temp = UserInfo(user=user, mentor=user.is_staff)
temp.save()
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("cannot reverse this migration")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dashboard.authorrequest': {
'Meta': {'object_name': 'AuthorRequest'},
'autodetected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dashboard.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'dashboard.blog': {
'Meta': {'object_name': 'Blog'},
'from_feed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'most_recent_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'rss': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'dashboard.blogpost': {
'Meta': {'object_name': 'BlogPost', '_ormbases': ['dashboard.Event']},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dashboard.Blog']"}),
'content': ('django.db.models.fields.TextField', [], {}),
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dashboard.Event']", 'unique': 'True', 'primary_key': 'True'}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'markdown': ('django.db.models.fields.TextField', [], {})
},
'dashboard.commit': {
'Meta': {'object_name': 'Commit', '_ormbases': ['dashboard.Event']},
'diff': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dashboard.Event']", 'unique': 'True', 'primary_key': 'True'}),
'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dashboard.Repository']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'dashboard.contributor': {
'Meta': {'object_name': 'Contributor'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dashboard.Project']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'dashboard.event': {
'Meta': {'object_name': 'Event'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'author_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'from_feed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dashboard.Project']", 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'url_path': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'dashboard.project': {
'Meta': {'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dashboard.Blog']", 'unique': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentored'", 'null': 'True', 'to': u"orm['auth.User']"}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'presentations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repository': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dashboard.Repository']", 'unique': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url_path': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'wiki': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'dashboard.repository': {
'Meta': {'object_name': 'Repository'},
'clone_url': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'cmd': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'from_feed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'most_recent_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'repo_rss': ('django.db.models.fields.URLField', [], {'max_length': '128'}),
'vcs': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '3'}),
'web_url': ('django.db.models.fields.URLField', [], {'max_length': '128'})
},
'dashboard.screenshot': {
'Meta': {'object_name': 'Screenshot'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'extension': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dashboard.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'dashboard.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'mentor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'info'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['dashboard']
symmetrical = True
|
koltegirish/Arduino | refs/heads/esp8266 | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/poolmanager.py | 168 | # urllib3/poolmanager.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import connection_from_url, port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
return pool_cls(host, port, **self.connection_pool_kw)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
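Example (an illustrative sketch; ``manager`` is an existing PoolManager
instance and the host name is a placeholder)::

    >>> pool = manager.connection_from_host('example.com', scheme='https')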
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
# If the scheme, host, or port doesn't match existing open connections,
# open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(RequestMethods):
"""
Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method
will make requests to any url through the defined proxy. The ProxyManager
class will automatically set the 'Host' header if it is not provided.
"""
def __init__(self, proxy_pool):
self.proxy_pool = proxy_pool
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
host = parse_url(url).host
if host:
headers_['Host'] = host
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
kw['assert_same_host'] = False
kw['headers'] = self._set_proxy_headers(url, headers=kw.get('headers'))
return self.proxy_pool.urlopen(method, url, **kw)
def proxy_from_url(url, **pool_kw):
proxy_pool = connection_from_url(url, **pool_kw)
return ProxyManager(proxy_pool)
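# Hedged usage sketch for the helper above (the proxy address is illustrative):
#
#   proxy = proxy_from_url('http://localhost:3128/')
#   r = proxy.urlopen('GET', 'http://example.com/')  # request is sent via the proxy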
|
neilswainston/development-py | refs/heads/master | synbiochemdev/__init__.py | 8 | '''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
|
switchkiller/ProjDjanko | refs/heads/master | lib/python2.7/posixpath.py | 4 | /usr/lib/python2.7/posixpath.py |
balajikris/autorest | refs/heads/master | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/ParameterFlattening/autorestparameterflattening/models/availability_set_update_parameters.py | 16 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AvailabilitySetUpdateParameters(Model):
"""AvailabilitySetUpdateParameters.
:param tags: A set of tags. A description about the set of tags.
:type tags: dict
"""
_validation = {
'tags': {'required': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, tags):
self.tags = tags
|
bubakazouba/Robinhood-for-Google-Finance | refs/heads/master | Robinhood.py | 1 |
import getpass
import json
import requests
import urllib
import sys
try:
from urllib.request import urlretrieve #py3
except ImportError:
from urllib import urlretrieve # py2
class Robinhood:
endpoints = {
"login": "https://api.robinhood.com/api-token-auth/",
"investment_profile": "https://api.robinhood.com/user/investment_profile/",
"accounts":"https://api.robinhood.com/accounts/",
"ach_iav_auth":"https://api.robinhood.com/ach/iav/auth/",
"ach_relationships":"https://api.robinhood.com/ach/relationships/",
"ach_transfers":"https://api.robinhood.com/ach/transfers/",
"applications":"https://api.robinhood.com/applications/",
"dividends":"https://api.robinhood.com/dividends/",
"edocuments":"https://api.robinhood.com/documents/",
"instruments":"https://api.robinhood.com/instruments/",
"margin_upgrades":"https://api.robinhood.com/margin/upgrades/",
"markets":"https://api.robinhood.com/markets/",
"notifications":"https://api.robinhood.com/notifications/",
"orders":"https://api.robinhood.com/orders/",
"password_reset":"https://api.robinhood.com/password_reset/request/",
"portfolios":"https://api.robinhood.com/portfolios/",
"positions":"https://api.robinhood.com/positions/",
"quotes":"https://api.robinhood.com/quotes/",
"historicals":"https://api.robinhood.com/quotes/historicals/",
"document_requests":"https://api.robinhood.com/upload/document_requests/",
"user":"https://api.robinhood.com/user/",
"watchlists":"https://api.robinhood.com/watchlists/",
"news":"https://api.robinhood.com/midlands/news/"
}
session = None
username = None
password = None
headers = None
auth_token = None
##############################
#Logging in and initializing
##############################
def __init__(self):
self.session = requests.session()
if sys.version_info[0] < 3: #py2
self.session.proxies = urllib.getproxies()
else: #py3
self.session.proxies = urllib.request.getproxies()
self.headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)"
}
self.session.headers = self.headers
def login_prompt(self):
"""Prompts user for username and password and calls login()."""
        username = raw_input("Username: ") if sys.version_info[0] < 3 else input("Username: ")
password = getpass.getpass()
return self.login(username=username, password=password)
def login(self, username, password):
self.username = username
self.password = password
#data = urllib.urlencode({"password" : self.password, "username" : self.username})
if sys.version_info[0] < 3: #py2
data = urllib.urlencode({"password" : self.password, "username" : self.username})
else: #py3
data = urllib.parse.urlencode({"password" : self.password, "username" : self.username})
res = self.session.post(self.endpoints['login'], data=data)
res = res.json()
if 'token' in res:
self.auth_token = res['token']
            print('Logged in successfully')
elif 'mfa_type' in res: # When MFA is turned on
if sys.version_info[0] < 3:
mfa_code = raw_input("MFA code: ").strip()
else:
mfa_code = input("MFA code: ").strip()
if sys.version_info[0] < 3:
data = urllib.urlencode({"password" : self.password, "username" : self.username, 'mfa_code': mfa_code})
else:
data = urllib.parse.urlencode({"password" : self.password, "username" : self.username, 'mfa_code': mfa_code})
res_mfa = self.session.post(self.endpoints['login'], data=data)
res_mfa = res_mfa.json()
if 'token' in res_mfa:
self.auth_token = res_mfa['token']
                print('Logged in successfully')
else:
print('Login Failed')
return False
else:
print('Login Failed')
return False
self.headers['Authorization'] = 'Token '+self.auth_token
return True
##############################
#GET DATA
##############################
def get_endpoint(self, endpoint=None):
res = self.session.get(self.endpoints[endpoint])
return json.loads(res.content.decode('utf-8'))
def get_custom_endpoint(self, endpoint=None):
res = self.session.get(endpoint)
return json.loads(res.content.decode('utf-8'))
def investment_profile(self):
self.session.get(self.endpoints['investment_profile'])
def instruments(self, stock=None):
res = self.session.get(self.endpoints['instruments'], params={'query':stock.upper()})
res = res.json()
return res['results']
def quote_data(self, stock=None):
#Prompt for stock if not entered
if stock is None:
            stock = raw_input("Symbol: ") if sys.version_info[0] < 3 else input("Symbol: ")
url = str(self.endpoints['quotes']) + str(stock) + "/"
#Check for validity of symbol
try:
if sys.version_info[0] < 3: #py2
res = json.loads((urllib.urlopen(url)).read().decode('utf-8'));
else: #py3
res = json.loads((urllib.request.urlopen(url)).read().decode('utf-8'));
if len(res) > 0:
return res;
else:
raise NameError("Invalid Symbol: " + stock);
except (ValueError):
raise NameError("Invalid Symbol: " + stock);
def get_quote(self, stock=None):
data = self.quote_data(stock)
return data["symbol"]
def get_historical_quotes(self,symbol,interval,span,bounds='regular'):
# Valid combination
# interval = 5minute | 10minute + span = day, week
# interval = day + span = year
# interval = week
# bounds can be 'regular' for regular hours or 'extended' for extended hours
res = self.session.get(self.endpoints['historicals'], params={'symbols':','.join(symbol).upper(), 'interval':interval, 'span':span, 'bounds':bounds})
return res.json()
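    # Illustrative call (assumes a logged-in client), using one of the valid
    # interval/span combinations listed above:
    #
    #   rb.get_historical_quotes(['AAPL'], '5minute', 'day')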
def get_news(self, symbol):
return self.session.get(self.endpoints['news']+symbol.upper()+"/").json()
def print_quote(self, stock=None):
data = self.quote_data(stock)
print(data["symbol"] + ": $" + data["last_trade_price"]);
def print_quotes(self, stocks):
for i in range(len(stocks)):
self.print_quote(stocks[i]);
def ask_price(self, stock=None):
return self.quote_data(stock)['ask_price'];
def ask_size(self, stock=None):
return self.quote_data(stock)['ask_size'];
def bid_price(self, stock=None):
return self.quote_data(stock)['bid_price'];
def bid_size(self, stock=None):
return self.quote_data(stock)['bid_size'];
def last_trade_price(self, stock=None):
return self.quote_data(stock)['last_trade_price'];
def previous_close(self, stock=None):
return self.quote_data(stock)['previous_close'];
def previous_close_date(self, stock=None):
return self.quote_data(stock)['previous_close_date'];
def adjusted_previous_close(self, stock=None):
return self.quote_data(stock)['adjusted_previous_close'];
def symbol(self, stock=None):
return self.quote_data(stock)['symbol'];
def last_updated_at(self, stock=None):
return self.quote_data(stock)['updated_at'];
def get_account(self):
res = self.session.get(self.endpoints['accounts'])
res = res.json()
return res['results'][0]
def get_url(self,url):
return self.session.get(url).json()
##############################
# PORTFOLIOS DATA
##############################
def portfolios(self):
"""Returns the user's portfolio data."""
return self.session.get(self.endpoints['portfolios']).json()['results'][0]
def adjusted_equity_previous_close(self):
return float(self.portfolios()['adjusted_equity_previous_close'])
def equity(self):
return float(self.portfolios()['equity'])
def equity_previous_close(self):
return float(self.portfolios()['equity_previous_close'])
def excess_margin(self):
return float(self.portfolios()['excess_margin'])
def extended_hours_equity(self):
return float(self.portfolios()['extended_hours_equity'])
def extended_hours_market_value(self):
return float(self.portfolios()['extended_hours_market_value'])
def last_core_equity(self):
return float(self.portfolios()['last_core_equity'])
def last_core_market_value(self):
return float(self.portfolios()['last_core_market_value'])
def market_value(self):
return float(self.portfolios()['market_value'])
def order_history(self):
return self.session.get(self.endpoints['orders']).json()
def dividends(self):
return self.session.get(self.endpoints['dividends']).json()
##############################
# POSITIONS DATA
##############################
def positions(self):
"""Returns the user's positions data."""
return self.session.get(self.endpoints['positions']).json()
def securities_owned(self):
"""
        Returns a list of symbols of securities of which there are more
        than zero shares in the user's portfolio.
"""
positions = self.positions()
securities = []
for position in positions['results']:
quantity = float(position['quantity'])
if quantity > 0:
securities.append(self.session.get(position['instrument']).json()['symbol'])
return securities
##############################
#PLACE ORDER
##############################
def place_order(self, instrument, quantity=1, bid_price = None, transaction=None):
if bid_price == None:
bid_price = self.quote_data(instrument['symbol'])['bid_price']
data = 'account=%s&instrument=%s&price=%f&quantity=%d&side=%s&symbol=%s&time_in_force=gfd&trigger=immediate&type=market' % (
self.get_account()['url'],
urllib.unquote(instrument['url']),
float(bid_price),
quantity,
transaction,
instrument['symbol']
)
res = self.session.post(self.endpoints['orders'], data=data)
return res
def place_buy_order(self, instrument, quantity, bid_price=None):
transaction = "buy"
return self.place_order(instrument, quantity, bid_price, transaction)
def place_sell_order(self, instrument, quantity, bid_price=None):
transaction = "sell"
return self.place_order(instrument, quantity, bid_price, transaction)
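    # Hedged end-to-end sketch (credentials and symbol are placeholders):
    #
    #   rb = Robinhood()
    #   if rb.login('user', 'password'):
    #       rb.print_quote('AAPL')                  # prints the last trade price
    #       instrument = rb.instruments('AAPL')[0]  # first matching instrument
    #       rb.place_buy_order(instrument, 1)       # market buy of one share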
|
Parcks/core | refs/heads/master | src/domain/post_install/shell/shell_command_runnable_factory.py | 1 | """
Scriptable Packages Installer - Parcks
Copyright (C) 2017 JValck - Setarit
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Setarit - parcks[at]setarit.com
"""
from __future__ import absolute_import
from src.domain.post_install.shell.shell_command_runner import ShellCommandRunner
from src.domain.post_install.shell.root_shell_command_runner import RootShellCommandRunner
class ShellCommandRunnableFactory():
def create(self, shell_command):
"""
        Creates the correct :class:`src.domain.post_install.shell.shell_command_runnable.ShellCommandRunnable` for the given ShellCommand
        :param shell_command: The ShellCommand to run; its ``asRoot`` attribute indicates whether root privileges are required
:type shell_command: :obj:`src.domain.shell_command.ShellCommand`
:returns: The :class:`src.domain.post_install.shell.shell_command_runnable.ShellCommandRunnable` that will execute the ShellCommand
:rtype: :obj:`src.domain.post_install.shell.shell_command_runnable.ShellCommandRunnable`
"""
if(shell_command.asRoot):
return RootShellCommandRunner(shell_command)
else:
return ShellCommandRunner(shell_command)
|
aleksandra-tarkowska/django | refs/heads/master | tests/utils_tests/test_module/__init__.py | 439 | class SiteMock(object):
_registry = {}
site = SiteMock()
|
osamak/student-portal | refs/heads/master | clubs/admin.py | 2 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from clubs.models import Club, Team, College
from core.models import StudentClubYear
class ClubFilter(admin.SimpleListFilter):
title = u"نوع النادي"
parameter_name = 'type'
def lookups(self, request, model_admin):
return (
('p', u'الرئاسة'),
('s', u'نادي متخصص'),
('c', u'نادي كلية'),
)
def queryset(self, request, queryset):
if self.value() == 'p':
return queryset.filter(english_name__icontains='Presidency')
elif self.value() == 'c':
return queryset.exclude(college__isnull=True)
elif self.value() == 's':
return queryset.filter(college__isnull=True).exclude(english_name__icontains='Presidency')
class SingleUserChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
try:
profile = obj.common_profile
return "%s (%s)" % (obj.username, profile.get_ar_full_name())
except ObjectDoesNotExist:
return obj.username
class MultipleUserChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
try:
profile = obj.common_profile
return "%s (%s)" % (obj.username, profile.get_ar_full_name())
except ObjectDoesNotExist:
return obj.username
class ClubAdminForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ClubAdminForm, self).__init__(*args, **kwargs)
if self.instance.id:
year = self.instance.year
else:
year = StudentClubYear.objects.get_current()
self.fields['parent'].queryset = Club.objects.filter(year=year)
self.fields['possible_parents'].queryset = Club.objects.filter(year=year)
media_members = User.objects.filter(memberships__english_name="Media Center",
memberships__year=year) | \
User.objects.filter(coordination__english_name="Media Center",
coordination__year=year)
self.fields['media_assessor'] = SingleUserChoiceField(required=False, queryset=media_members.order_by("username"))
#coordinator = SingleUserChoiceField(required=False, queryset=User.objects.order_by("username"))
#employee = SingleUserChoiceField(required=False, queryset=User.objects.filter(common_profile__is_student=False).order_by("username"))
#media_assessor = SingleUserChoiceField(required=False, queryset=User.objects.order_by("username"))
class Meta:
model = Club
fields = '__all__'
class ClubAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'city', 'email', 'coordinator',
'number_of_members', 'get_total_points')
list_filter = (ClubFilter, 'city', 'gender', 'year')
search_fields = ('name', 'city', 'email')
filter_horizontal = ('members', 'deputies',
'media_representatives', 'possible_parents')
raw_id_fields = ['coordinator', 'deputies', 'members', 'media_representatives', 'media_assessor', 'employee']
form = ClubAdminForm
def number_of_members(self, obj):
return obj.members.count()
number_of_members.short_description = u"عدد الأعضاء"
class CollegeAdmin(admin.ModelAdmin):
list_display = ('name', 'city', 'section', 'gender')
list_filter = ('city', 'gender')
class TeamAdminForm(forms.ModelForm):
coordinator = SingleUserChoiceField(required=False, queryset=User.objects.order_by("username"))
class Meta:
model = Club
fields = '__all__'
class TeamAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'city', 'gender', 'coordinator', 'get_member_count')
list_filter = ('city', 'gender', 'year')
search_fields = ('name', 'city', 'gender', 'code_name', 'email')
filter_horizontal = ('members',)
raw_id_fields = ['members','coordinator']
admin.site.register(College, CollegeAdmin)
admin.site.register(Club, ClubAdmin)
admin.site.register(Team, TeamAdmin) |
cricketclubucd/davisdragons | refs/heads/master | platform-tools/systrace/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/syspath/mypkg/__init__.py | 26 | """ fake package """
|
DStauffman/dstauffman2 | refs/heads/master | dstauffman2/games/pentago/utils.py | 1 | r"""
Utils module file for the "pentago" game. It defines the generic utility functions.
Notes
-----
#. Written by David C. Stauffer in January 2016.
"""
#%% Imports
import doctest
import logging
import os
import unittest
import numpy as np
from dstauffman import modd
from dstauffman2 import get_root_dir as dcs_root_dir
from dstauffman2.games.pentago.classes import Move
from dstauffman2.games.pentago.constants import INT_TOKEN, ONE_OFF, PLAYER, SIZES, WIN, \
_rotate_board
#%% Globals
logger = logging.getLogger(__name__)
#%% get_root_dir
def get_root_dir():
r"""
Gets the full path to the root directory of the pentago game.
Returns
-------
folder : str
Path to the pentago root folder
Notes
-----
#. Written by David C. Stauffer in January 2016.
Examples
--------
>>> from dstauffman2.games.pentago import get_root_dir
>>> folder = get_root_dir()
"""
folder = os.path.join(dcs_root_dir(), 'games', 'pentago')
return folder
#%% rotate_board
rotate_board = _rotate_board
#%% calc_cur_move
def calc_cur_move(cur_move, cur_game):
r"""
Calculates whose move it is based on the turn and game number.
Parameters
----------
cur_move : int
Current move number
cur_game : int
Current game number
Returns
-------
move : int
Current move, from {1=white, -1=black}
Examples
--------
>>> from dstauffman2.games.pentago import calc_cur_move
>>> move = calc_cur_move(0, 0)
>>> print(move)
1
"""
if np.mod(cur_move + cur_game, 2) == 0:
move = PLAYER['white']
else:
move = PLAYER['black']
return move
#%% check_for_win
def check_for_win(board):
r"""Checks for a win."""
# find white and black wins
white = np.flatnonzero(np.sum(np.expand_dims(board.ravel() == PLAYER['white'], axis=1) * WIN, axis=0) == 5)
black = np.flatnonzero(np.sum(np.expand_dims(board.ravel() == PLAYER['black'], axis=1) * WIN, axis=0) == 5)
# determine winner
if len(white) == 0:
if len(black) == 0:
winner = PLAYER['none']
else:
winner = PLAYER['black']
else:
if len(black) == 0:
winner = PLAYER['white']
else:
winner = PLAYER['draw']
# check for a full game board after determining no other win was found
if winner == PLAYER['none'] and not np.any(board == PLAYER['none']):
winner = PLAYER['draw']
# find winning pieces on the board
if winner == PLAYER['none']:
win_mask = np.zeros((SIZES['board'],SIZES['board']), dtype=bool)
else:
logger.debug('Win detected. Winner is {}.'.format(list(PLAYER)[list(PLAYER.values()).index(winner)]))
win_mask = np.reshape(np.sum(WIN[:, white], axis=1) + np.sum(WIN[:, black], axis=1), (SIZES['board'], SIZES['board'])) != 0
return (winner, win_mask)
#%% find_moves
def find_moves(board):
r"""
Finds the best current move.
Notes
-----
#. Currently this function is only trying to find a win in one move situation.
Examples
--------
>>> from dstauffman2.games.pentago import find_moves
>>> import numpy as np
>>> board = np.reshape(np.hstack((np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), np.zeros(24, dtype=int))), (6, 6))
>>> (white_moves, black_moves) = find_moves(board)
>>> print(white_moves[0])
row: 0, col: 1, quad: 1, dir: 1
>>> print(black_moves)
[]
"""
#%% get_move_from_one_off
def get_move_from_one_off(big_board, ix, ONE_OFF):
r"""Turns the given index into a Move instance."""
# preallocate x & y to NaNs in case the winning move is just a rotation
row = np.full(len(ix), INT_TOKEN, dtype=int)
column = row.copy()
# find missing piece
pos_ix = np.logical_and(np.logical_xor(np.abs(big_board), ONE_OFF[:,ix]), ONE_OFF[:,ix])
assert np.all(np.sum(pos_ix, axis=0) <= 1), 'Only exactly one or fewer places should be found.'
# pull out element number from 0 to 35
(one_off_row, one_off_col) = np.nonzero(pos_ix)
# convert to row and column
row[one_off_col] = one_off_row // SIZES['board']
column[one_off_col] = np.mod(one_off_row, SIZES['board'])
# get quadrant and rotation number
# based on order that ONE_OFF was built, so permutations of first quads 1,2,3,4, second left,right;
num = np.ceil(ix/WIN.shape[1]).astype(int)
# pull out quadrant number
quadrant = modd(num, 4)
# pull out rotation direction
direction = np.full(len(ix), -1, dtype=int)
direction[num < 5] = 1
# convert to a move class
move = set()
for i in range(len(ix)):
move.add(Move(row[i], column[i], quadrant[i], direction[i], power=5))
return move
# expand the board to a linear 2D matrix
big_board = np.expand_dims(board.ravel(), axis=1)
# check for wins that shouldn't exist
test = big_board * WIN
score = np.sum(test, axis=0)
if np.any(np.abs(score) >= 5):
raise ValueError('Board should not already be in a winning position.')
# cross reference two matrices with element-wise multiplication
test = big_board * ONE_OFF
# find score
score = np.sum(test, axis=0)
# find white and black rotate to win moves
rot_white = np.flatnonzero(score >= 5)
rot_black = np.flatnonzero(score <= -5)
# find white and black one off potentials
white = np.flatnonzero((score >= 4) & (score < 5))
black = np.flatnonzero((score <= -4) & (score > -5))
# see if the remaining piece is an open square
if len(white) > 0:
pos_white = ONE_OFF[:, white]
needed = np.logical_xor(pos_white, big_board)
free = np.logical_and(needed, np.logical_not(big_board))
ix_white = white[np.any(free, axis=0)]
else:
ix_white = np.array([], dtype=int)
if len(black) > 0:
pos_black = ONE_OFF[:, black]
needed = np.logical_xor(pos_black, big_board)
free = np.logical_and(needed, np.logical_not(big_board))
ix_black = black[np.any(free, axis=0)]
else:
ix_black = np.array([], dtype=int)
# find winning moves
# placement winning moves
white_set = get_move_from_one_off(big_board, ix_white, ONE_OFF)
black_set = get_move_from_one_off(big_board, ix_black, ONE_OFF)
# rotation only winning moves
white_rotations = get_move_from_one_off(big_board, rot_white, ONE_OFF)
black_rotations = get_move_from_one_off(big_board, rot_black, ONE_OFF)
# fill in all available row and columns positions for the rotate to win moves
empty = np.flatnonzero(big_board == PLAYER['none'])
for ix in empty:
this_row = ix // SIZES['board']
this_col = np.mod(ix, SIZES['board'])
for this_rot in white_rotations:
this_move = Move(this_row, this_col, this_rot.quadrant, this_rot.direction, power=5)
white_set.add(this_move)
for this_rot in black_rotations:
this_move = Move(this_row, this_col, this_rot.quadrant, this_rot.direction, power=5)
black_set.add(this_move)
# check for ties and set their power to -1
ties = white_set & black_set
for this_move in ties:
white_set.remove(this_move)
black_set.remove(this_move)
this_move.power = -1
white_set.add(this_move)
black_set.add(this_move)
# convert to list, sort by power, such that ties go at the end
white_moves = sorted(list(white_set))
black_moves = sorted(list(black_set))
return (white_moves, black_moves)
#%% create_board_from_moves
def create_board_from_moves(moves, first_player):
r"""Recreates a board from a move history."""
# make sure the first player is valid
assert first_player == PLAYER['white'] or first_player == PLAYER['black']
# create the initial board
board = np.full((SIZES['board'], SIZES['board']), PLAYER['none'], dtype=int)
# alias this player
this_player = first_player
# loop through the move history
for this_move in moves:
# check that square is empty
assert board[this_move.row, this_move.column] == PLAYER['none'], 'Invalid move encountered.'
# place the piece
board[this_move.row, this_move.column] = this_player
# rotate the board
_rotate_board(board, this_move.quadrant, this_move.direction, inplace=True)
# update the next player to move
this_player = PLAYER['white'] if this_player == PLAYER['black'] else PLAYER['black']
return board
#%% Unit test
if __name__ == '__main__':
unittest.main(module='dstauffman2.games.pentago.tests.test_utils', exit=False)
doctest.testmod(verbose=False)
|
elijah513/django | refs/heads/master | django/contrib/webdesign/__init__.py | 264 | import warnings
from django.utils.deprecation import RemovedInDjango110Warning
default_app_config = 'django.contrib.webdesign.apps.WebDesignConfig'
warnings.warn(
"django.contrib.webdesign will be removed in Django 1.10. The "
"{% lorem %} tag is now included in the built-in tags.",
RemovedInDjango110Warning
)
|
gylian/sickrage | refs/heads/master | lib/adba/aniDBtvDBmaper.py | 24 | #!/usr/bin/env python
#
# This file is part of aDBa.
#
# aDBa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aDBa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aDBa. If not, see <http://www.gnu.org/licenses/>.
import os
import xml.etree.cElementTree as etree
import aniDBfileInfo as fileInfo
class TvDBMap():
def __init__(self,filePath=None):
self.xmlMap = fileInfo.read_tvdb_map_xml(filePath)
def get_tvdb_for_anidb(self,anidb_id):
return self._get_x_for_y(anidb_id,"anidbid","tvdbid")
def get_anidb_for_tvdb(self,tvdb_id):
return self._get_x_for_y(tvdb_id,"tvdbid","anidbid")
def _get_x_for_y(self,xValue,x,y):
#print("searching "+x+" with the value "+str(xValue)+" and want to give back "+y)
xValue = str(xValue)
for anime in self.xmlMap.findall("anime"):
try:
if anime.get(x,False) == xValue:
return int(anime.get(y,0))
except ValueError, e:
continue
return 0
def get_season_episode_for_anidb_absoluteNumber(self,anidb_id,absoluteNumber):
        # NOTE: this can't be done without the length of each season from thetvdb
#TODO: implement
season = 0
episode = 0
for anime in self.xmlMap.findall("anime"):
if int(anime.get("anidbid",False)) == anidb_id:
defaultSeason = int(anime.get("defaulttvdbseason",1))
return (season,episode)
def get_season_episode_for_tvdb_absoluteNumber(self,anidb_id,absoluteNumber):
#TODO: implement
season = 0
episode = 0
return (season,episode) |
LinDA-tools/LindaWorkbench | refs/heads/master | linda/query_designer/views.py | 1 | from datetime import datetime
import json
import urllib
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.utils.http import urlquote
import requests
from linda_app.models import DatasourceDescription, VocabularyProperty, Query, get_configuration
from linda_app.settings import LINDA_HOME
def designer_defaults():
params = {
'datasources': list(DatasourceDescription.objects.all()),
'RDF2ANY_SERVER': get_configuration().rdf2any_server
}
params['datasources'].insert(0,
DatasourceDescription(title="All private data dources", name="all", is_public=False
, uri=LINDA_HOME + "sparql/all/", createdOn=datetime.today(),
updatedOn=datetime.today()))
return params
# Home page
def index(request):
params = designer_defaults()
endpoint = request.GET.get('endpoint')
dt_id = request.GET.get('dt_id')
if endpoint:
params['datasource_default'] = endpoint
elif dt_id:
params['datasource_default'] = DatasourceDescription.objects.get(name=request.GET.get('dt_id'))
if not params['datasource_default']:
return Http404
return render(request, "builder_advanced/index.html", params)
# Load an existing design
def load_design(request, pk):
params = designer_defaults()
params['query'] = Query.objects.get(pk=pk)
try:
params['datasource_default'] = DatasourceDescription.objects.filter(uri=params['query'].endpoint)[0]
except IndexError:
params['datasource_default'] = params['query'].endpoint
if not params['query']:
raise Http404
return render(request, "builder_advanced/index.html", params)
# API calls
# get endpoint by data source name
def get_endpoint_from_dt_name(dt_name):
'''
if dt_name != "all": # search in all private data source
datasources = DatasourceDescription.objects.filter(name=dt_name)
if not datasources: # data source not found by name
raise Http404
return datasources[0].get_endpoint()
else:
return get_configuration().private_sparql_endpoint
'''
return dt_name
# Execute a SparQL query on an endpoint and return json response
def sparql_query_json(endpoint, query, timeout=None, append_slash=False, http_response=True):
# encode the query
query_enc = urlquote(query, safe='')
# ClioPatria bugfix
if append_slash and endpoint[-1] != '/':
endpoint += '/'
# get query results and turn them into json
# with &output=json we support non-standard endpoints like IMDB & World Factbook
response = requests.get(
endpoint + "?Accept=" + urlquote(
"application/sparql-results+json") + "&query=" + query_enc + "&format=json&output=json", timeout=timeout)
# get encoding - if missing, asume utf
encoding = response.encoding
if not encoding:
encoding = 'utf-8'
if response.encoding != 'utf-8':
text = bytes(response.text, encoding).decode('utf-8-sig')
else:
text = response.text
# ClioPatria bugfix
if not append_slash:
try:
j_obj = json.loads(text)
except:
return sparql_query_json(endpoint, query, timeout, append_slash=True)
if response.status_code != 200:
return HttpResponse(text, status=response.status_code)
# return the response
if http_response:
return HttpResponse(text, "application/json")
else:
return j_obj
# Get active classes in a data source
def active_classes(request, dt_name):
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
# editor classes
if request.GET.get('q'):
q = request.GET.get('q')
if request.GET.get('prefix'):
regex = request.GET.get('prefix') + '(.)*' + q + '(.)*'
else:
regex = '^http://(/)*(.)*' + q + '(.)*'
query = 'select distinct ?Concept where {[] a ?Concept. FILTER regex(str(?Concept), "' + regex + '" , "i")} LIMIT 20'
else:
# get page
p = request.GET.get('p', '1')
# check if searching distinct
if request.GET.get('distinct'):
distinct = "DISTINCT"
else:
distinct = ""
# query to get all classes with at least one instance
classes_query_paginate_by = 10000
query = "SELECT " + distinct + " ?Concept WHERE { ?s a ?Concept } LIMIT " + str(classes_query_paginate_by) + " OFFSET " + str(
(int(p) - 1) * classes_query_paginate_by)
return sparql_query_json(endpoint, query)
# Get active classes in a data source
def active_root_classes(request, dt_name):
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
# query to get all classes with at least one instance
query = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nSELECT DISTINCT ?class ((count(?x)) AS ?cnt) WHERE {?x a ?class. FILTER NOT EXISTS {?class rdfs:subClassOf ?parentClass.} } GROUP BY ?class ORDER BY DESC (?cnt)"
return sparql_query_json(endpoint, query)
# Get active subclasses in a data source
def active_subclasses(request, dt_name):
# get parent class
if not request.GET.get('parent_class'):
raise Http404
parent_class = urllib.parse.unquote(request.GET.get('parent_class'))
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
# query to get all classes with at least one instance
query = "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nSELECT DISTINCT ?class (count(?x) AS ?cnt) WHERE {?x a ?class. ?class rdfs:subClassOf <" + parent_class + ">. } GROUP BY ?class ORDER BY DESC (?cnt)"
return sparql_query_json(endpoint, query)
# Get active object properties in a data source
def object_properties(request, dt_name):
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
# query to get all classes with at least one instance
query = "PREFIX owl: <http://www.w3.org/2002/07/owl#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nSELECT DISTINCT ?property ?domain ?range WHERE {?property a owl:ObjectProperty. ?property rdfs:domain ?domain. ?property rdfs:range ?range}"
return sparql_query_json(endpoint, query)
# Get all properties of class instances in a data source
def active_class_properties(request, dt_name):
# get class searched
if not request.GET.get('class_uri'):
raise Http404
class_uri = urllib.parse.unquote(request.GET.get('class_uri'))
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
# get if pagination was set
if request.GET.get('page'):
p = int(request.GET['page'])
offset = (p - 1) * 200
page_str = " OFFSET " + str(offset) + " LIMIT 200"
else:
page_str = ""
# query to get all properties of a class with at least one instance
if request.GET.get('order') == "true":
query = "SELECT DISTINCT ?property (count(?x) AS ?cnt) WHERE {?x a <" + class_uri + ">. ?x ?property ?o } GROUP BY ?property ORDER BY DESC(?cnt)" + page_str
else:
query = "SELECT ?property WHERE {?x a <" + class_uri + ">. ?x ?property ?o }" + page_str
return sparql_query_json(endpoint, query)
# Get all properties in a data source
def active_properties(request, dt_name):
# get the query string
q = request.GET.get('q', '')
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
if request.GET.get('prefix'):
regex = request.GET.get('prefix') + '(.)*' + q + '(.)*'
else:
regex = '^http://(/)*(.)*' + q + '(.)*'
query = 'SELECT DISTINCT ?property WHERE {?x ?property ?o FILTER regex(str(?property), "' + regex + '" , "i")} LIMIT 20'
return sparql_query_json(endpoint, query)
def uri_to_label(uri):
label = uri.split('/')[-1].split('#')[-1].replace('_', ' ')
return urllib.parse.unquote(label)
# Suggest entities of a type
# e.g search for countries in World FactBook typing "fra"
# will return <http://wifo5-04.informatik.uni-mannheim.de/factbook/resource/France>
def get_entity_suggestions(request, dt_name):
# get query
q = request.GET.get('term', '')
q = q.replace(' ', '_')
# get instance & property type
class_uri = request.GET.get('class_uri')
property_uri = request.GET.get('property_uri')
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
regex = '^http://(/)*(.)*' + q + '(.)*'
if property_uri:
if q:
query = 'SELECT DISTINCT ?instance WHERE {?x a <' + class_uri + \
'>. ?x <' + property_uri + '> ?instance FILTER regex(str(?instance), "' + regex + '" , "i")} LIMIT 20'
else:
query = 'SELECT DISTINCT ?instance WHERE {?x a <' + class_uri + '>. ?x <' +\
property_uri + '> ?instance} LIMIT 20'
else:
if q:
query = 'SELECT DISTINCT ?instance WHERE {?instance a <' + class_uri + \
'> FILTER regex(str(?instance), "' + regex + '" , "i")} LIMIT 20'
else:
query = 'SELECT DISTINCT ?instance WHERE {?instance a <' + class_uri + '>} LIMIT 20'
# get json result
result = sparql_query_json(endpoint, query)
# make array of results
results = []
res = json.loads(result.content.decode('utf8'))
for b in res['results']['bindings']:
results.append({"value": b['instance']['value'], "label": uri_to_label(b['instance']['value'])})
return HttpResponse(json.dumps(results), "application/json")
# Get the return type of a property
def get_property_type(request, dt_name):
# get property uri
if not request.GET.get('property_uri'):
raise Http404
property_uri = urllib.parse.unquote(request.GET.get('property_uri'))
# find type and create json response
props = VocabularyProperty.objects.filter(uri=property_uri)
if not props: # could not detect the property in the vocabulary repository
tp = ""
else: # get the return type (range)
tp = props[0].range_uri()
data = json.dumps({'type': tp})
# return the response
return HttpResponse(data, "application/json")
# Get the domain of a property
def get_properties_with_domain(request, dt_name):
# get class uri
if not request.GET.get('class_uri'):
raise Http404
class_uri = urllib.parse.unquote(request.GET.get('class_uri'))
# find properties and create json response
# resembles a SparQL response json to ease the client's job
r = {"results": {
"bindings": []}
}
for p in VocabularyProperty.objects.filter(domain=class_uri):
r["results"]["bindings"].append({"property": {"value": p.uri}})
data = json.dumps(r)
# return the response
return HttpResponse(data, "application/json")
# Get number of class instances
def class_info(request, dt_name):
# get class searched
if not request.GET.get('class_uri'):
raise Http404
class_uri = urllib.parse.unquote(request.GET.get('class_uri'))
# get the endpoint of the query
endpoint = get_endpoint_from_dt_name(dt_name)
# query to get all classes with at least one instance
query = "SELECT (count(?x) AS ?cnt) WHERE {?x a <" + class_uri + ">}"
return sparql_query_json(endpoint, query)
API_QUERY_LIMIT = 100
def auto_paginate(q, page):
"""
If no limit/offset is specified inside the query, adds limit & offset to query based on current page and sets flag
to True.
Otherwise, returns query untouched and sets flag to False.
"""
if 'limit ' not in q.lower() and 'offset ' not in q.lower(): # no pagination parameters
return '%s LIMIT %d OFFSET %d' % (q, API_QUERY_LIMIT, (page - 1)*API_QUERY_LIMIT), True
else:
return q, False
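# Illustrative behaviour of auto_paginate (values follow from API_QUERY_LIMIT = 100):
#
#   auto_paginate('SELECT * WHERE {?s ?p ?o}', 2)
#   -> ('SELECT * WHERE {?s ?p ?o} LIMIT 100 OFFSET 100', True)
#   auto_paginate('SELECT * WHERE {?s ?p ?o} LIMIT 10', 1)
#   -> ('SELECT * WHERE {?s ?p ?o} LIMIT 10', False)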
# API call to execute a specific query, auto-paginate & get results
def execute_query_api(request, q_id):
# get query & current page
query = get_object_or_404(Query, pk=q_id)
page = int(request.GET.get('page', '1'))
# auto-paginate
sparql, include_links = auto_paginate(query.sparql, page)
# execute the query
result = sparql_query_json(query.endpoint, sparql, http_response=False)
if type(result) == HttpResponse: # error case
return result
else:
# include pagination links
if include_links:
links = {}
if page > 1:
links['prev'] = '/api/query/%d/execute/?page=%d' % (query.pk, page - 1)
if len(result['results']['bindings']) == API_QUERY_LIMIT:
links['next'] = '/api/query/%d/execute/?page=%d' % (query.pk, page + 1)
result['links'] = links
return HttpResponse(json.dumps(result), content_type='application/json')
|
Elucidation/ChessboardDetect | refs/heads/master | generateMLDataset.py | 1 | # coding=utf-8
import PIL.Image
import matplotlib.image as mpimg
import scipy.ndimage
import cv2 # For Sobel etc
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
np.set_printoptions(suppress=True, linewidth=200) # Better printing of arrays
# Load pt_dataset.txt and generate the windowed tiles for all the good and bad
# points in folders dataset/good dataset/bad
def loadImage(filepath, doGrayscale=False):
img_orig = PIL.Image.open(filepath)
img_width, img_height = img_orig.size
# Resize
aspect_ratio = min(500.0/img_width, 500.0/img_height)
new_width, new_height = ((np.array(img_orig.size) * aspect_ratio)).astype(int)
img = img_orig.resize((new_width,new_height), resample=PIL.Image.BILINEAR)
if (doGrayscale):
img = img.convert('L') # grayscale
img = np.array(img)
return img
import errno
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if os.path.isdir(path):
pass
else:
raise
def main():
input_data = 'pt_dataset2.txt'
WINSIZE = 5
dataset_folder = 'dataset_gray_%d' % WINSIZE
DO_GRAYSCALE = True
DO_BINARIZATION = False
DO_OPENING = False
if (DO_BINARIZATION and not DO_GRAYSCALE):
    raise ValueError('Error, must be grayscale if doing binarization.')
count_good = 0
count_bad = 0
good_features = []
good_labels = []
bad_features = []
bad_labels = []
# save all points to a file
with open(input_data, 'r') as f:
lines = [x.strip() for x in f.readlines()]
n = len(lines)/5
# n = 1
for i in range(n):
print("On %d/%d" % (i+1, n))
filename = lines[i*5]
s0 = lines[i*5+1].split()
s1 = lines[i*5+2].split()
s2 = lines[i*5+3].split()
s3 = lines[i*5+4].split()
good_pts = np.array([s1, s0], dtype=np.int).T
bad_pts = np.array([s3, s2], dtype=np.int).T
img_filepath = 'input/%s.png' % filename
if not os.path.exists(img_filepath):
img_filepath = 'input/%s.jpg' % filename
if not os.path.exists(img_filepath):
img_filepath = 'input_yt/%s.jpg' % filename
if not os.path.exists(img_filepath):
img_filepath = 'input_yt/%s.png' % filename
img = loadImage(img_filepath, DO_GRAYSCALE)
kernel = np.ones((3,3),np.uint8)
# Good points
for i in range(good_pts.shape[0]):
pt = good_pts[i,:]
if (np.any(pt <= WINSIZE) or np.any(pt >= np.array(img.shape[:2]) - WINSIZE)):
continue
else:
tile = img[pt[0]-WINSIZE:pt[0]+WINSIZE+1, pt[1]-WINSIZE:pt[1]+WINSIZE+1]
if DO_BINARIZATION:
tile = cv2.adaptiveThreshold(tile,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
if DO_OPENING:
tile = cv2.morphologyEx(tile, cv2.MORPH_OPEN, kernel)
good_features.append(tile)
good_labels.append(1)
count_good += 1
# Bad points
for i in range(bad_pts.shape[0]):
pt = bad_pts[i,:]
if (np.any(pt <= WINSIZE) or np.any(pt >= np.array(img.shape[:2]) - WINSIZE)):
continue
else:
tile = img[pt[0]-WINSIZE:pt[0]+WINSIZE+1, pt[1]-WINSIZE:pt[1]+WINSIZE+1]
if DO_BINARIZATION:
tile = cv2.adaptiveThreshold(tile,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
if DO_OPENING:
tile = cv2.morphologyEx(tile, cv2.MORPH_OPEN, kernel)
bad_features.append(tile)
bad_labels.append(0)
count_bad += 1
features = np.array(good_features + bad_features)
print(features.shape)
labels = np.array(good_labels + bad_labels, dtype=np.float32)
print(labels.shape)
np.savez('dataset2_%d' % WINSIZE, features=features, labels=labels)
  # Example of use: print(np.load('dataset2_5.npz')['features'])
print ("Finished %d good and %d bad tiles" % (count_good, count_bad))
if __name__ == '__main__':
main()
|
Kefkius/electrum-frc | refs/heads/master | lib/msqr.py | 59 | # from http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
def modular_sqrt(a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
    0 is returned if no square root exists for
these a and p.
The Tonelli-Shanks algorithm is used (except
for some simple cases in which the solution
is known from an identity). This algorithm
runs in polynomial time (unless the
generalized Riemann hypothesis is false).
"""
# Simple cases
#
if legendre_symbol(a, p) != 1:
return 0
elif a == 0:
return 0
elif p == 2:
return p
elif p % 4 == 3:
return pow(a, (p + 1) / 4, p)
# Partition p-1 to s * 2^e for an odd s (i.e.
# reduce all the powers of 2 from p-1)
#
s = p - 1
e = 0
while s % 2 == 0:
s /= 2
e += 1
# Find some 'n' with a legendre symbol n|p = -1.
# Shouldn't take long.
#
n = 2
while legendre_symbol(n, p) != -1:
n += 1
# Here be dragons!
# Read the paper "Square roots from 1; 24, 51,
# 10 to Dan Shanks" by Ezra Brown for more
# information
#
# x is a guess of the square root that gets better
# with each iteration.
# b is the "fudge factor" - by how much we're off
# with the guess. The invariant x^2 = ab (mod p)
# is maintained throughout the loop.
# g is used for successive powers of n to update
# both a and b
# r is the exponent - decreases with each update
#
x = pow(a, (s + 1) / 2, p)
b = pow(a, s, p)
g = pow(n, s, p)
r = e
while True:
t = b
m = 0
for m in xrange(r):
if t == 1:
break
t = pow(t, 2, p)
if m == 0:
return x
gs = pow(g, 2 ** (r - m - 1), p)
g = (gs * gs) % p
x = (x * gs) % p
b = (b * g) % p
r = m
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) / 2, p)
return -1 if ls == p - 1 else ls
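# Hedged usage sketch: find a square root of 10 modulo the prime 13 and verify it.
#
#   x = modular_sqrt(10, 13)     # x is 6 or 7; both satisfy x*x % 13 == 10
#   assert (x * x) % 13 == 10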
|
amenonsen/ansible | refs/heads/devel | lib/ansible/cli/adhoc.py | 29 | # Copyright: (c) 2012, Michael DeHaan <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_text
from ansible.parsing.splitter import parse_kv
from ansible.playbook import Playbook
from ansible.playbook.play import Play
from ansible.utils.display import Display
display = Display()
class AdHocCLI(CLI):
''' is an extra-simple tool/framework/API for doing 'remote things'.
this command allows you to define and run a single task 'playbook' against a set of hosts
'''
def init_parser(self):
''' create an options parser for bin/ansible '''
super(AdHocCLI, self).init_parser(usage='%prog <host-pattern> [options]',
desc="Define and run a single task 'playbook' against"
" a set of hosts",
epilog="Some modules do not make sense in Ad-Hoc (include,"
" meta, etc)")
opt_help.add_runas_options(self.parser)
opt_help.add_inventory_options(self.parser)
opt_help.add_async_options(self.parser)
opt_help.add_output_options(self.parser)
opt_help.add_connect_options(self.parser)
opt_help.add_check_options(self.parser)
opt_help.add_runtask_options(self.parser)
opt_help.add_vault_options(self.parser)
opt_help.add_fork_options(self.parser)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
# options unique to ansible ad-hoc
self.parser.add_argument('-a', '--args', dest='module_args',
help="module arguments", default=C.DEFAULT_MODULE_ARGS)
self.parser.add_argument('-m', '--module-name', dest='module_name',
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
self.parser.add_argument('args', metavar='pattern', help='host pattern')
def post_process_args(self, options):
'''Post process and validate options for bin/ansible '''
options = super(AdHocCLI, self).post_process_args(options)
display.verbosity = options.verbosity
self.validate_conflicts(options, runas_opts=True, fork_opts=True)
return options
def _play_ds(self, pattern, async_val, poll):
check_raw = context.CLIARGS['module_name'] in ('command', 'win_command', 'shell', 'win_shell', 'script', 'raw')
mytask = {'action': {'module': context.CLIARGS['module_name'], 'args': parse_kv(context.CLIARGS['module_args'], check_raw=check_raw)}}
# avoid adding to tasks that don't support it, unless set, then give user an error
if context.CLIARGS['module_name'] not in ('include_role', 'include_tasks') and any(frozenset((async_val, poll))):
mytask['async_val'] = async_val
mytask['poll'] = poll
return dict(
name="Ansible Ad-Hoc",
hosts=pattern,
gather_facts='no',
tasks=[mytask])
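    # For illustration (not authoritative), _play_ds('all', 0, 0) with "-m ping" builds roughly:
    #   {'name': 'Ansible Ad-Hoc', 'hosts': 'all', 'gather_facts': 'no',
    #    'tasks': [{'action': {'module': 'ping', 'args': {}}}]}
    # async_val/poll are only attached when one of them is truthy and the action is
    # not include_role/include_tasks.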
def run(self):
''' create and execute the single task playbook '''
super(AdHocCLI, self).run()
# only thing left should be host pattern
pattern = to_text(context.CLIARGS['args'], errors='surrogate_or_strict')
sshpass = None
becomepass = None
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
# get basic objects
loader, inventory, variable_manager = self._play_prereqs()
try:
hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
except AnsibleError:
if context.CLIARGS['subset']:
raise
else:
hosts = []
display.warning("No hosts matched, nothing to do")
if context.CLIARGS['listhosts']:
display.display(' hosts (%d):' % len(hosts))
for host in hosts:
display.display(' %s' % host)
return 0
if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
err = "No argument passed to %s module" % context.CLIARGS['module_name']
if pattern.endswith(".yml"):
err = err + ' (did you mean to run ansible-playbook?)'
raise AnsibleOptionsError(err)
# Avoid modules that don't work with ad-hoc
if context.CLIARGS['module_name'] in ('import_playbook',):
raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
% context.CLIARGS['module_name'])
play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
# used in start callback
playbook = Playbook(loader)
playbook._entries.append(play)
playbook._file_name = '__adhoc_playbook__'
if self.callback:
cb = self.callback
elif context.CLIARGS['one_line']:
cb = 'oneline'
# Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
cb = C.DEFAULT_STDOUT_CALLBACK
else:
cb = 'minimal'
run_tree = False
if context.CLIARGS['tree']:
C.DEFAULT_CALLBACK_WHITELIST.append('tree')
C.TREE_DIR = context.CLIARGS['tree']
run_tree = True
# now create a task queue manager to execute the play
self._tqm = None
try:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=passwords,
stdout_callback=cb,
run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
run_tree=run_tree,
forks=context.CLIARGS['forks'],
)
self._tqm.send_callback('v2_playbook_on_start', playbook)
result = self._tqm.run(play)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
finally:
if self._tqm:
self._tqm.cleanup()
if loader:
loader.cleanup_all_tmp_files()
return result
|
cedar101/quepy-ko | refs/heads/master | quepy/__init__.py | 10 | # coding: utf-8
# Copyright (c) 2012, Machinalis S.R.L.
# This file is part of quepy and is distributed under the Modified BSD License.
# You should have received a copy of license in the LICENSE file.
#
# Authors: Rafael Carrascosa <[email protected]>
# Gonzalo Garcia Berrotaran <[email protected]>
"""
Quepy converts Natural Language Questions to database queries.
"""
VERSION = 0.2
import logging
from quepy.quepyapp import install, QuepyApp
def set_loglevel(level=logging.WARNING):
logger = logging.getLogger("quepy")
logger.setLevel(level)
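# Hedged usage sketch (the "dbpedia" app name and question are illustrative):
#
#   import quepy
#   app = quepy.install("dbpedia")
#   target, query, metadata = app.get_query("What is a blowtorch?")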
|
dweinstein/mitmproxy | refs/heads/master | libmproxy/web/app.py | 12 | import os.path
import re
import tornado.web
import tornado.websocket
import logging
import json
from .. import version, filt
class APIError(tornado.web.HTTPError):
pass
class RequestHandler(tornado.web.RequestHandler):
def set_default_headers(self):
super(RequestHandler, self).set_default_headers()
self.set_header("Server", version.NAMEVERSION)
self.set_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
self.add_header(
"Content-Security-Policy",
"default-src 'self'; "
"connect-src 'self' ws://* ; "
"style-src 'self' 'unsafe-inline'"
)
@property
def json(self):
if not self.request.headers.get("Content-Type").startswith("application/json"):
return None
return json.loads(self.request.body)
@property
def state(self):
return self.application.master.state
@property
def master(self):
return self.application.master
@property
def flow(self):
flow_id = str(self.path_kwargs["flow_id"])
flow = self.state.flows.get(flow_id)
if flow:
return flow
else:
raise APIError(400, "Flow not found.")
def write_error(self, status_code, **kwargs):
if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
self.finish(kwargs["exc_info"][1].log_message)
else:
super(RequestHandler, self).write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
def get(self):
_ = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
self.render("index.html")
class FiltHelp(RequestHandler):
def get(self):
self.write(dict(
commands=filt.help
))
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
# raise an error if inherited class doesn't specify its own instance.
connections = None
def open(self):
self.connections.add(self)
def on_close(self):
self.connections.remove(self)
@classmethod
def broadcast(cls, **kwargs):
message = json.dumps(kwargs, ensure_ascii=False)
for conn in cls.connections:
try:
conn.write_message(message)
except:
logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
connections = set()
class Flows(RequestHandler):
def get(self):
self.write(dict(
data=[f.get_state(short=True) for f in self.state.flows]
))
class ClearAll(RequestHandler):
def post(self):
self.state.clear()
class AcceptFlows(RequestHandler):
def post(self):
self.state.flows.accept_all(self.master)
class AcceptFlow(RequestHandler):
def post(self, flow_id):
self.flow.accept_intercept(self.master)
class FlowHandler(RequestHandler):
def delete(self, flow_id):
self.flow.kill(self.master)
self.state.delete_flow(self.flow)
def put(self, flow_id):
flow = self.flow
flow.backup()
for a, b in self.json.iteritems():
if a == "request":
request = flow.request
for k, v in b.iteritems():
if k in ["method", "scheme", "host", "path", "http_version"]:
setattr(request, k, str(v))
elif k == "port":
request.port = int(v)
elif k == "headers":
request.headers.load_state(v)
else:
print "Warning: Unknown update {}.{}: {}".format(a, k, v)
elif a == "response":
response = flow.response
for k, v in b.iteritems():
if k == "msg":
response.msg = str(v)
elif k == "code":
response.status_code = int(v)
elif k == "http_version":
response.http_version = str(v)
elif k == "headers":
response.headers.load_state(v)
else:
print "Warning: Unknown update {}.{}: {}".format(a, k, v)
else:
print "Warning: Unknown update {}: {}".format(a, b)
self.state.update_flow(flow)
class DuplicateFlow(RequestHandler):
def post(self, flow_id):
self.master.duplicate_flow(self.flow)
class RevertFlow(RequestHandler):
def post(self, flow_id):
self.state.revert(self.flow)
class ReplayFlow(RequestHandler):
def post(self, flow_id):
self.flow.backup()
self.flow.response = None
self.state.update_flow(self.flow)
r = self.master.replay_request(self.flow)
if r:
raise APIError(400, r)
class FlowContent(RequestHandler):
def get(self, flow_id, message):
message = getattr(self.flow, message)
if not message.content:
raise APIError(400, "No content.")
content_encoding = message.headers.get("Content-Encoding", None)
if content_encoding:
content_encoding = re.sub(r"[^\w]", "", content_encoding)
self.set_header("Content-Encoding", content_encoding)
original_cd = message.headers.get("Content-Disposition", None)
filename = None
if original_cd:
filename = re.search("filename=([\w\" \.\-\(\)]+)", original_cd)
if filename:
filename = filename.group(1)
if not filename:
filename = self.flow.request.path.split("?")[0].split("/")[-1]
filename = re.sub(r"[^\w\" \.\-\(\)]", "", filename)
cd = "attachment; filename={}".format(filename)
self.set_header("Content-Disposition", cd)
self.set_header("Content-Type", "application/text")
self.set_header("X-Content-Type-Options", "nosniff")
self.set_header("X-Frame-Options", "DENY")
self.write(message.content)
class Events(RequestHandler):
def get(self):
self.write(dict(
data=list(self.state.events)
))
class Settings(RequestHandler):
def get(self):
self.write(dict(
data=dict(
version=version.VERSION,
mode=str(self.master.server.config.mode),
intercept=self.state.intercept_txt
)
))
def put(self):
update = {}
for k, v in self.json.iteritems():
if k == "intercept":
self.state.set_intercept(v)
update[k] = v
else:
print("Warning: Unknown setting {}: {}".format(k, v))
ClientConnection.broadcast(
type="settings",
cmd="update",
data=update
)
class Application(tornado.web.Application):
def __init__(self, master, debug):
self.master = master
handlers = [
(r"/", IndexHandler),
(r"/filter-help", FiltHelp),
(r"/updates", ClientConnection),
(r"/events", Events),
(r"/flows", Flows),
(r"/flows/accept", AcceptFlows),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/accept", AcceptFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content", FlowContent),
(r"/settings", Settings),
(r"/clear", ClearAll),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
cookie_secret=os.urandom(256),
debug=debug,
)
super(Application, self).__init__(handlers, **settings)
|
code-ape/pyspec | refs/heads/master | pyspec/importer.py | 1 | import inspect
import imp
import sys, os
def pyspec_import(target_name):
stack = inspect.stack()
frames = [frame_obj[0] for frame_obj in stack]
last_frame = frames[1]
abs_path_name = os.path.abspath('..')
sys.path.append(abs_path_name)
path_name = "../" + target_name + ".py"
temp = imp.find_module(target_name)
mod = imp.load_module(target_name, *temp)
last_frame.f_globals[target_name] = mod
last_frame.f_locals[target_name] = mod
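# A hypothetical usage sketch (the module name is a placeholder): calling
#   pyspec_import("calculator")
# from a spec file loads calculator.py from the parent directory (added to
# sys.path above) and binds it in the caller's globals and locals, as if it
# had been imported there directly.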
|
metaml/nupic | refs/heads/master | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_pdf.py | 69 | # -*- coding: iso-8859-1 -*-
"""
A PDF matplotlib backend (not yet complete)
Author: Jouni K Seppänen <[email protected]>
"""
from __future__ import division
import os
import re
import sys
import time
import warnings
import zlib
import numpy as npy
from cStringIO import StringIO
from datetime import datetime
from math import ceil, cos, floor, pi, sin
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import __version__, rcParams, get_data_path
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, reverse_dict, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, Bbox, BboxBase
from matplotlib.path import Path
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
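# A minimal sketch of turning this on (assuming a standard matplotlib install;
# shown as comments only):
#
#   in matplotlibrc:       pdf.use14corefonts : True
#   or programmatically:   matplotlib.rcParams['pdf.use14corefonts'] = True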
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g. font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * the alpha channel of images
# * image compression could be improved (PDF supports png-like compression)
# * encoding of fonts, including mathtext fonts and unicode support
# * Type 1 font support (i.e., "pdf.use_afm")
# * TTF support has lots of small TODOs, e.g. how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
# * use_tex
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(' '.join(strings[lasti:]))
return '\n'.join(result)
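# For example (values chosen purely for illustration):
#   fill(['1', '2', 'm'])                   -> '1 2 m'
#   fill(['aaa', 'bbb', 'ccc'], linelen=5)  -> 'aaa\nbbb\nccc'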
_string_escape_regex = re.compile(r'([\\()])')
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, float):
if not npy.isfinite(obj):
raise ValueError, "Can only output finite numbers in PDF"
r = "%.10f" % obj
return r.rstrip('0').rstrip('.')
# Integers are written as such.
    # (bool is a subclass of int; exclude it here so booleans fall through to
    # the branch below and come out as the PDF keywords true/false)
    elif isinstance(obj, (int, long)) and not isinstance(obj, bool):
return "%d" % obj
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif is_string_like(obj):
return '(' + _string_escape_regex.sub(r'\\\1', obj) + ')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = ["<<"]
r.extend(["%s %s" % (Name(key).pdfRepr(), pdfRepr(val))
for key, val in obj.items()])
r.append(">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = ["["]
r.extend([pdfRepr(val) for val in obj])
r.append("]")
return fill(r)
# Booleans.
elif isinstance(obj, bool):
return ['false', 'true'][obj]
# The null keyword.
elif obj is None:
return 'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight: z = time.altzone
else: z = time.timezone
if z == 0: r += 'Z'
elif z < 0: r += "+%02d'%02d'" % ((-z)//3600, (-z)%3600)
else: r += "-%02d'%02d'" % (z//3600, z%3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
raise TypeError, \
"Don't know a PDF representation for %s objects." \
% type(obj)
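# A few illustrative mappings (derived from the branches above; kept as
# comments so importing this module is unaffected):
#   pdfRepr(0.25)             -> '0.25'
#   pdfRepr(3)                -> '3'
#   pdfRepr('foo (bar)')      -> '(foo \(bar\))'
#   pdfRepr({'Type': 'Page'}) -> '<< /Type (Page) >>'
#   pdfRepr(None)             -> 'null'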
class Reference:
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return "%d 0 R" % self.id
def write(self, contents, file):
write = file.write
write("%d 0 obj\n" % self.id)
write(pdfRepr(contents))
write("\nendobj\n")
class Name:
"""PDF name object."""
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
self.name = self._regex.sub(Name.hexify, name)
def __repr__(self):
return "<Name %s>" % self.name
def hexify(match):
return '#%02x' % ord(match.group())
hexify = staticmethod(hexify)
def pdfRepr(self):
return '/' + self.name
class Operator:
"""PDF operator object."""
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
# PDF operators (not an exhaustive list)
_pdfops = dict(close_fill_stroke='b', fill_stroke='B', fill='f',
closepath='h', close_stroke='s', stroke='S', endpath='n',
begin_text='BT', end_text='ET',
curveto='c', rectangle='re', lineto='l', moveto='m',
concat_matrix='cm',
use_xobject='Do',
setgray_stroke='G', setgray_nonstroke='g',
setrgb_stroke='RG', setrgb_nonstroke='rg',
setcolorspace_stroke='CS', setcolorspace_nonstroke='cs',
setcolor_stroke='SCN', setcolor_nonstroke='scn',
setdash='d', setlinejoin='j', setlinecap='J', setgstate='gs',
gsave='q', grestore='Q',
textpos='Td', selectfont='Tf', textmatrix='Tm',
show='Tj', showkern='TJ',
setlinewidth='w', clip='W')
Op = Bunch(**dict([(name, Operator(value))
for name, value in _pdfops.items()]))
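# For example, Op.lineto is Operator('l'), so a call such as
# file.output(3, 4, Op.lineto) writes the content-stream fragment '3 4 l'.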
class Stream:
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
def __init__(self, id, len, file, extra=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header """
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None: self.extra = dict()
else: self.extra = extra
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression']:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = StringIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write("%d 0 obj\n" % self.id)
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write("\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write("\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write("\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
class PdfFile:
"""PDF file with one page."""
def __init__(self, width, height, dpi, filename):
self.width, self.height = width, height
self.dpi = dpi
if rcParams['path.simplify']:
self.simplify = (width * dpi, height * dpi)
else:
self.simplify = None
self.nextObject = 1 # next free object id
self.xrefTable = [ [0, 65535, 'the zero object'] ]
self.passed_in_file_object = False
if is_string_like(filename):
fh = file(filename, 'wb')
elif is_writable_file_like(filename):
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write("%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write("%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.infoObject = self.reserveObject('info')
pagesObject = self.reserveObject('pages')
thePageObject = self.reserveObject('page 0')
contentObject = self.reserveObject('contents of page 0')
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.XObjectObject = self.reserveObject('external objects')
resourceObject = self.reserveObject('resources')
root = { 'Type': Name('Catalog'),
'Pages': pagesObject }
self.writeObject(self.rootObject, root)
info = { 'Creator': 'matplotlib ' + __version__ \
+ ', http://matplotlib.sf.net',
'Producer': 'matplotlib pdf backend',
'CreationDate': datetime.today() }
# Possible TODO: Title, Author, Subject, Keywords
self.writeObject(self.infoObject, info)
pages = { 'Type': Name('Pages'),
'Kids': [ thePageObject ],
'Count': 1 }
self.writeObject(pagesObject, pages)
thePage = { 'Type': Name('Page'),
'Parent': pagesObject,
'Resources': resourceObject,
'MediaBox': [ 0, 0, dpi*width, dpi*height ],
'Contents': contentObject }
self.writeObject(thePageObject, thePage)
# self.fontNames maps filenames to internal font names
self.fontNames = {}
self.nextFont = 1 # next free internal font name
self.fontInfo = {} # information on fonts: metrics, encoding
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
        # The PDF spec recommends including every procset
procsets = [ Name(x)
for x in "PDF Text ImageB ImageC ImageI".split() ]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = { 'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'ProcSet': procsets }
self.writeObject(resourceObject, resources)
# Start the content stream of the page
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
def close(self):
# End the content stream and write out the various deferred
# objects
self.endStream()
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in self.alphaStates.values()]))
self.writeHatches()
xobjects = dict(self.images.values())
for tup in self.markers.values():
xobjects[tup[0]] = tup[1]
for name, value in self.multi_byte_charprocs.items():
xobjects[name] = value
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill(map(pdfRepr, data)))
self.write('\n')
def beginStream(self, id, len, extra=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra)
def endStream(self):
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(fontprop, fontext='afm')
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in self.fontNames.items():
if filename.endswith('.afm'):
fontdictObject = self._write_afm_font(filename)
elif filename.endswith('.pfb') or filename.endswith('.pfa'):
# a Type 1 font; limited support for now
fontdictObject = self.embedType1(filename, self.fontInfo[Fx])
else:
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fontdictObject = self.embedTTF(realpath, chars[1])
fonts[Fx] = fontdictObject
#print >>sys.stderr, filename
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
fh = file(filename)
font = AFM(fh)
fh.close()
fontname = font.get_fontname()
fontdict = { 'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding') }
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedType1(self, filename, fontinfo):
# TODO: font effects such as SlantFont
fh = open(filename, 'rb')
matplotlib.verbose.report(
'Embedding Type 1 font ' + filename, 'debug')
try:
fontdata = fh.read()
finally:
fh.close()
font = FT2Font(filename)
widthsObject, fontdescObject, fontdictObject, fontfileObject = \
[ self.reserveObject(n) for n in
('font widths', 'font descriptor',
'font dictionary', 'font file') ]
firstchar = 0
lastchar = len(fontinfo.widths) - 1
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(font.postscript_name),
'FirstChar': 0,
'LastChar': lastchar,
'Widths': widthsObject,
'FontDescriptor': fontdescObject,
}
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [ Name(ch) for ch in enc ]
differencesArray = [ 0 ] + differencesArray
fontdict.update({
'Encoding': { 'Type': Name('Encoding'),
'Differences': differencesArray },
})
_, _, fullname, familyname, weight, italic_angle, fixed_pitch, \
ul_position, ul_thickness = font.get_ps_font_info()
flags = 0
if fixed_pitch: flags |= 1 << 0 # fixed width
if 0: flags |= 1 << 1 # TODO: serif
if 1: flags |= 1 << 2 # TODO: symbolic (most TeX fonts are)
else: flags |= 1 << 5 # non-symbolic
if italic_angle: flags |= 1 << 6 # italic
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(font.postscript_name),
'Flags': flags,
'FontBBox': font.bbox,
'ItalicAngle': italic_angle,
'Ascent': font.ascender,
'Descent': font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': familyname,
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
#'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdictObject, fontdict)
self.writeObject(widthsObject, fontinfo.widths)
self.writeObject(fontdescObject, descriptor)
t1font = type1font.Type1Font(filename)
self.beginStream(fontfileObject.id, None,
{ 'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0 })
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdictObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
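    # As an illustration, a single range covering U+0041..U+0043 would be
    # substituted into the template above as:
    #   1 beginbfrange
    #   <0041> <0043> [<0041> <0042> <0043>]
    #   endbfrange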
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(str(filename))
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest: return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0: return floor(value)
else: return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type' : Name('Font'),
'BaseFont' : ps_name,
'FirstChar' : firstchar,
'LastChar' : lastchar,
'FontDescriptor' : fontdescObject,
'Subtype' : Name('Type3'),
'Name' : descriptor['FontName'],
'FontBBox' : bbox,
'FontMatrix' : [ .001, 0, 0, .001, 0, 0 ],
'CharProcs' : charprocsObject,
'Encoding' : {
'Type' : Name('Encoding'),
'Differences' : differencesArray},
'Widths' : widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
unicode = decode_char(charcode)
width = font.load_char(unicode, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [ get_char_width(charcode) for charcode in range(firstchar, lastchar+1) ]
descriptor['MaxWidth'] = max(widths)
            # Make the "Differences" array, separate the ccodes < 255 from
            # the multi-byte ccodes, and build the whole set of glyph ids
            # that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)
charprocs = {}
charprocsRef = {}
for charname, stream in rawcharprocs.items():
charprocDict = { 'Length': len(stream) }
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find("d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type' : Name('Font'),
'Subtype' : Name('CIDFontType2'),
'BaseFont' : ps_name,
'CIDSystemInfo' : {
'Registry' : 'Adobe',
'Ordering' : 'Identity',
'Supplement' : 0 },
'FontDescriptor' : fontdescObject,
'W' : wObject,
'CIDToGIDMap' : cidToGidMapObject
}
type0FontDict = {
'Type' : Name('Font'),
'Subtype' : Name('Type0'),
'BaseFont' : ps_name,
'Encoding' : Name('Identity-H'),
'DescendantFonts' : [cidFontDictObject],
'ToUnicode' : toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
fontfile = open(filename, 'rb')
length1 = 0
while True:
data = fontfile.read(4096)
if not data: break
length1 += len(data)
self.currentstream.write(data)
fontfile.close()
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = [u'\u0000'] * 65536
cmap = font.get_charmap()
unicode_mapping = []
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange)))
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
                             {'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
ps_name = Name(font.get_sfnt()[(1,0,0,6)])
pclt = font.get_sfnt_table('pclt') \
or { 'capHeight': 0, 'xHeight': 0 }
post = font.get_sfnt_table('post') \
or { 'italicAngle': (0,0) }
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False #ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH: flags |= 1 << 0
if 0: flags |= 1 << 1 # TODO: serif
if symbolic: flags |= 1 << 2
else: flags |= 1 << 5
if sf & ITALIC: flags |= 1 << 6
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type' : Name('FontDescriptor'),
'FontName' : ps_name,
'Flags' : flags,
'FontBBox' : [ cvt(x, nearest=False) for x in font.bbox ],
'Ascent' : cvt(font.ascender, nearest=False),
'Descent' : cvt(font.descender, nearest=False),
'CapHeight' : cvt(pclt['capHeight'], nearest=False),
'XHeight' : cvt(pclt['xHeight']),
'ItalicAngle' : post['italicAngle'][1], # ???
'StemV' : 0 # ???
}
        # Subsetting to a Type 3 font does not work for OpenType (.otf)
        # fonts that embed a PostScript CFF font, so avoid that --
        # save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
warnings.warn(("'%s' can not be subsetted into a Type 3 font. " +
"The entire font will be embedded in the output.") %
os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, { 'Type': Name('ExtGState'),
'CA': alpha, 'ca': alpha })
return name
def hatchPattern(self, lst):
pattern = self.hatchPatterns.get(lst, None)
if pattern is not None:
return pattern[0]
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[lst] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 144.0
density = 24.0
for lst, name in self.hatchPatterns.items():
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = { 'Procsets':
[ Name(x) for x in "PDF Text ImageB ImageC ImageI".split() ] }
self.beginStream(
ob.id, None,
{ 'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res })
# lst is a tuple of stroke color, fill color,
# number of - lines, number of / lines,
# number of | lines, number of \ lines
rgb = lst[0]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_stroke)
if lst[1] is not None:
rgb = lst[1]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
if lst[2]: # -
for j in npy.arange(0.0, sidelen, density/lst[2]):
self.output(0, j, Op.moveto,
sidelen, j, Op.lineto)
if lst[3]: # /
for j in npy.arange(0.0, sidelen, density/lst[3]):
self.output(0, j, Op.moveto,
sidelen-j, sidelen, Op.lineto,
sidelen-j, 0, Op.moveto,
sidelen, j, Op.lineto)
if lst[4]: # |
for j in npy.arange(0.0, sidelen, density/lst[4]):
self.output(j, 0, Op.moveto,
j, sidelen, Op.lineto)
if lst[5]: # \
for j in npy.arange(sidelen, 0.0, -density/lst[5]):
self.output(sidelen, j, Op.moveto,
j, sidelen, Op.lineto,
j, 0, Op.moveto,
0, j, Op.lineto)
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
## These two from backend_ps.py
## TODO: alpha (SMask, p. 518 of pdf spec)
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
a = rgba[:,:,3:]
return h, w, rgb.tostring(), a.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def writeImages(self):
for img, pair in self.images.items():
img.flipud_out()
if img.is_grayscale:
height, width, data = self._gray(img)
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
else:
height, width, data, adata = self._rgb(img)
smaskObject = self.reserveObject("smask")
stream = self.beginStream(
smaskObject.id,
self.reserveObject('length of smask stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(adata) # TODO: predictors (i.e., output png)
self.endStream()
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceRGB'), 'BitsPerComponent': 8,
'SMask': smaskObject})
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
img.flipud_out()
def markerObject(self, path, trans, fillp, lw):
"""Return name of a marker XObject representing the given path."""
key = (path, trans, fillp is not None, lw)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
self.markers[key] = (name, ob, path, trans, fillp, lw)
else:
name = result[0]
return name
def writeMarkers(self):
for tup in self.markers.values():
name, object, path, trans, fillp, lw = tup
bbox = path.get_extents(trans)
bbox = bbox.padded(lw * 0.5)
self.beginStream(
object.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents) })
self.writePath(path, trans)
if fillp:
self.output(Op.fill_stroke)
else:
self.output(Op.stroke)
self.endStream()
#@staticmethod
def pathOperations(path, transform, simplify=None):
tpath = transform.transform_path(path)
cmds = []
last_points = None
for points, code in tpath.iter_segments(simplify):
if code == Path.MOVETO:
cmds.extend(points)
cmds.append(Op.moveto)
elif code == Path.LINETO:
cmds.extend(points)
cmds.append(Op.lineto)
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
cmds.extend(points[2:])
cmds.append(Op.curveto)
elif code == Path.CURVE4:
cmds.extend(points)
cmds.append(Op.curveto)
elif code == Path.CLOSEPOLY:
cmds.append(Op.closepath)
last_points = points
return cmds
pathOperations = staticmethod(pathOperations)
def writePath(self, path, transform):
cmds = self.pathOperations(
path, transform, self.simplify)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell()
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell()
self.write("xref\n0 %d\n" % self.nextObject)
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print >>sys.stderr, \
'No offset for object %d (%s)' % (i, name)
borken = True
else:
self.write("%010d %05d n \n" % (offset, generation))
i += 1
if borken:
raise AssertionError, 'Indirect object does not exist'
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write("trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject }))
# Could add 'ID'
self.write("\nstartxref\n%d\n%%%%EOF\n" % self.startxref)
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, dpi, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.file.used_characters = self.used_characters = {}
self.mathtext_parser = MathTextParser("Pdf")
self.dpi = dpi
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = gc._fillcolor
gc._fillcolor = fillcolor
delta = self.gc.delta(gc)
if delta: self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, (str, unicode)):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# MGDTODO: Support clippath here
gc = self.new_gc()
if bbox is not None:
gc.set_clip_rectangle(bbox)
self.check_gc(gc)
h, w = im.get_size_out()
h, w = 72.0*h/self.image_dpi, 72.0*w/self.image_dpi
imob = self.file.imageObject(im)
self.file.output(Op.gsave, w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
stream = self.file.writePath(path, transform)
self.file.output(self.gc.paint())
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
self.check_gc(gc, rgbFace)
fillp = rgbFace is not None
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fillp, self.gc._linewidth)
tpath = trans.transform_path(path)
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def _setup_textpos(self, x, y, descent, angle, oldx=0, oldy=0, olddescent=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, (y + descent) - (oldy + olddescent), Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output( cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, descent, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
fontsize *= self.dpi/72.0
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype), Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
fontsize *= self.dpi/72.0
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, self.dpi)
page = iter(dvi).next()
dvi.close()
# Gather font information and do some setup for combining
# characters into strings.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
psfont = self.tex_font_mapping(dvifont.texname)
pdfname = self.file.fontName(psfont.filename)
if self.file.fontInfo.get(pdfname, None) is None:
self.file.fontInfo[pdfname] = Bunch(
encodingfile=psfont.encoding,
widths=dvifont.widths,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
seq += [['text', x1, y1, [chr(glyph)], x1+width]]
# Find consecutive text strings with constant x coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx = 0, 0
while i < len(seq)-1:
elt, next = seq[i:i+2]
if elt[0] == next[0] == 'text' and elt[2] == next[2]:
offset = elt[4] - next[1]
if abs(offset) < 0.1:
elt[3][-1] += next[3][0]
elt[4] += next[4]-next[1]
else:
elt[3] += [offset*1000.0/dvifont.size, next[3][0]]
elt[4] = next[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform((elt[1], elt[2]))
self._setup_textpos(curx, cury, 0, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g. variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0,0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype == 3:
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
        # 1-byte or exclusively 2-byte characters, and output each
        # chunk with a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
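        # For instance (hypothetical input), with a Type 3 font the string
        # u"x\u2202y" is split by check_simple_method() below into the chunks
        # [(1, ['x']), (2, [u'\u2202']), (1, ['y'])], so the woven method is used.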
self.check_gc(gc, gc._rgb)
if ismath: return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points() * self.dpi/72.0
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
descent = -b * fontsize / 1000
fonttype = 42
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
descent = font.get_descent() / 64.0
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunks the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, str) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = unicode(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1
and chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, descent, angle)
self.file.output(self.encode_string(s, fonttype), Op.show, Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
olddescent = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, descent, 0, oldx, 0, olddescent, 0)
self.file.output(self.encode_string(chunk, fonttype), Op.show)
oldx = newx
olddescent = descent
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, self.dpi)
page = iter(dvi).next()
dvi.close()
# A total height (including the descent) needs to be returned.
return page.width, page.height+page.descent, page.descent
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale
h *= scale
d *= scale
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(prop, fontext='afm')
font = self.afm_font_cache.get(filename)
if font is None:
fh = file(filename)
font = AFM(fh)
self.afm_font_cache[filename] = font
fh.close()
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(str(filename))
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), self.dpi)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / self.dpi, self.file.height / self.dpi
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return `d`
def _strokep(self):
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def _fillp(self):
return ((self._fillcolor is not None or self._hatch) and
(len(self._fillcolor) <= 3 or self._fillcolor[3] != 0.0))
def close_and_paint(self):
if self._strokep():
if self._fillp():
return Op.close_fill_stroke
else:
return Op.close_stroke
else:
if self._fillp():
return Op.fill
else:
return Op.endpath
def paint(self):
if self._strokep():
if self._fillp():
return Op.fill_stroke
else:
return Op.stroke
else:
if self._fillp():
return Op.fill
else:
return Op.endpath
capstyles = { 'butt': 0, 'round': 1, 'projecting': 2 }
joinstyles = { 'miter': 0, 'round': 1, 'bevel': 2 }
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha):
name = self.file.alphaState(alpha)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch = hatch.lower()
lst = ( self._rgb,
self._fillcolor,
hatch.count('-') + hatch.count('+'),
hatch.count('/') + hatch.count('x'),
hatch.count('|') + hatch.count('+'),
hatch.count('\\') + hatch.count('x') )
name = self.file.hatchPattern(lst)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while (self._cliprect, self._clippath) != (cliprect, clippath) \
and self.parent is not None:
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if (self._cliprect, self._clippath) != (cliprect, clippath):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
cmds.extend(
PdfFile.pathOperations(
*clippath.get_transformed_path_and_affine()) +
[Op.clip, Op.endpath])
return cmds
commands = (
(('_cliprect', '_clippath'), clip_cmd), # must come first since may pop
(('_alpha',), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
different = bool(ours != theirs)
except ValueError:
ours = npy.asarray(ours)
theirs = npy.asarray(theirs)
different = ours.shape != theirs.shape or npy.any(ours != theirs)
if different:
break
if different:
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
self._fillcolor = other._fillcolor
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPdf(thisFig)
manager = FigureManagerPdf(canvas, num)
return manager
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
ppi = 72 # Postscript points in an inch
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(ppi)
width, height = self.figure.get_size_inches()
file = PdfFile(width, height, ppi, filename)
renderer = MixedModeRenderer(
width, height, ppi, RendererPdf(file, ppi, image_dpi))
self.figure.draw(renderer)
renderer.finalize()
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureManager = FigureManagerPdf
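# A minimal end-to-end sketch of driving this backend (assuming a standard
# matplotlib install; kept as comments so the module remains import-only):
#   import matplotlib
#   matplotlib.use('PDF')
#   import matplotlib.pyplot as plt
#   plt.plot([1, 2, 3])
#   plt.savefig('figure.pdf')   # routed through FigureCanvasPdf.print_pdf above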
|
kingctan/oppia | refs/heads/master | core/storage/feedback/gae_models_test.py | 6 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.platform import models
from core.tests import test_utils
(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
CREATED_ON_FIELD = 'created_on'
LAST_UPDATED_FIELD = 'last_updated'
DELETED_FIELD = 'deleted'
FIELDS_NOT_REQUIRED = [CREATED_ON_FIELD, LAST_UPDATED_FIELD, DELETED_FIELD]
class SuggestionModelTest(test_utils.GenericTestBase):
"""Tests the SuggestionModel class."""
def setUp(self):
super(SuggestionModelTest, self).setUp()
feedback_models.SuggestionModel.create('exp_id1', 'thread_id1',
'author_id', 1, 'state_name',
'description',
{'old_content': {}})
feedback_models.SuggestionModel.create('exp_id1', 'thread_id2',
'author_id', 1, 'state_name',
'description',
{'old_content': {}})
feedback_models.SuggestionModel.create('exp_id2', 'thread_id2',
'author_id', 1, 'state_name',
'description',
{'old_content': {}})
def _get_suggestion_models_for_test(self, suggestions_list):
"""Removes fields that are set to default values in the base model and
are thus not explicitly verified in tests."""
updated_suggestions_list = []
for suggestion in suggestions_list:
suggestion_dict = suggestion.to_dict()
for field in FIELDS_NOT_REQUIRED:
if field in suggestion_dict:
suggestion_dict.pop(field)
updated_suggestions_list.append(suggestion_dict)
return updated_suggestions_list
def test_create_new_object_runs_successfully(self):
feedback_models.SuggestionModel.create('exp_id3', 'thread_id2',
'author_id', 1, 'state_name',
'description',
{'old_content': {}})
suggestion = (
feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
'exp_id3', 'thread_id2'))
self.assertEqual(suggestion.exploration_id, 'exp_id3')
self.assertEqual(suggestion.author_id, 'author_id')
self.assertEqual(suggestion.exploration_version, 1)
self.assertEqual(suggestion.state_name, 'state_name')
self.assertEqual(suggestion.description, 'description')
self.assertEqual(suggestion.state_content, {'old_content': {}})
def test_create_suggestion_fails_if_thread_already_has_suggestion(self):
with self.assertRaisesRegexp(Exception, 'There is already a feedback '
'thread with the given thread id: '
'exp_id1.thread_id1'):
feedback_models.SuggestionModel.create('exp_id1',
'thread_id1', 'author_id', 1,
'state_name',
'description',
{'old_content': {}})
def test_get_by_exploration_and_thread_id_suggestion_present(self):
actual_suggestion = [(
feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
'exp_id1', 'thread_id1'))]
expected_suggestion = [feedback_models.SuggestionModel(
id='exp_id1.thread_id1',
author_id='author_id',
exploration_id='exp_id1',
exploration_version=1,
state_name='state_name',
description='description',
state_content={'old_content': {}})]
self.assertEqual(len(self._get_suggestion_models_for_test(
actual_suggestion)), 1)
self.assertEqual(
self._get_suggestion_models_for_test(expected_suggestion),
self._get_suggestion_models_for_test(actual_suggestion))
def test_get_by_exploration_and_thread_id_no_suggestion(self):
actual_suggestion = (
feedback_models.SuggestionModel.get_by_exploration_and_thread_id(
'invalid_exp_id', 'thread_id1'))
self.assertIsNone(actual_suggestion)
|
vladmm/intellij-community | refs/heads/master | python/testData/formatter/alignDictLiteralOnValue.py | 79 | {
"a": 1,
"bbb": [
2
],
"bbbbb": 3
}
|
unkyulee/elastic-cms | refs/heads/master | src/task/modules/FSIDX.py | 1 | import os
import pwd
import time
from datetime import datetime
import xml.etree.ElementTree as ET
from sqlalchemy import create_engine
from sqlalchemy.sql import text
def run(p):
AllGood = True
# Read Query and parse parameters
root = ET.fromstring(p["action"]["query"])
SQL_CONNECTION = root.find("SQL_CONNECTION").text.strip()
TABLE = root.find("TABLE").text.strip()
    PATHs = []  # list of {"dir": ..., "idx": ...} path entries to index
for path in root.findall("PATH"):
PATHs.append({"dir":path.find("dir").text.strip(),
"idx":path.find("idx").text.strip()})
# prepare sql engine
engine = create_engine(SQL_CONNECTION)
# Loop Through the dir
for path in PATHs:
DeleteFiles(TABLE, engine, p["log"], path["idx"])
AddFiles(TABLE, engine, p["log"], path["idx"], path["dir"])
p["log"].success("All file indexing finished")
return AllGood
def DeleteFiles(TABLE, engine, log, idx):
SQL = """
SELECT ino, filepath from {} WHERE idx=:idx
""".format(TABLE)
DELSQL = """
DELETE FROM {} WHERE ino=:ino
""".format(TABLE)
log.info("scanning for removed files ... {}".format(idx))
RemovedFile = 0
with engine.connect() as conn:
result = conn.execute(text(SQL), idx=idx)
for r in result:
try:
if os.path.isfile(r.filepath):
                    # if the file still exists, check that it has the same
                    # inode: a file with the same name could be a different file
s = os.stat(r.filepath)
if s.st_ino != r.ino:
# log.info("File removed .. {} - {}\n{}".format(s.st_ino, r.id, r.filepath))
conn.execute(text(DELSQL), ino=r.ino)
RemovedFile += 1
else:
# if file doesn't exist then also delete
# log.info("File removed .. {}\n{}".format(r.id, r.filepath))
conn.execute(text(DELSQL), ino=r.ino)
RemovedFile += 1
except Exception, e:
# log.info("File removed .. {}\n{}".format(r.id, r.filepath))
conn.execute(text(DELSQL), ino=r.ino)
RemovedFile += 1
log.success("{} files removed ... {}".format(RemovedFile, idx))
def AddFiles(TABLE, engine, log, idx, path):
log.info("Searching .. {}\n{}".format(idx, path))
# get last update date
LastUpdated = GetLastUpdate(TABLE, engine, idx)
# loop recursively through the path
row_count = 0
for dirName, subDirName, fileList in os.walk(path):
for name in fileList:
try:
s = os.stat("{}/{}".format(dirName, name))
                # skip files that have not changed since the last run
if datetime.fromtimestamp(int(s.st_mtime)) <= LastUpdated:
continue
#
fileinfo = {
"ino": s.st_ino, "dev": s.st_dev, "idx": idx,
"filepath": "{}/{}".format(dirName, name),
"dir": dirName, "name": name,
"ext": os.path.splitext(name)[1],
"owner": pwd.getpwuid(s.st_uid).pw_name,
"created": time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(s.st_ctime)),
"updated": time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(s.st_mtime))
}
#
InsertFileIndex(TABLE, engine, fileinfo)
row_count += 1
except Exception, e:
log.error("{}/{}".format(dirName, name), e)
log.success("Indexed {} updated files".format(row_count))
# Get last updated timestamp
def GetLastUpdate(TABLE, engine, idx):
SQL = """
SELECT updated FROM {} WHERE idx='{}' ORDER BY updated DESC LIMIT 1
""".format(TABLE, idx)
LastUpdated = datetime(1970,1,1)
with engine.connect() as conn:
sql_result = conn.execute(text(SQL))
for r in sql_result:
LastUpdated = r.updated
return LastUpdated
# Insert the file record, or update it if it already exists (upsert)
def InsertFileIndex(TABLE, engine, f):
SQL = """
INSERT INTO {}
(ino, dev, idx, filepath, dir, name, ext, owner, created, updated)
VALUES
(:ino, :dev, :idx, :filepath, :dir,
:name, :ext, :owner, :created, :updated)
ON DUPLICATE KEY UPDATE
filepath = :filepath, dir = :dir, name = :name,
ext = :ext, owner = :owner, is_updated = 1,
created = :created, updated = :updated
""".format(TABLE)
with engine.connect() as conn:
conn.execute(text(SQL), ino=f["ino"], dev=f["dev"], idx = f["idx"],
filepath=f["filepath"], dir=f["dir"], name=f["name"],
ext=f["ext"], owner=f["owner"], created=f["created"],
updated=f["updated"])
|
rogerhil/thegamesdb | refs/heads/master | setup.py | 1 | import os
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
local_file = lambda f: open(os.path.join(os.path.dirname(__file__), f)).read()
if __name__ == '__main__':
setup(
name='thegamesdb',
version='0.9.1',
description='The Games DB API wrapper for Python',
long_description=long_description,
author='Rogerio Hilbert Lima',
author_email='[email protected]',
url='https://github.com/rogerhil/thegamesdb',
download_url='https://github.com/rogerhil/thegamesdb/tarball/0.9.1',
packages=find_packages()
)
|
jbenden/ansible | refs/heads/devel | lib/ansible/parsing/dataloader.py | 14 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import os
import json
import re
import tempfile
from yaml import YAMLError
from ansible.module_utils.six import text_type, string_types
from ansible.errors import AnsibleFileNotFound, AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.module_utils.basic import is_executable
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file, parse_vaulttext_envelope
from ansible.parsing.quoting import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Tries to determine if a path is inside a role, last dir must be 'tasks'
# this is not perfect but people should really avoid 'tasks' dirs outside roles when using Ansible.
RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep))
class DataLoader:
'''
The DataLoader class is used to load and parse YAML or JSON content,
either from a given file name or from a string that was previously
read in through other means. A Vault password can be specified, and
any vault-encrypted files will be decrypted.
Data read from files will also be cached, so the file will never be
read from disk more than once.
Usage:
dl = DataLoader()
# optionally: dl.set_vault_password('foo')
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
'''
def __init__(self):
self._basedir = '.'
self._FILE_CACHE = dict()
self._tempfiles = set()
# initialize the vault stuff with an empty password
# TODO: replace with a ref to something that can get the password
# a creds/auth provider
# self.set_vault_password(None)
self._vaults = {}
self._vault = VaultLib()
self.set_vault_secrets(None)
# TODO: since we can query vault_secrets late, we could provide this to DataLoader init
def set_vault_secrets(self, vault_secrets):
self._vault.secrets = vault_secrets
def load(self, data, file_name='<string>', show_content=True):
'''
Creates a python datastructure from the given data, which can be either
a JSON or YAML string.
'''
new_data = None
# YAML parser will take JSON as it is a subset.
if isinstance(data, AnsibleUnicode):
# The PyYAML's libyaml bindings use PyUnicode_CheckExact so
# they are unable to cope with our subclass.
# Unwrap and re-wrap the unicode so we can keep track of line
# numbers
in_data = text_type(data)
else:
in_data = data
try:
# we first try to load this data as JSON
new_data = json.loads(data)
except:
# must not be JSON, let the rest try
if isinstance(data, AnsibleUnicode):
# The PyYAML's libyaml bindings use PyUnicode_CheckExact so
# they are unable to cope with our subclass.
# Unwrap and re-wrap the unicode so we can keep track of line
# numbers
in_data = text_type(data)
else:
in_data = data
try:
new_data = self._safe_load(in_data, file_name=file_name)
except YAMLError as yaml_exc:
self._handle_error(yaml_exc, file_name, show_content)
if isinstance(data, AnsibleUnicode):
new_data = AnsibleUnicode(new_data)
new_data.ansible_pos = data.ansible_pos
return new_data
def load_from_file(self, file_name, cache=True, unsafe=False):
''' Loads data from a file, which can contain either JSON or YAML. '''
file_name = self.path_dwim(file_name)
display.debug("Loading data from %s" % file_name)
# if the file has already been read in and cached, we'll
# return those results to avoid more file/vault operations
if cache and file_name in self._FILE_CACHE:
parsed_data = self._FILE_CACHE[file_name]
else:
# read the file contents and load the data structure from them
(b_file_data, show_content) = self._get_file_contents(file_name)
file_data = to_text(b_file_data, errors='surrogate_or_strict')
parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
# cache the file contents for next time
self._FILE_CACHE[file_name] = parsed_data
if unsafe:
return parsed_data
else:
# return a deep copy here, so the cache is not affected
return copy.deepcopy(parsed_data)
def path_exists(self, path):
path = self.path_dwim(path)
return os.path.exists(to_bytes(path, errors='surrogate_or_strict'))
def is_file(self, path):
path = self.path_dwim(path)
return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull
def is_directory(self, path):
path = self.path_dwim(path)
return os.path.isdir(to_bytes(path, errors='surrogate_or_strict'))
def list_directory(self, path):
path = self.path_dwim(path)
return os.listdir(path)
def is_executable(self, path):
'''is the given path executable?'''
path = self.path_dwim(path)
return is_executable(path)
def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
loader = AnsibleLoader(stream, file_name, self._vault.secrets)
try:
return loader.get_single_data()
finally:
try:
loader.dispose()
except AttributeError:
pass # older versions of yaml don't have dispose function, ignore
def _get_file_contents(self, file_name):
'''
Reads the file contents from the given file name
If the contents are vault-encrypted, it will decrypt them and return
the decrypted data
:arg file_name: The name of the file to read. If this is a relative
path, it will be expanded relative to the basedir
        :raises AnsibleFileNotFound: if the file_name does not refer to a file
:raises AnsibleParserError: if we were unable to read the file
:return: Returns a byte string of the file contents
'''
if not file_name or not isinstance(file_name, (binary_type, text_type)):
raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))
b_file_name = to_bytes(self.path_dwim(file_name))
# This is what we really want but have to fix unittests to make it pass
# if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
if not self.path_exists(b_file_name) or not self.is_file(b_file_name):
raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
show_content = True
try:
with open(b_file_name, 'rb') as f:
data = f.read()
if is_encrypted(data):
# FIXME: plugin vault selector
b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(data)
data = self._vault.decrypt(data, filename=b_file_name)
show_content = False
return (data, show_content)
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)), orig_exc=e)
def _handle_error(self, yaml_exc, file_name, show_content):
'''
Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
file name/position where a YAML exception occurred, and raises an AnsibleParserError
to display the syntax exception information.
'''
# if the YAML exception contains a problem mark, use it to construct
# an object the error class can use to display the faulty line
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content, orig_exc=yaml_exc)
def get_basedir(self):
''' returns the current basedir '''
return self._basedir
def set_basedir(self, basedir):
''' sets the base directory, used to find files when a relative path is given '''
if basedir is not None:
self._basedir = to_text(basedir)
def path_dwim(self, given):
'''
make relative paths work like folks expect.
'''
given = unquote(given)
given = to_text(given, errors='surrogate_or_strict')
if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'):
path = given
else:
basedir = to_text(self._basedir, errors='surrogate_or_strict')
path = os.path.join(basedir, given)
return unfrackpath(path, follow=False)
def _is_role(self, path):
''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc '''
b_path = to_bytes(path, errors='surrogate_or_strict')
b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict')
for finddir in (b'meta', b'tasks'):
for suffix in (b'.yml', b'.yaml', b''):
b_main = b'main%s' % (suffix)
b_tasked = b'%s/%s' % (finddir, b_main)
if (
RE_TASKS.search(path) and
os.path.exists(os.path.join(b_path, b_main)) or
os.path.exists(os.path.join(b_upath, b_tasked)) or
os.path.exists(os.path.join(os.path.dirname(b_path), b_tasked))
):
return True
return False
def path_dwim_relative(self, path, dirname, source, is_role=False):
'''
find one file in either a role or playbook dir with or without
explicitly named dirname subdirs
Used in action plugins and lookups to find supplemental files that
could be in either place.
'''
search = []
source = to_text(source, errors='surrogate_or_strict')
# I have full path, nothing else needs to be looked at
if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'):
search.append(unfrackpath(source, follow=False))
else:
# base role/play path + templates/files/vars + relative filename
search.append(os.path.join(path, dirname, source))
basedir = unfrackpath(path, follow=False)
# not told if role, but detect if it is a role and if so make sure you get correct base path
if not is_role:
is_role = self._is_role(path)
if is_role and RE_TASKS.search(path):
basedir = unfrackpath(os.path.dirname(path), follow=False)
cur_basedir = self._basedir
self.set_basedir(basedir)
# resolved base role/play path + templates/files/vars + relative filename
search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False))
self.set_basedir(cur_basedir)
if is_role and not source.endswith(dirname):
# look in role's tasks dir w/o dirname
search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False))
# try to create absolute path for loader basedir + templates/files/vars + filename
search.append(unfrackpath(os.path.join(dirname, source), follow=False))
# try to create absolute path for loader basedir
search.append(unfrackpath(os.path.join(basedir, source), follow=False))
# try to create absolute path for dirname + filename
search.append(self.path_dwim(os.path.join(dirname, source)))
# try to create absolute path for filename
search.append(self.path_dwim(source))
for candidate in search:
if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')):
break
return candidate
def path_dwim_relative_stack(self, paths, dirname, source, is_role=False):
'''
find one file in first path in stack taking roles into account and adding play basedir as fallback
:arg paths: A list of text strings which are the paths to look for the filename in.
:arg dirname: A text string representing a directory. The directory
is prepended to the source to form the path to search for.
:arg source: A text string which is the filename to search for
:rtype: A text string
:returns: An absolute path to the filename ``source`` if found
        :raises: An AnsibleFileNotFound Exception if the file is not found in any of the search paths
'''
b_dirname = to_bytes(dirname)
b_source = to_bytes(source)
result = None
search = []
if source is None:
display.warning('Invalid request to find a file that matches a "null" value')
elif source and (source.startswith('~') or source.startswith(os.path.sep)):
# path is absolute, no relative needed, check existence and return source
test_path = unfrackpath(b_source, follow=False)
if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
result = test_path
else:
display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
for path in paths:
upath = unfrackpath(path, follow=False)
b_upath = to_bytes(upath, errors='surrogate_or_strict')
b_mydir = os.path.dirname(b_upath)
# if path is in role and 'tasks' not there already, add it into the search
if is_role or self._is_role(path):
if b_mydir.endswith(b'tasks'):
search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source))
search.append(os.path.join(b_mydir, b_source))
else:
# don't add dirname if user already is using it in source
if b_source.split(b'/')[0] != b_dirname:
search.append(os.path.join(b_upath, b_dirname, b_source))
search.append(os.path.join(b_upath, b_source))
elif b_dirname not in b_source.split(b'/'):
# don't add dirname if user already is using it in source
if b_source.split(b'/')[0] != dirname:
search.append(os.path.join(b_upath, b_dirname, b_source))
search.append(os.path.join(b_upath, b_source))
# always append basedir as last resort
# don't add dirname if user already is using it in source
if b_source.split(b'/')[0] != dirname:
search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
search.append(os.path.join(to_bytes(self.get_basedir()), b_source))
display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
for b_candidate in search:
display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
if os.path.exists(b_candidate):
result = to_text(b_candidate)
break
if result is None:
raise AnsibleFileNotFound(file_name=source, paths=[to_text(p) for p in search])
return result
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def get_real_file(self, file_path, decrypt=True):
"""
        If the file is vault encrypted, return a path to a temporary decrypted file.
        If the file is not encrypted, the path is returned unchanged.
        Temporary files are cleaned up in the destructor.
"""
if not file_path or not isinstance(file_path, (binary_type, text_type)):
raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path))
b_file_path = to_bytes(file_path, errors='surrogate_or_strict')
if not self.path_exists(b_file_path) or not self.is_file(b_file_path):
raise AnsibleFileNotFound(file_name=file_path)
real_path = self.path_dwim(file_path)
try:
if decrypt:
with open(to_bytes(real_path), 'rb') as f:
# Limit how much of the file is read since we do not know
# whether this is a vault file and therefore it could be very
# large.
if is_encrypted_file(f, count=len(b_HEADER)):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
data = f.read()
if not self._vault.secrets:
raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path))
data = self._vault.decrypt(data, filename=real_path)
# Make a temp file
real_path = self._create_content_tempfile(data)
self._tempfiles.add(real_path)
return real_path
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e)
def cleanup_tmp_file(self, file_path):
"""
Removes any temporary files created from a previous call to
get_real_file. file_path must be the path returned from a
previous call to get_real_file.
"""
if file_path in self._tempfiles:
os.unlink(file_path)
self._tempfiles.remove(file_path)
def cleanup_all_tmp_files(self):
for f in self._tempfiles:
try:
self.cleanup_tmp_file(f)
except Exception as e:
display.warning("Unable to cleanup temp files: %s" % to_native(e))
|
tlakshman26/cinder-https-changes | refs/heads/master | cinder/ssh_utils.py | 22 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities related to SSH connection management."""
import os
from eventlet import pools
from oslo_config import cfg
from oslo_log import log as logging
import paramiko
import six
from cinder import exception
from cinder.i18n import _, _LI
LOG = logging.getLogger(__name__)
ssh_opts = [
cfg.BoolOpt('strict_ssh_host_key_policy',
default=False,
help='Option to enable strict host key checking. When '
'set to "True" Cinder will only connect to systems '
'with a host key present in the configured '
'"ssh_hosts_key_file". When set to "False" the host key '
'will be saved upon first connection and used for '
'subsequent connections. Default=False'),
cfg.StrOpt('ssh_hosts_key_file',
default='$state_path/ssh_known_hosts',
help='File containing SSH host keys for the systems with which '
'Cinder needs to communicate. OPTIONAL: '
'Default=$state_path/ssh_known_hosts'),
]
CONF = cfg.CONF
CONF.register_opts(ssh_opts)
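# Illustrative cinder.conf values for the two options registered above; the
# path shown is an example only, not a default enforced by this module:
#
#   [DEFAULT]
#   strict_ssh_host_key_policy = True
#   ssh_hosts_key_file = /var/lib/cinder/ssh_known_hosts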
class SSHPool(pools.Pool):
"""A simple eventlet pool to hold ssh connections."""
def __init__(self, ip, port, conn_timeout, login, password=None,
privatekey=None, *args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
self.conn_timeout = conn_timeout if conn_timeout else None
self.privatekey = privatekey
self.hosts_key_file = None
# Validate good config setting here.
# Paramiko handles the case where the file is inaccessible.
if not CONF.ssh_hosts_key_file:
raise exception.ParameterNotFound(param='ssh_hosts_key_file')
elif not os.path.isfile(CONF.ssh_hosts_key_file):
# If using the default path, just create the file.
if CONF.state_path in CONF.ssh_hosts_key_file:
open(CONF.ssh_hosts_key_file, 'a').close()
else:
msg = (_("Unable to find ssh_hosts_key_file: %s") %
CONF.ssh_hosts_key_file)
raise exception.InvalidInput(reason=msg)
if 'hosts_key_file' in kwargs.keys():
self.hosts_key_file = kwargs.pop('hosts_key_file')
LOG.info(_LI("Secondary ssh hosts key file %(kwargs)s will be "
"loaded along with %(conf)s from /etc/cinder.conf."),
{'kwargs': self.hosts_key_file,
'conf': CONF.ssh_hosts_key_file})
LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' "
"using ssh_hosts_key_file '%(key_file)s'.",
{'policy': CONF.strict_ssh_host_key_policy,
'key_file': CONF.ssh_hosts_key_file})
self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
if not self.hosts_key_file:
self.hosts_key_file = CONF.ssh_hosts_key_file
else:
self.hosts_key_file += ',' + CONF.ssh_hosts_key_file
super(SSHPool, self).__init__(*args, **kwargs)
def create(self):
try:
ssh = paramiko.SSHClient()
if ',' in self.hosts_key_file:
files = self.hosts_key_file.split(',')
for f in files:
ssh.load_host_keys(f)
else:
ssh.load_host_keys(self.hosts_key_file)
            # If strict_ssh_host_key_policy is set we want to reject, by
            # default, any host that has no entry in the known_hosts file.
            # Otherwise we use AutoAddPolicy, which accepts the key on the
            # first connect but fails if the key later changes.
            # load_host_keys can handle hashed known_hosts entries.
if self.strict_ssh_host_key_policy:
ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
else:
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if self.password:
ssh.connect(self.ip,
port=self.port,
username=self.login,
password=self.password,
timeout=self.conn_timeout)
elif self.privatekey:
pkfile = os.path.expanduser(self.privatekey)
privatekey = paramiko.RSAKey.from_private_key_file(pkfile)
ssh.connect(self.ip,
port=self.port,
username=self.login,
pkey=privatekey,
timeout=self.conn_timeout)
else:
msg = _("Specify a password or private_key")
raise exception.CinderException(msg)
            # Paramiko by default sets the socket timeout to 0.1 seconds,
            # ignoring what we set through the sshclient. This doesn't help
            # with keeping long lived connections. Hence we have to bypass it
            # by overriding it after the transport is initialized. We set the
            # socket timeout to None and enable keepalive packets so that the
            # server keeps the connection open. All that does is send a
            # keepalive packet every ssh_conn_timeout seconds.
if self.conn_timeout:
transport = ssh.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
msg = _("Error connecting via ssh: %s") % six.text_type(e)
LOG.error(msg)
raise paramiko.SSHException(msg)
def get(self):
"""Return an item from the pool, when one is available.
This may cause the calling greenthread to block. Check if a
connection is active before returning it.
For dead connections create and return a new connection.
"""
conn = super(SSHPool, self).get()
if conn:
if conn.get_transport().is_active():
return conn
else:
conn.close()
return self.create()
def remove(self, ssh):
"""Close an ssh client and remove it from free_items."""
        ssh.close()
        if ssh in self.free_items:
            # free_items is a deque, which has no pop(item); use remove()
            self.free_items.remove(ssh)
        if self.current_size > 0:
            self.current_size -= 1
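# A minimal usage sketch (an illustration, not part of the original module);
# the host, credentials and pool sizes below are made-up example values:
#
#   pool = SSHPool('192.0.2.10', 22, 30, 'admin', password='secret',
#                  min_size=1, max_size=4)
#   ssh = pool.get()
#   try:
#       stdin, stdout, stderr = ssh.exec_command('uname -a')
#   finally:
#       pool.put(ssh)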
|
VigTech/Vigtech-Services | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 487 |
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
|
anorfleet/turntable | refs/heads/master | test/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/__init__.py | 159 | """
Eigenvalue solver using iterative methods.
Find k eigenvectors and eigenvalues of a matrix A using the
Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
These methods are most useful for large sparse matrices.
- eigs(A,k)
- eigsh(A,k)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
from __future__ import division, print_function, absolute_import
from .arpack import *
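# A short usage sketch (illustrative only, not part of this package): finding
# a few eigenpairs of a large sparse matrix with the solvers exported above.
#
#   >>> from scipy.sparse import rand
#   >>> from scipy.sparse.linalg import eigs
#   >>> A = rand(1000, 1000, density=0.01, format='csr')
#   >>> vals, vecs = eigs(A, k=6)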
|
pleaseproject/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/distutils/tests/test_dist.py | 47 | """Tests for distutils.dist."""
import os
import io
import sys
import unittest
import warnings
import textwrap
from distutils.dist import Distribution, fix_help_options
from distutils.cmd import Command
from test.support import TESTFN, captured_stdout, run_unittest
from distutils.tests import support
class test_dist(Command):
"""Sample distutils extension command."""
user_options = [
("sample-option=", "S", "help text"),
]
def initialize_options(self):
self.sample_option = None
class TestDistribution(Distribution):
"""Distribution subclasses that avoids the default search for
configuration files.
The ._config_files attribute must be set before
.parse_config_files() is called.
"""
def find_config_files(self):
return self._config_files
class DistributionTestCase(support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(DistributionTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
del sys.argv[1:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(DistributionTestCase, self).tearDown()
def create_distribution(self, configfiles=()):
d = TestDistribution()
d._config_files = configfiles
d.parse_config_files()
d.parse_command_line()
return d
def test_command_packages_unspecified(self):
sys.argv.append("build")
d = self.create_distribution()
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_command_packages_cmdline(self):
from distutils.tests.test_dist import test_dist
sys.argv.extend(["--command-packages",
"foo.bar,distutils.tests",
"test_dist",
"-Ssometext",
])
d = self.create_distribution()
# let's actually try to load our test command:
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "distutils.tests"])
cmd = d.get_command_obj("test_dist")
self.assertTrue(isinstance(cmd, test_dist))
self.assertEqual(cmd.sample_option, "sometext")
def test_command_packages_configfile(self):
sys.argv.append("build")
self.addCleanup(os.unlink, TESTFN)
f = open(TESTFN, "w")
try:
print("[global]", file=f)
print("command_packages = foo.bar, splat", file=f)
finally:
f.close()
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "foo.bar", "splat"])
# ensure command line overrides config:
sys.argv[1:] = ["--command-packages", "spork", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(),
["distutils.command", "spork"])
# Setting --command-packages to '' should cause the default to
# be used even if a config file specified something else:
sys.argv[1:] = ["--command-packages", "", "build"]
d = self.create_distribution([TESTFN])
self.assertEqual(d.get_command_packages(), ["distutils.command"])
def test_empty_options(self):
# an empty options dictionary should not stay in the
# list of attributes
klass = Distribution
# catching warnings
warns = []
def _warn(msg):
warns.append(msg)
old_warn = warnings.warn
warnings.warn = _warn
try:
dist = klass(attrs={'author': 'xxx',
'name': 'xxx',
'version': 'xxx',
'url': 'xxxx',
'options': {}})
finally:
warnings.warn = old_warn
self.assertEqual(len(warns), 0)
def test_finalize_options(self):
attrs = {'keywords': 'one,two',
'platforms': 'one,two'}
dist = Distribution(attrs=attrs)
dist.finalize_options()
        # finalize_options splits platforms and keywords
self.assertEqual(dist.metadata.platforms, ['one', 'two'])
self.assertEqual(dist.metadata.keywords, ['one', 'two'])
def test_get_command_packages(self):
dist = Distribution()
self.assertEqual(dist.command_packages, None)
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command'])
self.assertEqual(dist.command_packages,
['distutils.command'])
dist.command_packages = 'one,two'
cmds = dist.get_command_packages()
self.assertEqual(cmds, ['distutils.command', 'one', 'two'])
def test_announce(self):
# make sure the level is known
dist = Distribution()
args = ('ok',)
kwargs = {'level': 'ok2'}
self.assertRaises(ValueError, dist.announce, args, kwargs)
class MetadataTestCase(support.TempdirManager, support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.argv = sys.argv, sys.argv[:]
def tearDown(self):
sys.argv = self.argv[0]
sys.argv[:] = self.argv[1]
super(MetadataTestCase, self).tearDown()
def test_simple_metadata(self):
attrs = {"name": "package",
"version": "1.0"}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.0" in meta)
self.assertTrue("provides:" not in meta.lower())
self.assertTrue("requires:" not in meta.lower())
self.assertTrue("obsoletes:" not in meta.lower())
def test_provides(self):
attrs = {"name": "package",
"version": "1.0",
"provides": ["package", "package.sub"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_provides(),
["package", "package.sub"])
self.assertEqual(dist.get_provides(),
["package", "package.sub"])
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.1" in meta)
self.assertTrue("requires:" not in meta.lower())
self.assertTrue("obsoletes:" not in meta.lower())
def test_provides_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"provides": ["my.pkg (splat)"]})
def test_requires(self):
attrs = {"name": "package",
"version": "1.0",
"requires": ["other", "another (==1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_requires(),
["other", "another (==1.0)"])
self.assertEqual(dist.get_requires(),
["other", "another (==1.0)"])
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.1" in meta)
self.assertTrue("provides:" not in meta.lower())
self.assertTrue("Requires: other" in meta)
self.assertTrue("Requires: another (==1.0)" in meta)
self.assertTrue("obsoletes:" not in meta.lower())
def test_requires_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"requires": ["my.pkg (splat)"]})
def test_obsoletes(self):
attrs = {"name": "package",
"version": "1.0",
"obsoletes": ["other", "another (<1.0)"]}
dist = Distribution(attrs)
self.assertEqual(dist.metadata.get_obsoletes(),
["other", "another (<1.0)"])
self.assertEqual(dist.get_obsoletes(),
["other", "another (<1.0)"])
meta = self.format_metadata(dist)
self.assertTrue("Metadata-Version: 1.1" in meta)
self.assertTrue("provides:" not in meta.lower())
self.assertTrue("requires:" not in meta.lower())
self.assertTrue("Obsoletes: other" in meta)
self.assertTrue("Obsoletes: another (<1.0)" in meta)
def test_obsoletes_illegal(self):
self.assertRaises(ValueError, Distribution,
{"name": "package",
"version": "1.0",
"obsoletes": ["my.pkg (splat)"]})
def format_metadata(self, dist):
sio = io.StringIO()
dist.metadata.write_pkg_file(sio)
return sio.getvalue()
def test_custom_pydistutils(self):
# fixes #2166
# make sure pydistutils.cfg is found
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
temp_dir = self.mkdtemp()
user_filename = os.path.join(temp_dir, user_filename)
f = open(user_filename, 'w')
try:
f.write('.')
finally:
f.close()
try:
dist = Distribution()
# linux-style
if sys.platform in ('linux', 'darwin'):
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertTrue(user_filename in files)
# win32-style
if sys.platform == 'win32':
# home drive should be found
os.environ['HOME'] = temp_dir
files = dist.find_config_files()
self.assertTrue(user_filename in files,
'%r not found in %r' % (user_filename, files))
finally:
os.remove(user_filename)
def test_fix_help_options(self):
help_tuples = [('a', 'b', 'c', 'd'), (1, 2, 3, 4)]
fancy_options = fix_help_options(help_tuples)
self.assertEqual(fancy_options[0], ('a', 'b', 'c'))
self.assertEqual(fancy_options[1], (1, 2, 3))
def test_show_help(self):
# smoke test, just makes sure some help is displayed
dist = Distribution()
sys.argv = []
dist.help = 1
dist.script_name = 'setup.py'
with captured_stdout() as s:
dist.parse_command_line()
output = [line for line in s.getvalue().split('\n')
if line.strip() != '']
self.assertTrue(len(output) > 0)
def test_long_description(self):
long_desc = textwrap.dedent("""\
example::
We start here
and continue here
and end here.""")
attrs = {"name": "package",
"version": "1.0",
"long_description": long_desc}
dist = Distribution(attrs)
meta = self.format_metadata(dist)
meta = meta.replace('\n' + 8 * ' ', '\n')
self.assertTrue(long_desc in meta)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DistributionTestCase))
suite.addTest(unittest.makeSuite(MetadataTestCase))
return suite
if __name__ == "__main__":
run_unittest(test_suite())
|
nghia-huynh/gem5-stable | refs/heads/master | src/dev/arm/AbstractNVM.py | 41 | # Copyright (c) 2013-2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Rene de Jong
#
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class AbstractNVM(SimObject):
type = 'AbstractNVM'
abstract = True
cxx_header = "dev/arm/abstract_nvm.hh"
|
jeremyosborne/python | refs/heads/master | third_party/docopt/dopt.py | 1 | '''
Usage: oparse.py [options]
Options:
-h, --help show this help message and exit
-f FILE, --file=FILE write report to FILE [default: report.txt]
-q, --quiet don't print status messages to stdout [default: False]
-l LINES, --lines=LINES
how many lines to add to the report [default: 10]
'''
# This example requires the third party module [docopt](http://docopt.org).
#
# pip install docopt
# # for validation, something docopt lacks and optparse/argparse has
# pip install schema
#
# After installing, to get the best effect of how this module works:
#
# * finish oparse.py
# * run `python oparse.py -h`
# * copy the help output into the docstring of this module
# * run this file
from docopt import docopt
from schema import Schema, Use
# the --lines parameter must be an integer; everything else is accepted as-is.
schema = Schema({'--lines': Use(int, error='lines should be an int'),
object: object})
# Get the options.
options = docopt(__doc__)
# Validate and convert options to specific schema types from plain text.
options = schema.validate(options)
# Testing
#print(options)
# Slightly rewritten program from oparse.py
report_line = "All work and no play makes Jack a dull boy.\n"
if not options["--quiet"]:
print "Welcome to Report Writer 9000"
print "Value of options['--quiet']: %s" % options["--quiet"]
print "Value of options['--lines']: %s" % options["--lines"]
print "Value of options['--file']: %s" % options["--file"]
print "Writing report..."
with open(options["--file"], "w") as f:
for i in range(options["--lines"]):
f.write(report_line)
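# A sample invocation of this script (a sketch; the option values are
# arbitrary, and docopt plus schema must be installed as noted above):
#
#   python dopt.py --lines 3 --file out.txt --quiet
#
# This would write three report lines to out.txt and suppress the status
# messages printed above.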
|
e3krisztian/bead | refs/heads/master | bead_cli/web/__init__.py | 12133432 | |
tobegit3hub/keystone_docker | refs/heads/master | keystone/policy/backends/__init__.py | 12133432 | |
philanthropy-u/edx-platform | refs/heads/master | common/test/acceptance/pages/lms/video/__init__.py | 12133432 | |
hynnet/openwrt-mt7620 | refs/heads/master | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/threading.py | 31 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
self.notify(len(self.__waiters))
notify_all = notifyAll
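# An illustrative (non-library) sketch of the classic Condition hand-off
# pattern between a producer and a consumer thread:
#
#   cv = Condition()
#   items = []
#
#   def consumer():
#       with cv:
#           while not items:
#               cv.wait()
#           process(items.pop())    # process() is a placeholder
#
#   def producer():
#       with cv:
#           items.append('work')
#           cv.notify()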
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
rc = False
self.__cond.acquire()
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
self.__cond.release()
return rc
__enter__ = acquire
def release(self):
self.__cond.acquire()
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
self.__cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._Semaphore__value >= self._initial_value:
raise ValueError, "Semaphore released too many times"
return _Semaphore.release(self)
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self.__cond.__init__()
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
self.__block.__init__()
self.__started._reset_internal_locks()
@property
def _block(self):
# used by a unittest
return self.__block
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
        # if a non-daemonic thread encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.itervalues():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._Thread__stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
|
brenthuisman/phd_tools | refs/heads/master | analysis.poisontest.py | 1 | #!/usr/bin/env python
import dump, numpy as np,plot,sys,glob2 as glob,matplotlib.pyplot as plt
rootn='ipnl-patient-spot-auger.root'
#rootn='iba-patient-spot-auger.root'
filelist = glob.glob("./**/ipnl-patient-spot-auger.root")
data=[]
x=[]
for ffilen in filelist:
print 'opening',ffilen
for key,val in dump.thist2np_xy(ffilen).items():
#print key
if key == 'reconstructedProfileHisto':
#print val
x = val[0] #no need to append, is same for all
data.append([i/max(val[1]) for i in val[1]])
#print len(x)
a = np.vstack(data)
a = np.rollaxis(a,1)
#plt.imshow(a, cmap='hot', interpolation='nearest')
#plt.show()
#quit()
f, ax1 = plot.subplots(nrows=1, ncols=1, sharex=False, sharey=False)
#for abin in a:
#ax1.step(range(len(abin)), abin, label='', color='steelblue', lw=1)
ax1.step(range(len(a[0])), a[0], label='', color='steelblue', lw=1)
ax1.set_xlabel('Depth [mm]')
ax1.set_ylabel('PG detected [counts]')
#ax1.set_xlim(-0.5,5.5)
ax1.set_ylim(bottom=0)
ax1.set_title('PG detection')
plot.texax(ax1)
f.savefig('poisontest.pdf', bbox_inches='tight')
plot.close('all')
|
zimeon/iiif | refs/heads/main | iiif_static.py | 1 | #!/usr/bin/env python
"""iiif_static: Generate static images implementing the IIIF Image API level 0.
Copyright 2014--2018 Simeon Warner
"""
import logging
import optparse
import sys
import os.path
from iiif import __version__
from iiif.error import IIIFError
from iiif.static import IIIFStatic, IIIFStaticError
def main():
"""Parse arguments, instantiate IIIFStatic, run."""
if (sys.version_info < (2, 7)):
sys.exit("This program requires python version 2.7 or later")
# Options and arguments
p = optparse.OptionParser(description='IIIF Image API static file generator',
usage='usage: %prog [options] file [[file2..]] (-h for help)',
version='%prog ' + __version__)
p.add_option('--dst', '-d', action='store', default='/tmp',
help="Destination directory for images [default '%default']")
p.add_option('--tilesize', '-t', action='store', type='int', default=512,
help="Tilesize in pixels [default %default]")
p.add_option('--api-version', '--api', '-a', action='store', default='2.1',
help="API version, may be 1.1, 2.0 or 2.1 [default %default]")
p.add_option('--prefix', '-p', action='store', default=None,
help="URI prefix for where the images will be served from (default '%default'). "
"An empty prefix may be OK if the HTML page including the image shares the "
"the same root on the same server as the images, otherwise a full URL should "
"be specified. This is used to construct the @id in the info.json")
p.add_option('--identifier', '-i', action='store', default=None,
help="Identifier for the image that will be used in place of the filename "
"(minus extension). Notes that this option cannot be used if more than "
"one image file is to be processed")
p.add_option('--extra', '-e', action='append', default=[],
help="Extra request parameters to be used to generate static files, may be "
"repeated (e.g. '/full/90,/0/default.jpg' for a 90 wide thumnail)")
p.add_option('--write-html', action='store', default=None,
help="Write HTML page to the specified directory using the 'identifier.html' "
"as the filename. HTML will launch OpenSeadragon for this image and to "
"display some of information about info.json and tile locations. HTML will "
"assume OpenSeadragon at relative path openseadragonVVV/openseadragon.min.js "
"and user-interface icons in openseadragonVVV/images, where VVV are the "
"three parts of the version number. The --include-osd flag is also specified "
"then OpenSeadragon will be copied to these locations")
p.add_option('--include-osd', action='store_true',
help="Include OpenSeadragon files with --write-html flag")
p.add_option('--osd-version', action='store', default='2.0.0',
help="Generate static images for older versions of OpenSeadragon. Use of versions "
"prior to 1.2.1 will force use of /w,h/ for size parameter instead of /w,/. "
"Likely useful only in combination with --api-version=1.1 "
"[default %default]")
p.add_option('--osd-width', action='store', type='int', default='500',
help="Width of OpenSeadragon pane in pixels. Applies only with "
"--write-html [default %default]")
p.add_option('--osd-height', action='store', type='int', default='500',
help="Height of OpenSeadragon pane in pixels. Applies only with "
"--write-html [default %default]")
p.add_option('--generator', action='store_true', default=False,
help="Use named generator modules in iiif.generators package instead "
"of a starting image [default %default]")
p.add_option('--max-image-pixels', action='store', type='int', default=0,
help="Set the maximum number of pixels in an image. A non-zero value "
"will set a hard limit on the image size. If left unset then the "
"default configuration of the Python Image Libary (PIL) will give "
"a DecompressionBombWarning if the image size exceeds a default "
"maximum, but otherwise continue as normal")
p.add_option('--dryrun', '-n', action='store_true',
help="Do not write anything, say what would be done")
p.add_option('--quiet', '-q', action='store_true',
help="Quite (no output unless there is a warning/error)")
p.add_option('--verbose', '-v', action='store_true',
help="Verbose")
(opt, sources) = p.parse_args()
level = logging.DEBUG if (opt.verbose) else \
logging.WARNING if (opt.quiet) else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s',
level=level)
logger = logging.getLogger(os.path.basename(__file__))
if (not opt.write_html and opt.include_osd):
logger.warn(
"--include-osd has no effect without --write-html, ignoring")
if (len(sources) == 0):
logger.warn("No sources specified, nothing to do, bye! (-h for help)")
elif (len(sources) > 1 and opt.identifier):
logger.error(
"Cannot use --identifier/-i option with multiple sources, aborting.")
else:
try:
sg = IIIFStatic(dst=opt.dst, tilesize=opt.tilesize,
api_version=opt.api_version, dryrun=opt.dryrun,
prefix=opt.prefix, osd_version=opt.osd_version,
generator=opt.generator,
max_image_pixels=opt.max_image_pixels,
extras=opt.extra)
for source in sources:
# File or directory (or neither)?
if (os.path.isfile(source) or opt.generator):
logger.info("source file: %s" % (source))
sg.generate(source, identifier=opt.identifier)
if (opt.write_html):
sg.write_html(html_dir=opt.write_html, include_osd=opt.include_osd,
osd_width=opt.osd_width, osd_height=opt.osd_height)
elif (os.path.isdir(source)):
logger.warn(
"Ignoring source '%s': directory coversion not supported" % (source))
else:
logger.warn(
"Ignoring source '%s': neither file nor path" % (source))
except (IIIFStaticError, IIIFError) as e:
# catch known errors and report nicely...
logger.error("Error: " + str(e))
if __name__ == '__main__':
main()
|
nkgilley/home-assistant | refs/heads/dev | homeassistant/components/mpchc/__init__.py | 36 | """The mpchc component."""
|
alephu5/Soundbyte | refs/heads/master | environment/lib/python3.3/site-packages/IPython/nbconvert/writers/files.py | 7 | """
Contains writer for writing nbconvert output to filesystem.
"""
#-----------------------------------------------------------------------------
#Copyright (c) 2013, the IPython Development Team.
#
#Distributed under the terms of the Modified BSD License.
#
#The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
from IPython.utils.traitlets import Unicode
from IPython.utils.path import link_or_copy
from .base import WriterBase
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FilesWriter(WriterBase):
"""Consumes nbconvert output and produces files."""
build_directory = Unicode("", config=True,
help="""Directory to write output to. Leave blank
to output to the current directory""")
# Make sure that the output directory exists.
def _build_directory_changed(self, name, old, new):
if new and not os.path.isdir(new):
os.makedirs(new)
def __init__(self, **kw):
super(FilesWriter, self).__init__(**kw)
self._build_directory_changed('build_directory', self.build_directory,
self.build_directory)
def _makedir(self, path):
"""Make a directory if it doesn't already exist"""
if path and not os.path.isdir(path):
self.log.info("Making directory %s", path)
os.makedirs(path)
def write(self, output, resources, notebook_name=None, **kw):
"""
Consume and write Jinja output to the file system. Output directory
is set via the 'build_directory' variable of this instance (a
configurable).
See base for more...
"""
# Verify that a notebook name is provided.
if notebook_name is None:
raise TypeError('notebook_name')
# Pull the extension and subdir from the resources dict.
output_extension = resources.get('output_extension', None)
# Write all of the extracted resources to the destination directory.
# NOTE: WE WRITE EVERYTHING AS-IF IT'S BINARY. THE EXTRACT FIG
# PREPROCESSOR SHOULD HANDLE UNIX/WINDOWS LINE ENDINGS...
for filename, data in resources.get('outputs', {}).items():
# Determine where to write the file to
dest = os.path.join(self.build_directory, filename)
path = os.path.dirname(dest)
self._makedir(path)
# Write file
self.log.debug("Writing %i bytes to support file %s", len(data), dest)
with io.open(dest, 'wb') as f:
f.write(data)
# Copy referenced files to output directory
if self.build_directory:
for filename in self.files:
# Copy files that match search pattern
for matching_filename in glob.glob(filename):
# Make sure folder exists.
dest = os.path.join(self.build_directory, filename)
path = os.path.dirname(dest)
self._makedir(path)
# Copy if destination is different.
if not os.path.normpath(dest) == os.path.normpath(matching_filename):
self.log.info("Linking %s -> %s", matching_filename, dest)
link_or_copy(matching_filename, dest)
# Determine where to write conversion results.
if output_extension is not None:
dest = notebook_name + '.' + output_extension
else:
dest = notebook_name
if self.build_directory:
dest = os.path.join(self.build_directory, dest)
# Write conversion results.
self.log.info("Writing %i bytes to %s", len(output), dest)
with io.open(dest, 'w', encoding='utf-8') as f:
f.write(output)
return dest
|
gardster/omim | refs/heads/master | tools/python/stylesheet/convert_styles.py | 53 | import sys
lns = open(sys.argv[1]).readlines()
lns = [l.strip("\n") for l in lns]
newlns = []
isCaption = False
captionLns = []
leadSpaces = ""
i = 0
for l in lns:
if not isCaption:
i = l.find(" caption ")
if i != -1:
isCaption = True
captionLns = []
leadSpaces = l[0:i + 1]
newlns.append(l)
newlns.append(leadSpaces + " primary {")
else:
i = l.find(" path_text ")
if i != -1:
isCaption = True
captionLns = []
leadSpaces = l[0:i + 1]
newlns.append(l)
newlns.append(leadSpaces + " primary {")
else:
newlns.append(l)
else:
if l[i + 1] == "}":
isCaption = False
newlns.append(l)
else:
if l.find("priority") == -1:
newlns.append(" " + l)
captionLns.append(" " + l)
else:
newlns.append(leadSpaces + " }")
# newlns.append(leadSpaces + " secondary {")
# for l1 in captionLns:
# newlns.append(l1)
# newlns.append(leadSpaces + " }")
newlns.append(l)
for i in newlns:
print i
|
open-power-ref-design/opsmgr | refs/heads/master | horizon/test/integration_tests/pages/operational_management/__init__.py | 12133432 | |
ocwc/ocwc-data | refs/heads/master | search/data/management/__init__.py | 12133432 | |
svenstaro/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/a/b/c/d/e/__init__.py | 12133432 | |
dreamapplehappy/myblog | refs/heads/master | node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSVersion.py | 486 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name == '2013' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 non-Express has a x64-x86 cross that we want to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
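# Illustrative usage (hypothetical machine state, not part of the original module):
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\9.0', 'InstallDir')
# returns the InstallDir value (the VS2008 IDE directory) when that key exists,
# and None when the key/value is missing or reg.exe cannot be run.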
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-12 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
    2013(e) - Visual Studio 2013 (12)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('10.0', '12.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
|
ibrahima/kernel_i9300 | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
explosion/spaCy | refs/heads/master | spacy/lang/ca/tokenizer_exceptions.py | 1 | from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, NORM
from ...util import update_exc
_exc = {}
for exc_data in [
{ORTH: "aprox.", NORM: "aproximadament"},
{ORTH: "pàg.", NORM: "pàgina"},
{ORTH: "p.ex.", NORM: "per exemple"},
{ORTH: "gen.", NORM: "gener"},
{ORTH: "feb.", NORM: "febrer"},
{ORTH: "abr.", NORM: "abril"},
{ORTH: "jul.", NORM: "juliol"},
{ORTH: "set.", NORM: "setembre"},
{ORTH: "oct.", NORM: "octubre"},
{ORTH: "nov.", NORM: "novembre"},
{ORTH: "dec.", NORM: "desembre"},
{ORTH: "Dr.", NORM: "doctor"},
{ORTH: "Sr.", NORM: "senyor"},
{ORTH: "Sra.", NORM: "senyora"},
{ORTH: "Srta.", NORM: "senyoreta"},
{ORTH: "núm", NORM: "número"},
{ORTH: "St.", NORM: "sant"},
{ORTH: "Sta.", NORM: "santa"},
{ORTH: "'l"},
{ORTH: "'ls"},
{ORTH: "'m"},
{ORTH: "'n"},
{ORTH: "'ns"},
{ORTH: "'s"},
{ORTH: "'t"},
]:
_exc[exc_data[ORTH]] = [exc_data]
# Times
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m.", NORM: "p.m."}]
for h in range(1, 12 + 1):
for period in ["a.m.", "am"]:
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, NORM: "a.m."}]
for period in ["p.m.", "pm"]:
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, NORM: "p.m."}]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|
Dziolas/invenio | refs/heads/scoap3 | modules/bibauthorid/lib/bibauthorid_string_utils.py | 25 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
bibauthorid_string_utils
Bibauthorid utilities used by many parts of the framework
'''
def string_partition(s, sep, direc='l'):
'''
Partition a string by the first occurrence of the separator.
Mimics the string.partition function, which is not available in Python2.4
@param s: string to be partitioned
@type s: string
@param sep: separator to partition by
@type sep: string
    @param direc: direction (left 'l' or right 'r') to search the separator from
    @type direc: string
    @return: tuple of (left of sep, sep, right of sep)
@rtype: tuple
'''
if direc == 'r':
i = s.rfind(sep)
else:
i = s.find(sep)
if i < 0:
return (s, '', '')
else:
return (s[0:i], s[i:i + 1], s[i + 1:])
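# Illustrative behaviour (examples added for clarity, not in the original module):
#   string_partition('100:123,456', ':') -> ('100', ':', '123,456')
#   string_partition('a.b.c', '.', direc='r') -> ('a.b', '.', 'c')
#   string_partition('abc', ':') -> ('abc', '', '')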
def unpackbib(bibrecref):
"""
    Creates a tuple (100, 123, 456) from a bibrecref string ("100:123,456").
@param bibrecref and return: bibrecref
@type bibrecref: string
@type return: (int, int int)
"""
table, tail = bibrecref.split(":")
bibref, bibrec = tail.split(",")
return (int(table), int(bibref), int(bibrec))
|
GetStream/django_twitter | refs/heads/master | stream_twitter/migrations/0001_squashed_0002_auto_20170929_1307.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-05 09:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import stream_django.activity
class Migration(migrations.Migration):
replaces = [('stream_twitter', '0001_initial'),
('stream_twitter', '0002_auto_20170929_1307')]
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Follow',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='followers', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='friends', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=160)),
('occurrences', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Tweet',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=160)),
('created_at', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
bases=(stream_django.activity.Activity, models.Model),
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField()),
('picture', models.ImageField(
blank=True, upload_to='profile_pictures')),
('user', models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='follow',
unique_together=set([('user', 'target')]),
),
]
|
Just-D/chromium-1 | refs/heads/master | ppapi/generators/idl_diff.py | 180 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import subprocess
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
#
# IDLDiff
#
# IDLDiff is a tool for comparing sets of IDL generated header files
# with the standard checked in headers. It does this by capturing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are know to be safe, such as adding or removing
# blank lines, etc...
#
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
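#
# Typical invocation (illustrative, using the option defaults declared above):
#   idl_diff.py --gen hdir --src ../c --diff diff --halt
#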
# Change
#
# A Change object contains the previous lines, new news and change type.
#
class Change(object):
def __init__(self, mode, was, now):
self.mode = mode
self.was = was
self.now = now
def Dump(self):
if not self.was:
print 'Adding %s' % self.mode
elif not self.now:
print 'Missing %s' % self.mode
else:
print 'Modifying %s' % self.mode
for line in self.was:
print 'src: >>%s<<' % line
for line in self.now:
print 'gen: >>%s<<' % line
print
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
if len(change.now) != 1 or len(change.was) != 1: return False
if 'Copyright (c)' not in change.now[0]: return False
if 'Copyright (c)' not in change.was[0]: return False
return True
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
if change.now: return False
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
return True
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
for line in change.now:
if line: return False
for line in change.was:
if line: return False
return True
#
# IsToCppComment
#
# Return True if this change is only going from C++-style to C-style comments
#
def IsToCppComment(change):
if not len(change.now) or len(change.now) != len(change.was):
return False
for index in range(len(change.now)):
was = change.was[index].strip()
if was[:2] != '//':
return False
was = was[2:].strip()
now = change.now[index].strip()
if now[:2] != '/*':
return False
now = now[2:-2].strip()
if now != was:
return False
  return True
def IsMergeComment(change):
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
for line in change.now:
stripped = line.strip()
if stripped != '*' and stripped[:2] != '/*' and stripped[-2:] != '*/':
return False
return True
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
# vs
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
#
def IsSpacing(change):
if len(change.now) != len(change.was): return False
for i in range(len(change.now)):
# Also ignore right side comments
line = change.was[i]
offs = line.find('//')
if offs == -1:
offs = line.find('/*')
if offs >-1:
line = line[:offs-1]
words1 = change.now[i].split()
words2 = line.split()
if words1 != words2: return False
return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
for line in change.was:
if line.strip().find('struct'): return False
for line in change.now:
if line and '#include' not in line: return False
return True
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
if len(change.now): return False
for line in change.was:
line = line.strip()
if line[:2] != '//': return False
return True
#
# ValidChange
#
# Return True if none of the changes does not patch an above "bogus" change.
#
def ValidChange(change):
if IsToCppComment(change): return False
if IsCopyright(change): return False
if IsBlankComment(change): return False
if IsMergeComment(change): return False
if IsBlank(change): return False
if IsSpacing(change): return False
if IsInclude(change): return False
if IsCppComment(change): return False
return True
#
# Swapped
#
# Check if the combination of the last + next change signals that they are both
# invalid, such as a swap of lines around an invalid block.
#
def Swapped(last, next):
if not last.now and not next.was and len(last.was) == len(next.now):
cnt = len(last.was)
for i in range(cnt):
match = True
for j in range(cnt):
if last.was[j] != next.now[(i + j) % cnt]:
match = False
break;
if match: return True
if not last.was and not next.now and len(last.now) == len(next.was):
cnt = len(last.now)
for i in range(cnt):
match = True
for j in range(cnt):
if last.now[i] != next.was[(i + j) % cnt]:
match = False
break;
if match: return True
return False
def FilterLinesIn(output):
was = []
now = []
filter = []
for index in range(len(output)):
filter.append(False)
line = output[index]
if len(line) < 2: continue
if line[0] == '<':
if line[2:].strip() == '': continue
was.append((index, line[2:]))
elif line[0] == '>':
if line[2:].strip() == '': continue
now.append((index, line[2:]))
for windex, wline in was:
for nindex, nline in now:
if filter[nindex]: continue
if filter[windex]: continue
if wline == nline:
filter[nindex] = True
filter[windex] = True
if GetOption('verbose'):
print "Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline)
out = []
for index in range(len(output)):
if not filter[index]:
out.append(output[index])
return out
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
# Split on lines, adding an END marker to simply add logic
lines = output.split('\n')
lines = FilterLinesIn(lines)
lines.append('END')
changes = []
was = []
now = []
mode = ''
last = None
for line in lines:
# print "LINE=%s" % line
if not line: continue
elif line[0] == '<':
if line[2:].strip() == '': continue
# Ignore prototypes
if len(line) > 10:
words = line[2:].split()
if len(words) == 2 and words[1][-1] == ';':
if words[0] == 'struct' or words[0] == 'union':
continue
was.append(line[2:])
elif line[0] == '>':
if line[2:].strip() == '': continue
if line[2:10] == '#include': continue
now.append(line[2:])
elif line[0] == '-':
continue
else:
change = Change(line, was, now)
was = []
now = []
if ValidChange(change):
changes.append(change)
if line == 'END':
break
return FilterChanges(changes)
def FilterChanges(changes):
if len(changes) < 2: return changes
out = []
filter = [False for change in changes]
for cur in range(len(changes)):
for cmp in range(cur+1, len(changes)):
if filter[cmp]:
continue
if Swapped(changes[cur], changes[cmp]):
filter[cur] = True
filter[cmp] = True
for cur in range(len(changes)):
if filter[cur]: continue
out.append(changes[cur])
return out
def Main(args):
filenames = ParseOptions(args)
if not filenames:
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print 'Missing: %s' % name
for filename in filenames:
gen = filename
filename = filename[len(GetOption('gen')) + 1:]
src = os.path.join(GetOption('src'), filename)
diff = os.path.join(GetOption('diff'), filename)
p = subprocess.Popen(['diff', src, gen], stdout=subprocess.PIPE)
output, errors = p.communicate()
try:
input = open(diff, 'rt').read()
except:
input = ''
if input != output:
changes = GetChanges(output)
else:
changes = []
if changes:
print "\n\nDelta between:\n src=%s\n gen=%s\n" % (src, gen)
for change in changes:
change.Dump()
print 'Done with %s\n\n' % src
if GetOption('ok'):
open(diff, 'wt').write(output)
if GetOption('halt'):
return 1
else:
print "\nSAME:\n src=%s\n gen=%s" % (src, gen)
if input: print ' ** Matched expected diff. **'
print '\n'
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
rqelibari/sirup | refs/heads/master | tasks.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tasks.py
Part of sirup project
(c) 2017 Copyright Rezart Qelibari <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from invoke import Collection, task
from invocations.watch import watch
import pytest
@task(help={
'module': "Just runs tests/<module>.py.",
'opts': "Extra flags (comma separated) for the test runner"
})
def test(c, module=None, opts=None):
"""
Run tests with pytest.
"""
args = []
# Turn users options into argument list items
if opts:
        # Split opts on commas and strip surrounding whitespace
optsList = opts.split(',')
optsList = [elem.strip() for elem in optsList]
args += optsList
# Run all suits or only a specific one
if module:
specific_module = " ./tests/%s.py" % module
args.append(specific_module)
pytest.main(args)
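# Illustrative invocations via the default `invoke` CLI (module name and
# flags are examples only):
#   invoke test                          # run the whole suite
#   invoke test --module=test_config     # runs only ./tests/test_config.py
#   invoke test --opts="-x, -q"          # extra pytest flags, comma separated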
@task(name="watch")
def watch_tests(c, module=None, opts=None):
"""
Watch source tree and test tree for changes, rerunning tests as necessary.
Honors ``tests.package`` setting re: which source directory to watch for
changes.
"""
package = c.config.get('tests', {}).get('package')
patterns = ['\./tests/']
if package:
patterns.append('\./{0}/'.format(package))
kwargs = {'module': module, 'opts': opts}
# Kick things off with an initial test (making sure it doesn't exit on its
# own if tests currently fail)
c.config.run.warn = True
test(c, **kwargs)
# Then watch
watch(c, test, patterns, ['.*/\..*\.swp'], **kwargs)
@task
def coverage(c, html=True):
"""
Run tests w/ coverage enabled, optionally generating HTML & opening it.
:param bool html:
Whether to generate & open an HTML report. Default: ``True``.
"""
# Generate actual coverage data. NOTE: this will honor a local .coveragerc
package = c.config.get('tests', {}).get('package')
test_opts = "--cov-config=.coveragerc , --cov=%s" % package
test(c, opts=test_opts)
if html:
c.run("coverage html && open htmlcov/index.html")
ns = Collection(
test, coverage, watch_tests
)
ns.configure({
'tests': {
'package': 'sirup',
},
})
|
gilbert-yuan/gooderp_addons | refs/heads/master | warehouse/tests/test_inventory.py | 5 | # -*- coding: utf-8 -*-
from odoo.tests.common import TransactionCase
from odoo.exceptions import UserError
class TestInventory(TransactionCase):
def setUp(self):
super(TestInventory, self).setUp()
self.env.ref('core.goods_category_1').account_id = self.env.ref(
'finance.account_goods').id
self.env.ref('warehouse.wh_in_whin1').date = '2016-02-06'
self.env.ref('warehouse.wh_in_whin3').date = '2016-02-06'
self.others_in = self.browse_ref('warehouse.wh_in_whin1')
self.others_in_2 = self.browse_ref('warehouse.wh_in_whin3')
self.goods_mouse = self.browse_ref('goods.mouse')
self.sh_warehouse = self.browse_ref('warehouse.sh_stock')
        # Create a temporary stock transfer that moves 1 unit of goods to the Shanghai warehouse
self.temp_mouse_in = self.env['wh.move.line'].with_context({
'type': 'in',
}).create({
'move_id': self.others_in.move_id.id,
'goods_id': self.goods_mouse.id,
'uom_id': self.goods_mouse.uom_id.id,
'uos_id': self.goods_mouse.uos_id.id,
'warehouse_dest_id': self.sh_warehouse.id,
'goods_qty': 1,
'goods_uos_qty': self.goods_mouse.anti_conversion_unit(1),
'cost_unit': 30,
'lot': 'MOUSE0001',
})
        # Goods               actual qty   actual auxiliary qty
        # keyboard-mouse set          96                      2
        # mouse                        1                      1
        # network cable               48                      1
self.temp_mouse_in.location_id = self.env.ref('warehouse.b001_location').id
self.others_in.approve_order()
self.others_in_2.approve_order()
self.temp_mouse_in.action_done()
        # Create another temporary stock transfer; its quantity is 0 while the auxiliary quantity is 1
self.temp_mouse_in_zero_qty = self.env['wh.move.line'].with_context({
'type': 'in',
}).create({
'move_id': self.others_in.move_id.id,
'goods_id': self.goods_mouse.id,
'uom_id': self.goods_mouse.uom_id.id,
'uos_id': self.goods_mouse.uos_id.id,
'warehouse_dest_id': self.sh_warehouse.id,
'goods_qty': 0,
'goods_uos_qty': 0,
'cost_unit': 30,
'lot': 'MOUSE0002',
})
self.temp_mouse_in_zero_qty.action_done()
self.inventory = self.env['wh.inventory'].create({
'warehouse_id': self.browse_ref('warehouse.hd_stock').id,
})
self.inventory.query_inventory()
def test_query_inventory(self):
        # The stocktaking query results must match the per-product stock query results
for line in self.inventory.line_ids:
goods_stock = line.goods_id.get_stock_qty()[0]
self.assertEqual(goods_stock.get('warehouse'),
line.warehouse_id.name)
            if line.goods_id.name == u'网线':  # 120 network cables are in an in-transit transfer and must be subtracted when counting
self.assertEqual(goods_stock.get('qty') - 120, line.real_qty)
else:
self.assertEqual(goods_stock.get('qty'), line.real_qty)
        # When a warehouse is specified, the selected lines must belong to that warehouse
self.inventory.warehouse_id = self.sh_warehouse
self.inventory.query_inventory()
for line in self.inventory.line_ids:
self.assertEqual(line.warehouse_id, self.sh_warehouse)
        # When goods are specified, the selected lines must belong to those goods
        self.inventory.goods = [4, self.goods_mouse.id]  # u'鼠标' (mouse)
self.inventory.query_inventory()
for line in self.inventory.line_ids:
self.assertEqual(line.goods_id.name, u'鼠标')
self.inventory.unlink()
self.assertTrue(not self.inventory.exists())
def test_query_inventory_transfer_order(self):
        '''The counted quantity on a stocktaking order should not include goods in transit; here the in-transit quantity exactly equals the quantity in the warehouse'''
internal_order = self.env.ref('warehouse.wh_internal_whint0')
for line in internal_order.line_out_ids:
line.goods_qty = 48
inventory = self.env['wh.inventory'].create({
'warehouse_id': self.browse_ref('warehouse.hd_stock').id,
'goods': u'网线',
})
inventory.query_inventory()
def test_generate_inventory(self):
for line in self.inventory.line_ids:
if line.goods_id.name == u'键鼠套装':
keyboard_mouse = line
elif line.goods_id.name == u'鼠标':
mouse = line
else:
cable = line
        # When no value is entered, onchange_qty sets lot_type to 'nothing'
mouse.onchange_qty()
self.assertEqual(mouse.lot_type, 'nothing')
        # When the actual quantity is one less than the system stock, the difference quantity is -1
mouse.inventory_qty = mouse.real_qty - 1
mouse.onchange_qty()
self.assertEqual(mouse.difference_qty, -1)
self.assertEqual(mouse.lot_type, 'out')
        # When the actual quantity is one more than the system stock, the difference quantity is 1
mouse.inventory_qty = mouse.real_qty + 1
mouse.onchange_qty()
self.assertEqual(mouse.difference_qty, 1)
self.assertEqual(mouse.lot_type, 'in')
        # For goods forced to serial-number control (one per lot), only one unit may be gained or lost per count
warning = {'warning': {
'title': u'警告',
'message': u'商品上设置了序号为1,此时一次只能盘亏或盘盈一个商品数量',
}}
mouse.inventory_qty = mouse.real_qty + 2
self.assertEqual(mouse.onchange_qty(), warning)
        # When the actual auxiliary quantity changes, the actual quantity should change accordingly
mouse.inventory_uos_qty = mouse.real_uos_qty + 1
mouse.onchange_uos_qty()
self.assertEqual(mouse.goods_id.conversion_unit(
mouse.inventory_uos_qty), mouse.inventory_qty)
mouse.line_role_back()
mouse.inventory_qty = mouse.real_qty + 1
mouse.onchange_qty()
cable.inventory_qty = cable.real_qty - 1
cable.onchange_qty()
        # Mouse qty is now +1 and cable qty -1, generating an incoming order for the mouse and an outgoing order for the cable
self.inventory.generate_inventory()
self.assertTrue(self.inventory.out_id)
self.assertTrue(self.inventory.in_id)
        # Verify the goods
self.assertEqual(
self.inventory.out_id.line_out_ids.goods_id, cable.goods_id)
self.assertEqual(
self.inventory.in_id.line_in_ids.goods_id, mouse.goods_id)
        # Verify the quantities
self.assertEqual(self.inventory.out_id.line_out_ids.goods_qty, 1)
self.assertEqual(self.inventory.in_id.line_in_ids.goods_qty, 1)
        # Before re-counting, the related incoming/outgoing orders must not be approved
self.inventory.in_id.approve_order()
with self.assertRaises(UserError):
self.inventory.requery_inventory()
self.inventory.in_id.cancel_approved_order()
self.inventory.requery_inventory()
self.inventory.generate_inventory()
self.inventory.out_id.approve_order()
self.inventory.in_id.approve_order()
        # Once the related incoming/outgoing orders are done, the stocktaking order should complete automatically
self.assertEqual(self.inventory.state, 'done')
        # Completed orders must not be deletable
with self.assertRaises(UserError):
self.inventory.unlink()
results = self.inventory.open_in()
real_results = {
'type': 'ir.actions.act_window',
'res_model': 'wh.in',
'view_mode': 'form',
'res_id': self.inventory.in_id.id,
}
self.assertEqual(results, real_results)
results = self.inventory.open_out()
real_results = {
'type': 'ir.actions.act_window',
'res_model': 'wh.out',
'view_mode': 'form',
'res_id': self.inventory.out_id.id,
}
self.assertEqual(results, real_results)
def test_check_done_state_done(self):
''' Test: check_done state == 'done' '''
mouse_line = self.browse_ref('warehouse.wh_move_line_12')
mouse_line.action_done()
for line in self.inventory.line_ids:
if line.goods_id.name == u'鼠标':
mouse = line
        # When the actual quantity is one less than the system stock, the difference quantity is -1
mouse.inventory_qty = mouse.real_qty - 1
mouse.onchange_qty()
        # Mouse qty is now -1, generating an outgoing order for the mouse
self.inventory.generate_inventory()
        # The mouse is lot-managed, so the outgoing line must select a lot
self.inventory.out_id.line_out_ids[0].lot_id = mouse_line.id
self.inventory.out_id.approve_order()
self.inventory.out_id.cancel_approved_order()
def test_get_difference_uos_qty(self):
''' Test: _get_difference_uos_qty '''
for line in self.inventory.line_ids:
if line.goods_id.name == u'鼠标':
mouse = line
        # Actual auxiliary quantity is 1 less
mouse.inventory_uos_qty = mouse.inventory_qty - 1
mouse.onchange_uos_qty()
self.assertEqual(mouse.difference_uos_qty, -1)
def test_check_difference_identical(self):
''' Test: check_difference_identical '''
for line in self.inventory.line_ids:
if line.goods_id.name == u'鼠标':
mouse = line
        # Actual auxiliary quantity is 1 less
mouse.inventory_uos_qty = mouse.inventory_qty - 1
mouse.onchange_uos_qty()
self.assertEqual(mouse.difference_uos_qty, -1)
        # Make the surplus/shortage direction inconsistent with that of the auxiliary unit
mouse.difference_qty = 1
mouse.check_difference_identical()
def test_check_done(self):
        '''Check the incoming/outgoing orders generated by surplus/shortage when they are approved'''
self.inventory.query_inventory()
self.inventory.generate_inventory()
def test_inventory_get_default_warehouse(self):
        ''' Test: get the default stocktaking warehouse '''
self.env['wh.inventory'].create({
'date': '2016-12-30',
'goods': '鼠标',
})
def test_generate_inventory_twice(self):
        '''Click the generate-stocktaking-orders button twice'''
self.inventory.query_inventory()
self.inventory.generate_inventory()
with self.assertRaises(UserError):
self.inventory.generate_inventory()
def test_inventory_line_get_difference_qty(self):
'''_get_difference_qty:difference_qty=0,difference_uos_qty!=0'''
for line in self.inventory.line_ids:
if line.goods_id.name == u'鼠标':
mouse = line
        # Actual auxiliary quantity is 1 less; actual quantity is 1
mouse.inventory_uos_qty = mouse.inventory_qty - 1
self.assertEqual(mouse.difference_qty, -1)
|
s2hc-johan/plugins | refs/heads/master | v6/spell_check/spell_check.py | 4 | # -*- coding: utf-8 -*-
# Copyright © 2014 Puneeth Chaganti
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function, unicode_literals
from nikola.plugin_categories import LateTask
from nikola.utils import config_changed, LOGGER
import enchant
from enchant.checker import SpellChecker
from enchant.tokenize import EmailFilter, URLFilter
class RenderPosts(LateTask):
""" Run spell check on any post that may have changed. """
name = 'spell_check'
def __init__(self):
super(RenderPosts, self).__init__()
self._dicts = dict()
self._langs = enchant.list_languages()
def gen_tasks(self):
""" Run spell check on any post that may have changed. """
self.site.scan_posts()
kw = {'translations': self.site.config['TRANSLATIONS']}
yield self.group_task()
for lang in kw['translations']:
for post in self.site.timeline[:]:
path = post.fragment_deps(lang)
task = {
'basename': self.name,
'name': path,
'file_dep': path,
'actions': [(self.spell_check, (post, lang, ))],
'clean': True,
'uptodate': [config_changed(kw)],
}
yield task
def spell_check(self, post, lang):
""" Check spellings for the given post and given language. """
if enchant.dict_exists(lang):
checker = SpellChecker(lang, filters=[EmailFilter, URLFilter])
checker.set_text(post.text(lang=lang, strip_html=True))
words = [error.word for error in checker]
words = [
word for word in words if
self._not_in_other_dictionaries(word, lang)
]
LOGGER.notice(
'Mis-spelt words in %s: %s' % (
post.fragment_deps(lang), ', '.join(words)
)
)
else:
LOGGER.notice('No dictionary found for %s' % lang)
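    # Illustrative sketch of the pyenchant calls used above (not part of the
    # plugin itself); for lang='en_US':
    #   enchant.dict_exists('en_US')         -> True if a dictionary is installed
    #   checker = SpellChecker('en_US', ...) -> tokenizes the text and yields errors
    #   [error.word for error in checker]    -> the words flagged as mis-spelt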
def _not_in_other_dictionaries(self, word, lang):
""" Return True if the word is not present any dictionary for the lang.
"""
for language in self._langs:
if language.startswith('%s_' % lang): # look for en_GB, en_US, ...
dictionary = self._dicts.setdefault(
language, enchant.Dict(language)
)
if dictionary.check(word):
return False
return True
|
loretoparisi/nupic | refs/heads/master | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_tkagg.py | 69 | # Todd Miller [email protected]
from __future__ import division
import os, sys, math
import Tkinter as Tk, FileDialog
import tkagg # Paint image to Tk photo blitter extension
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
def show():
"""
Show all the figures and enter the gtk mainloop
This should be the last line of your script. This function sets
interactive mode to True, as detailed on
http://matplotlib.sf.net/interactive.html
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
import matplotlib
matplotlib.interactive(True)
if rcParams['tk.pythoninspect']:
os.environ['PYTHONINSPECT'] = '1'
if show._needmain:
Tk.mainloop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
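    # keyvald maps Tk keysym_num codes to matplotlib key names, e.g. 65307 is
    # the Escape key and 65361-65364 are the arrow keys.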
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w/2, h/2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
self._master = master
self._tkcanvas.focus_set()
# a dict from func-> cbook.Scheduler threads
self.sourced = dict()
# call the idle handler
def on_idle(*ignore):
self.idle_event()
return True
# disable until you figure out how to handle threads and interrupts
#t = cbook.Idle(on_idle)
#self._tkcanvas.after_idle(lambda *ignore: t.start())
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=width, height=height)
self._tkcanvas.create_image(width/2,height/2,image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = -1
elif num==5: step = +1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
if not self._shown: self.canvas._tkcanvas.bind("<Destroy>", destroy)
_focus = windowing.FocusManager()
if not self._shown:
self.window.deiconify()
# anim.py requires this
if sys.platform=='win32' : self.window.update()
else:
self.canvas.draw()
self._shown = True
def destroy(self, *args):
if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():
if self.window is not None:
self.window.quit()
if self.window is not None:
#self.toolbar.destroy()
self.window.destroy()
pass
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
    canvas - the FigureCanvas (the Tk drawing canvas)
    window - the Tk window
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
    canvas - the FigureCanvas (the Tk drawing canvas)
    window - the Tk window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = self.canvas.get_default_filetype()
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception, e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
|
p0cisk/Quantum-GIS | refs/heads/master | python/ext-libs/nose2/tests/functional/test_loadtests_plugin.py | 12 | from nose2.tests._common import FunctionalTestCase
class TestLoadTestsPlugin(FunctionalTestCase):
def test_simple(self):
proc = self.runIn(
'scenario/load_tests',
'-v',
'--plugin=nose2.plugins.loader.loadtests')
self.assertTestRunOutputMatches(proc, stderr='Ran 6 tests')
self.assertTestRunOutputMatches(proc, stderr='test_a..test_simple')
self.assertTestRunOutputMatches(proc, stderr='test_b..test_simple')
self.assertTestRunOutputMatches(proc, stderr='test_c..test_simple')
self.assertTestRunOutputMatches(proc, stderr='test_d..test_simple')
self.assertTestRunOutputMatches(proc, stderr='test_a..test_filter')
self.assertTestRunOutputMatches(proc, stderr='test_c..test_filter')
self.assertEqual(proc.poll(), 0)
def test_package(self):
proc = self.runIn(
'scenario/load_tests_pkg',
'-v',
'-c='
'nose2/tests/functional/support/scenario/load_tests_pkg/unittest.cfg',
'--plugin=nose2.plugins.loader.loadtests')
self.assertTestRunOutputMatches(proc, stderr='Ran 2 tests')
self.assertTestRunOutputMatches(
proc, stderr='test..ltpkg.tests.test_find_these.Test')
self.assertTestRunOutputMatches(
proc, stderr='test..ltpkg2.tests.Test')
|
jupitercoin/jupitercoin-v1.1--diff-update- | refs/heads/master | qa/rpc-tests/getchaintips.py | 6 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips, verify that it works.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def run_test (self):
BitcoinTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 120)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10);
self.nodes[2].generate(20);
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 130)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 140)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
|
nvoron23/statsmodels | refs/heads/master | statsmodels/examples/ex_grangercausality.py | 33 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 06 15:44:57 2013
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import iteritems
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.datasets import macrodata
import statsmodels.tsa.stattools as tsa_stats
# some example data
mdata = macrodata.load().data
mdata = mdata[['realgdp','realcons']]
data = mdata.view((float,2))
data = np.diff(np.log(data), axis=0)
#R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] #f_test
gr = tsa_stats.grangercausalitytests(data[:,1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7)
assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'],
decimal=7)
lag = 2
print('\nTest Results for %d lags' % lag)
print()
print('\n'.join(['%-20s statistic: %6.4f p-value: %6.4f' % (k, res[0], res[1])
for k, res in iteritems(gr[lag][0]) ]))
print('\n Results for auxiliary restricted regression with two lags')
print()
print(gr[lag][1][0].summary())
print('\n Results for auxiliary unrestricted regression with two lags')
print()
print(gr[lag][1][1].summary())
|
randy-waterhouse/bitcoin | refs/heads/master | test/functional/combine_logs.py | 14 | #!/usr/bin/env python3
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
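# e.g. a line starting with "2018-02-07T09:30:15.123456Z" begins a new event;
# lines without a leading timestamp are folded into the previous event.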
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|
DailyActie/Surrogate-Model | refs/heads/master | 01-codes/OpenMDAO-Framework-dev/contrib/m4/examples/mid_fidelity_example.py | 1 | """
Simple M4 variable fidelity example.
Runs a DOE on a MidFidelity component instance.
Note that the MidFidelity component performs its correction calculations
on the first execution, then on subsequent executions the corrected result
is directly calculated.
"""
from m4.doe import DOE
from m4.dummy_components import Model_A2d, Model_B2d
from m4.mid_fidelity import MidFidelity
from openmdao.main.api import Assembly
from openmdao.main.datatypes.api import Float
class MyModel(Assembly):
""" Simple M4 variable fidelity example. """
def configure(self):
# Specify DOE.
doe = self.add('M4_DOE', DOE())
# The model is an M4 variable fidelity component.
doe.model = self.add('VarFi', VarFi())
doe.design_variables = [('x', 0., 5.), ('y', 0., 5.)]
doe.response_variables = [('z1'), ('z2')]
doe.type = 'rand_lhs'
doe.n_samples = 200
def execute(self):
""" Run model and print results. """
super(MyModel, self).execute()
for i, case in enumerate(self.M4_DOE.outerator):
print 'CASE %d:' % (i + 1)
for name, index, value in case.inputs:
print ' input:', name, index, value
if case.msg:
print ' FAILED: %s' % case.msg
else:
for name, index, value in case.outputs:
print ' output:', name, index, value
class VarFi(MidFidelity):
""" Example variable fidelity component. """
# Inputs.
x = Float(value=0., low=0., high=5., iotype='in', desc='X input value.')
y = Float(default_value=0., low=0., high=5., units='m', iotype='in',
desc='Y input value.')
# Outputs.
z1 = Float(0., iotype='out', desc='exp(x) + exp(y)')
z2 = Float(0., iotype='out',
desc='10.0*(x-2.0)**2 + 10.0*(y-1.5)**2 + 10.0')
def __init__(self):
super(VarFi, self).__init__()
# Inputs.
self.rs_type = 'rbf'
self.n_samples = 20
self.tolerance = 1.0e20
self.correction_function = 2 # additive(gamma)
self.w_h = 0.2
self.accuracy_test_type = 2 # additional-points
# High and low fidelity models.
self.set_hifi_model(Model_A2d())
self.set_lofi_model(Model_B2d())
# Mappings are (mid, low, high).
self.add_input_mapping('x', 'x', 'x')
self.add_input_mapping('y', 'y', 'y')
self.add_output_mapping('z1', 'z', 'z1')
self.add_output_mapping('z2', 'z', 'z2')
if __name__ == '__main__': # pragma no cover
top = MyModel()
top.run()
# top.check_save_load() # Note: requires correct pythonV.R
|
termie/pupa | refs/heads/master | nova/context.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import datetime
import random
from nova import exception
from nova import utils
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, tenant, user, groups=None, remote_address=None,
timestamp=None, request_id=None):
self.user = user
self.tenant = tenant
self.groups = groups and groups or []
self.remote_address = remote_address
if not timestamp:
timestamp = utils.utcnow()
if isinstance(timestamp, str) or isinstance(timestamp, unicode):
timestamp = utils.parse_isotime(timestamp)
self.timestamp = timestamp
if not request_id:
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-'
request_id = ''.join([random.choice(chars) for x in xrange(20)])
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'groups': self.groups,
'remote_address': self.remote_address,
'timestamp': utils.isotime(self.timestamp),
'request_id': self.request_id}
@classmethod
def from_dict(cls, values):
return cls(**values)
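# Minimal usage sketch (tenant/user values are illustrative):
#   ctxt = RequestContext('tenant-1', 'user-1', remote_address='10.0.0.1')
#   payload = ctxt.to_dict()
#   restored = RequestContext.from_dict(payload)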
|
smallyear/linuxLearn | refs/heads/master | salt/salt/states/win_system.py | 1 | # -*- coding: utf-8 -*-
'''
Management of Windows system information
========================================
.. versionadded:: 2014.1.0
This state is used to manage system information such as the computer name and
description.
.. code-block:: yaml
ERIK-WORKSTATION:
system.computer_name: []
This is Erik's computer, don't touch!:
system.computer_desc: []
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
'''
This only supports Windows
'''
if salt.utils.is_windows() and 'system.get_computer_desc' in __salt__:
return __virtualname__
return False
def computer_desc(name):
'''
Manage the computer's description field
name
The desired computer description
'''
# Just in case someone decides to enter a numeric description
name = str(name)
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Computer description already set to {0!r}'.format(name)}
before_desc = __salt__['system.get_computer_desc']()
if before_desc == name:
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Computer description will be changed to {0!r}'
.format(name))
return ret
result = __salt__['system.set_computer_desc'](name)
if result['Computer Description'] == name:
ret['comment'] = ('Computer description successfully changed to {0!r}'
.format(name))
ret['changes'] = {'old': before_desc, 'new': name}
else:
ret['result'] = False
ret['comment'] = ('Unable to set computer description to '
'{0!r}'.format(name))
return ret
computer_description = salt.utils.alias_function(computer_desc, 'computer_description')
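# The alias above lets SLS files refer to this state as either
# "system.computer_desc" or "system.computer_description".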
def computer_name(name):
'''
Manage the computer's name
name
The desired computer name
'''
    # Just in case someone decides to enter a numeric name
name = str(name).upper()
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Computer name already set to {0!r}'.format(name)}
before_name = __salt__['system.get_computer_name']()
pending_name = __salt__['system.get_pending_computer_name']()
if before_name == name and pending_name is None:
return ret
elif pending_name == name:
ret['comment'] = ('The current computer name is {0!r}, but will be '
'changed to {1!r} on the next reboot'
.format(before_name, name))
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Computer name will be changed to {0!r}'.format(name)
return ret
result = __salt__['system.set_computer_name'](name)
if result is not False:
after_name = result['Computer Name']['Current']
after_pending = result['Computer Name'].get('Pending')
if ((after_pending is not None and after_pending == name) or
(after_pending is None and after_name == name)):
ret['comment'] = 'Computer name successfully set to {0!r}'.format(name)
if after_pending is not None:
ret['comment'] += ' (reboot required for change to take effect)'
ret['changes'] = {'old': before_name, 'new': name}
else:
ret['result'] = False
ret['comment'] = 'Unable to set computer name to {0!r}'.format(name)
return ret
|
BenTheElder/test-infra | refs/heads/master | gubernator/github/admin.py | 22 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import cPickle as pickle
import logging
import os
import webapp2
from google.appengine.api import urlfetch
from google.appengine.ext import deferred
from google.appengine.ext import ndb
import models
import handlers
# ndb model.query likes to use == True
# pylint: disable=singleton-comparison
class RecomputeOpenPRs(object):
keys_only = True
@staticmethod
def query():
return models.GHIssueDigest.query(
models.GHIssueDigest.is_open == True,
models.GHIssueDigest.is_pr == True
)
@staticmethod
def handle_entity(entity):
repo, number = entity.id().split(' ')
handlers.update_issue_digest(repo, number, always_put=True)
return {'puts': 1}
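# migrate() walks the query in pages of 10, applies handle_entity to each
# result, and re-queues itself as a deferred task until the cursor is
# exhausted (or returns after a single page when stop=True).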
@ndb.toplevel
def migrate(migration, cursor=None, last_parent=None, stop=False):
entities, next_cursor, more = migration.query().fetch_page(
10, start_cursor=cursor, keys_only=migration.keys_only)
counters = collections.Counter()
for entity in entities:
changes = migration.handle_entity(entity)
counters.update(changes)
summary = ', '.join('%s: %d' % x for x in sorted(counters.items()))
if entities:
logging.info('fetched %d. %s. (%r-%r)',
len(entities), summary, entities[0], entities[-1])
if stop:
return
if more and next_cursor:
deferred.defer(migrate, migration, cursor=next_cursor, last_parent=last_parent)
class Digest(webapp2.RequestHandler):
def get(self):
results = models.GHIssueDigest.query(
models.GHIssueDigest.is_open == True)
self.response.headers['content-type'] = 'text/plain'
self.response.write(pickle.dumps(list(results), pickle.HIGHEST_PROTOCOL))
class AdminDash(webapp2.RequestHandler):
def get(self):
self.response.write("""
<form action="/admin/reprocess" method="post">
<button>Reprocess Open Issues/PRs</button><input type="checkbox" name="background">Background
</form>
<form action="/admin/digest_sync" method="post">
<button>Download GHIssueDigest from production</button>
</form>
""")
def check_csrf(self):
# https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet
# #Checking_The_Referer_Header
origin = self.request.headers.get('origin') + '/'
expected = self.request.host_url + '/'
if not (origin and origin == expected):
logging.error('csrf check failed for %s, origin: %r', self.request.url, origin)
self.abort(403)
class Reprocessor(AdminDash):
def post(self):
self.check_csrf()
migration = RecomputeOpenPRs()
if self.request.get('background'):
deferred.defer(migrate, migration)
self.response.write('running.')
else:
migrate(migration, stop=True)
class DigestSync(AdminDash):
def post(self):
if not os.environ['SERVER_SOFTWARE'].startswith('Development/'):
self.abort(400)
# For local development, download GHIssueDigests from the production
# server.
result = urlfetch.fetch(
'https://github-dot-k8s-gubernator.appspot.com/digest', deadline=60)
if result.status_code != 200:
self.abort(result.status_code)
body = result.content
self.response.headers['content-type'] = 'text/plain'
self.response.write('%s\n' % len(body))
self.response.write(repr(body[:8]))
results = pickle.loads(body)
for res in results:
res.key = ndb.Key(models.GHIssueDigest, res.key.id())
self.response.write('%s\n' % res.key)
res.put()
app = webapp2.WSGIApplication([
(r'/digest', Digest),
(r'/admin/?', AdminDash),
(r'/admin/reprocess', Reprocessor),
(r'/admin/digest_sync', DigestSync),
], debug=True)
|
anaran/olympia | refs/heads/master | apps/addons/search.py | 6 | import logging
from operator import attrgetter
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
import pyes.exceptions as pyes
import amo
import amo.search
from addons.models import Persona
from amo.utils import create_es_index_if_missing
from bandwagon.models import Collection
from compat.models import AppCompat
from users.models import UserProfile
from versions.compare import version_int
from .models import Addon
log = logging.getLogger('z.es')
def extract(addon):
"""Extract indexable attributes from an add-on."""
attrs = ('id', 'slug', 'app_slug', 'created', 'last_updated',
'weekly_downloads', 'bayesian_rating', 'average_daily_users',
'status', 'type', 'hotness', 'is_disabled', 'premium_type',
'uses_flash')
d = dict(zip(attrs, attrgetter(*attrs)(addon)))
# Coerce the Translation into a string.
d['name_sort'] = unicode(addon.name).lower()
translations = addon.translations
d['name'] = list(set(string for _, string in translations[addon.name_id]))
d['description'] = list(set(string for _, string
in translations[addon.description_id]))
d['summary'] = list(set(string for _, string
in translations[addon.summary_id]))
d['authors'] = [a.name for a in addon.listed_authors]
d['device'] = getattr(addon, 'device_ids', [])
# This is an extra query, not good for perf.
d['category'] = getattr(addon, 'category_ids', [])
d['tags'] = getattr(addon, 'tag_list', [])
d['price'] = getattr(addon, 'price', 0.0)
if addon.current_version:
d['platforms'] = [p.id for p in
addon.current_version.supported_platforms]
d['appversion'] = {}
for app, appver in addon.compatible_apps.items():
if appver:
min_, max_ = appver.min.version_int, appver.max.version_int
else:
# Fake wide compatibility for search tools and personas.
min_, max_ = 0, version_int('9999')
d['appversion'][app.id] = dict(min=min_, max=max_)
try:
d['has_version'] = addon._current_version is not None
except ObjectDoesNotExist:
d['has_version'] = None
d['app'] = [app.id for app in addon.compatible_apps.keys()]
if addon.type == amo.ADDON_PERSONA:
try:
# This would otherwise get attached when by the transformer.
d['weekly_downloads'] = addon.persona.popularity
# Boost on popularity.
d['_boost'] = addon.persona.popularity ** .2
d['has_theme_rereview'] = (
addon.persona.rereviewqueuetheme_set.exists())
except Persona.DoesNotExist:
# The addon won't have a persona while it's being created.
pass
else:
# Boost by the number of users on a logarithmic scale. The maximum
# boost (11,000,000 users for adblock) is about 5x.
d['_boost'] = addon.average_daily_users ** .2
    # Quadruple the boost if the add-on is public.
    if addon.status == amo.STATUS_PUBLIC and '_boost' in d:
d['_boost'] = max(d['_boost'], 1) * 4
# Indices for each language. languages is a list of locales we want to
# index with analyzer if the string's locale matches.
for analyzer, languages in amo.SEARCH_ANALYZER_MAP.iteritems():
if (not settings.ES_USE_PLUGINS and
analyzer in amo.SEARCH_ANALYZER_PLUGINS):
continue
d['name_' + analyzer] = list(
set(string for locale, string in translations[addon.name_id]
if locale.lower() in languages))
d['summary_' + analyzer] = list(
set(string for locale, string in translations[addon.summary_id]
if locale.lower() in languages))
d['description_' + analyzer] = list(
set(string for locale, string in translations[addon.description_id]
if locale.lower() in languages))
return d
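# Illustrative numbers for the boost curve above: 10,000 average daily users
# gives 10000 ** 0.2 ~= 6.3 while 11,000,000 gives ~= 25.6, so the most popular
# add-ons are boosted roughly 4x harder than a typical 10k-user add-on before
# the public-status multiplier is applied.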
def setup_mapping(index=None, aliased=True):
"""Set up the addons index mapping."""
# Mapping describes how elasticsearch handles a document during indexing.
# Most fields are detected and mapped automatically.
appver = {'dynamic': False, 'properties': {'max': {'type': 'long'},
'min': {'type': 'long'}}}
mapping = {
# Optional boosting during indexing.
'_boost': {'name': '_boost', 'null_value': 1.0},
'properties': {
# Turn off analysis on name so we can sort by it.
'name_sort': {'type': 'string', 'index': 'not_analyzed'},
# Adding word-delimiter to split on camelcase and punctuation.
'name': {'type': 'string',
'analyzer': 'standardPlusWordDelimiter'},
'summary': {'type': 'string',
'analyzer': 'snowball'},
'description': {'type': 'string',
'analyzer': 'snowball'},
'tags': {'type': 'string',
'index': 'not_analyzed',
'index_name': 'tag'},
'platforms': {'type': 'integer', 'index_name': 'platform'},
'appversion': {'properties': dict((app.id, appver)
for app in amo.APP_USAGE)},
},
}
# Add room for language-specific indexes.
for analyzer in amo.SEARCH_ANALYZER_MAP:
if (not settings.ES_USE_PLUGINS and
analyzer in amo.SEARCH_ANALYZER_PLUGINS):
log.info('While creating mapping, skipping the %s analyzer'
% analyzer)
continue
mapping['properties']['name_' + analyzer] = {
'type': 'string',
'analyzer': analyzer,
}
mapping['properties']['summary_' + analyzer] = {
'type': 'string',
'analyzer': analyzer,
}
mapping['properties']['description_' + analyzer] = {
'type': 'string',
'analyzer': analyzer,
}
es = amo.search.get_es()
# Adjust the mapping for all models at once because fields are shared
# across all doc types in an index. If we forget to adjust one of them
# we'll get burned later on.
for model in Addon, AppCompat, Collection, UserProfile:
index = index or model._get_index()
index = create_es_index_if_missing(index, aliased=aliased)
try:
es.put_mapping(model._meta.db_table, mapping, index)
except pyes.ElasticSearchException, e:
log.error(e)
|
bl4ck5un/fanz.io | refs/heads/master | blogging/models.py | 1 | from django.db import models
# Create your models here.
class Tag(models.Model):
text = models.CharField(max_length=50)
def __str__(self):
return self.text
class Article(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(default='-')
content = models.TextField()
add_date = models.DateTimeField('date added')
is_draft = models.BooleanField(default=True)
is_public = models.BooleanField(default=False)
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.title
|
tcalmant/ipopo | refs/heads/v1 | samples/rsa/__init__.py | 12133432 | |
struys/pip-faster | refs/heads/master | tests/unit/__init__.py | 12133432 | |
romykundal/meanjs-multiuploader | refs/heads/master | node_modules/nodemon/travis_after_all.py | 23 | import os
import sys
import json
import time
import logging
try:
from functools import reduce
except ImportError:
pass
try:
import urllib.request as urllib2
except ImportError:
import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
GITHUB_TOKEN = 'GITHUB_TOKEN'
# Travis API entry point, there are at least https://api.travis-ci.com and https://api.travis-ci.org
travis_entry = sys.argv[1] if len(sys.argv) > 1 else 'https://api.travis-ci.org'
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))
gh_token = os.getenv(GITHUB_TOKEN)
# assume the first job is the leader
is_leader = lambda job_number: job_number.endswith('.1')
job_number = os.getenv(TRAVIS_JOB_NUMBER)
if not job_number:
    # it seems that even builds with only one job never reach this branch
    log.fatal("Don't use leader detection for builds without a build matrix")
exit(1)
elif is_leader(job_number):
log.info("This is a leader")
else:
# since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_MINION=YES")
log.info("This is a minion")
exit(0)
class MatrixElement(object):
def __init__(self, json_raw):
self.is_finished = json_raw['finished_at'] is not None
self.is_succeeded = json_raw['result'] == 0
self.number = json_raw['number']
self.is_leader = is_leader(self.number)
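# Illustrative shape of one matrix entry consumed by MatrixElement above
# (field names are the ones read in __init__; the values are made up):
#   {"finished_at": "2016-01-01T00:00:00Z", "result": 0, "number": "12.2"}
#   -> is_finished=True, is_succeeded=True, is_leader=False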
def matrix_snapshot(travis_token):
"""
:return: Matrix List
"""
headers = {'content-type': 'application/json', 'Authorization': 'token {}'.format(travis_token)}
req = urllib2.Request("{0}/builds/{1}".format(travis_entry, build_id), headers=headers)
response = urllib2.urlopen(req).read()
raw_json = json.loads(response.decode('utf-8'))
matrix_without_leader = [MatrixElement(job) for job in raw_json["matrix"] if not is_leader(job['number'])]
return matrix_without_leader
def wait_others_to_finish(travis_token):
def others_finished():
"""
        Checks whether all non-leader jobs have finished.
        The leader itself is excluded, since it is the job running right now.
        :return: tuple(True or False, List of not finished jobs)
"""
snapshot = matrix_snapshot(travis_token)
finished = [job.is_finished for job in snapshot if not job.is_leader]
return reduce(lambda a, b: a and b, finished), [job.number for job in snapshot if
not job.is_leader and not job.is_finished]
while True:
finished, waiting_list = others_finished()
if finished:
break
log.info("Leader waits for minions {0}...".format(waiting_list)) # just in case do not get "silence timeout"
time.sleep(polling_interval)
def get_token():
assert gh_token, 'GITHUB_TOKEN is not set'
data = {"github_token": gh_token}
headers = {'content-type': 'application/json'}
req = urllib2.Request("{0}/auth/github".format(travis_entry), json.dumps(data).encode('utf-8'), headers)
response = urllib2.urlopen(req).read()
travis_token = json.loads(response.decode('utf-8')).get('access_token')
return travis_token
try:
token = get_token()
wait_others_to_finish(token)
final_snapshot = matrix_snapshot(token)
log.info("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))
BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
others_snapshot = [el for el in final_snapshot if not el.is_leader]
if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
log.error("Others Failed")
os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
else:
log.warn("Others Unknown")
os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
# since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))
except Exception as e:
log.fatal(e)
|
PAIR-code/lit | refs/heads/main | lit_nlp/examples/models/pretrained_lms.py | 1 | # Lint as: python3
"""Wrapper for HuggingFace models in LIT.
Includes BERT masked LM, GPT-2, and T5.
This wrapper loads a model into memory and implements a number of helper
functions to predict a batch of examples and extract information such as
hidden states and attention.
"""
import re
from typing import Dict, List, Tuple
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.lib import utils
import numpy as np
import tensorflow as tf
import transformers
def batch_encode_pretokenized(
tokenizer: transformers.tokenization_utils_base.PreTrainedTokenizerBase,
tokenized_inputs: List[List[str]]
) -> transformers.tokenization_utils_base.BatchEncoding:
"""Batch encode pre-tokenized text, without further splitting.
This is necessary because tokenizer(..., is_split_into_words=True) doesn't
guarantee that tokens will stay intact - only that the final tokens will not
span the given boundaries. If the tokenizer is called directly, you'll get
things like: "foo" "##bar" -> "foo" "#" "#" "bar"
Based on the implementation of batch_encode_plus in
https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils_base.py#L2465
but simplified to only handle single-segment inputs.
Args:
tokenizer: Transformers tokenizer
tokenized_inputs: list of tokenized inputs
Returns:
BatchEncoding, suitable for model input
"""
encoded_input = {}
for tokens in tokenized_inputs:
ids = tokenizer.convert_tokens_to_ids(tokens)
encoded = tokenizer.prepare_for_model(
ids,
add_special_tokens=True,
padding="do_not_pad",
truncation="longest_first",
return_attention_mask=False,
pad_to_multiple_of=False)
for k, v in encoded.items():
encoded_input.setdefault(k, []).append(v)
encoded_input = tokenizer.pad(
encoded_input, padding="longest", return_attention_mask=True)
return transformers.tokenization_utils_base.BatchEncoding(
encoded_input, tensor_type="tf")
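# Minimal usage sketch (assumes the tokenizer weights are cached locally or
# downloadable; "bert-base-uncased" matches the default used by BertMLM below).
# Pre-split wordpieces such as "foo", "##bar" are kept intact rather than being
# re-split:
#
#   tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-uncased")
#   batch = batch_encode_pretokenized(tokenizer, [["foo", "##bar"], ["hi"]])
#   batch["input_ids"]       # tf.Tensor, padded to the longest sequence
#   batch["attention_mask"]  # 1 for real tokens, 0 for padding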
class BertMLM(lit_model.Model):
"""BERT masked LM using Huggingface Transformers and TensorFlow 2."""
MASK_TOKEN = "[MASK]"
@property
def max_seq_length(self):
return self.model.config.max_position_embeddings
def __init__(self, model_name="bert-base-uncased", top_k=10):
super().__init__()
self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
# TODO(lit-dev): switch to TFBertForPreTraining to get the next-sentence
# prediction head as well.
self.model = transformers.TFBertForMaskedLM.from_pretrained(
model_name, output_hidden_states=True, output_attentions=True)
self.top_k = top_k
# TODO(lit-dev): break this out as a helper function, write some tests,
# and de-duplicate code with the other text generation functions.
def _get_topk_tokens(self,
scores: np.ndarray) -> List[List[Tuple[str, float]]]:
"""Convert raw scores to top-k token predictions."""
# scores is [num_tokens, vocab_size]
# Find the vocab indices of top k predictions, at each token.
# np.argpartition is faster than a full argsort for k << V,
# but we need to sort the output after slicing (see below).
index_array = np.argpartition(scores, -self.top_k, axis=1)[:, -self.top_k:]
# These are each [num_tokens, tok_k]
top_tokens = [
self.tokenizer.convert_ids_to_tokens(idxs) for idxs in index_array
]
top_scores = np.take_along_axis(scores, index_array, axis=1)
# Convert to a list of lists of (token, score) pairs,
# where inner lists are sorted in descending order of score.
return [
sorted(list(zip(toks, scores)), key=lambda ab: -ab[1])
for toks, scores in zip(top_tokens, top_scores)
]
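  # Sketch of the structure this returns (token strings and scores invented):
  # with top_k=2, a [num_tokens, vocab_size] score matrix yields something like
  #   [[("the", 0.61), ("a", 0.20)],     # predictions for position 0
  #    [("cat", 0.33), ("dog", 0.31)]]   # predictions for position 1
  # i.e. one (token, probability) list per input position, sorted descending.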
# TODO(lit-dev): consider returning indices and a vocab, since repeating
# strings is slow and redundant.
def _postprocess(self, output: Dict[str, np.ndarray]):
"""Postprocess, modifying output dict in-place."""
# Slice to remove padding, omitting initial [CLS] and final [SEP]
slicer = slice(1, output.pop("ntok") - 1)
output["tokens"] = self.tokenizer.convert_ids_to_tokens(
output.pop("input_ids")[slicer])
probas = output.pop("probas")
# Predictions at every position, regardless of masking.
output["pred_tokens"] = self._get_topk_tokens(probas[slicer])
return output
##
# LIT API implementations
def max_minibatch_size(self) -> int:
# The lit.Model base class handles batching automatically in the
# implementation of predict(), and uses this value as the batch size.
return 8
def predict_minibatch(self, inputs):
"""Predict on a single minibatch of examples."""
# If input has a 'tokens' field, use that. Otherwise tokenize the text.
tokenized_texts = [
ex.get("tokens") or self.tokenizer.tokenize(ex["text"]) for ex in inputs
]
encoded_input = batch_encode_pretokenized(self.tokenizer, tokenized_texts)
# out.logits is a single tensor
# <float32>[batch_size, num_tokens, vocab_size]
# out.hidden_states is a list of num_layers + 1 tensors, each
# <float32>[batch_size, num_tokens, h_dim]
out: transformers.modeling_tf_outputs.TFMaskedLMOutput = \
self.model(encoded_input)
batched_outputs = {
"probas": tf.nn.softmax(out.logits, axis=-1).numpy(),
"input_ids": encoded_input["input_ids"].numpy(),
"ntok": tf.reduce_sum(encoded_input["attention_mask"], axis=1).numpy(),
# last layer, first token
"cls_emb": out.hidden_states[-1][:, 0].numpy(),
}
# List of dicts, one per example.
unbatched_outputs = utils.unbatch_preds(batched_outputs)
# Postprocess to remove padding and decode predictions.
return map(self._postprocess, unbatched_outputs)
def input_spec(self):
return {
"text": lit_types.TextSegment(),
"tokens": lit_types.Tokens(mask_token="[MASK]", required=False),
}
def output_spec(self):
return {
"tokens": lit_types.Tokens(parent="text"),
"pred_tokens": lit_types.TokenTopKPreds(align="tokens"),
"cls_emb": lit_types.Embeddings(),
}
class GPT2LanguageModel(lit_model.Model):
"""Wrapper for a Huggingface Transformers GPT-2 model.
This class loads a tokenizer and model using the Huggingface library and
provides the LIT-required functions plus additional helper functions to
convert and clean tokens and to compute the top_k predictions from logits.
"""
@property
def num_layers(self):
return self.model.config.n_layer
def __init__(self, model_name="gpt2", top_k=10):
"""Constructor for GPT2LanguageModel.
Args:
model_name: gpt2, gpt2-medium, gpt2-large, gpt2-xl, distilgpt2, etc.
      top_k: How many top predictions to keep per position.
"""
super().__init__()
    # GPT2 is trained without a pad token, so pick an arbitrary one and mask it out.
self.tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name, pad_token="<pad>")
self.model = transformers.TFGPT2LMHeadModel.from_pretrained(
model_name, output_hidden_states=True, output_attentions=True)
self.top_k = top_k
@staticmethod
def clean_bpe_token(tok):
if not tok.startswith("Ġ"):
return "_" + tok
else:
return tok.replace("Ġ", "")
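  # Examples of the mapping above (GPT-2's byte-level BPE marks a leading
  # space with "Ġ"):
  #   "Ġworld" -> "world"   (token that starts a new word)
  #   "ing"    -> "_ing"    (continuation piece, prefixed for display)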
def _detokenize(self, ids):
tokens = self.tokenizer.convert_ids_to_tokens(ids)
return [self.clean_bpe_token(t) for t in tokens]
def _pred(self, encoded_inputs):
"""Predicts one batch of tokenized text.
Also performs some batch-level post-processing in TF.
Single-example postprocessing is done in _postprocess(), and operates on
numpy arrays.
Each prediction has the following returns:
logits: tf.Tensor (batch_size, sequence_length, config.vocab_size).
past: List[tf.Tensor] of length config.n_layers with each tensor shape
(2, batch_size, num_heads, sequence_length, embed_size_per_head)).
states: Tuple of tf.Tensor (one for embeddings + one for each layer),
with shape (batch_size, sequence_length, hidden_size).
attentions: Tuple of tf.Tensor (one for each layer) with shape
(batch_size, num_heads, sequence_length, sequence_length)
Within this function, we combine each Tuple/List into a single Tensor.
Args:
encoded_inputs: output of self.tokenizer.batch_encode_plus()
Returns:
payload: Dictionary with items described above, each as single Tensor.
"""
out: transformers.modeling_tf_outputs.TFCausalLMOutputWithPast = \
self.model(encoded_inputs["input_ids"])
model_probs = tf.nn.softmax(out.logits, axis=-1)
top_k = tf.math.top_k(model_probs, k=self.top_k, sorted=True, name=None)
batched_outputs = {
"input_ids": encoded_inputs["input_ids"],
"ntok": tf.reduce_sum(encoded_inputs["attention_mask"], axis=1),
"top_k_indices": top_k.indices,
"top_k_probs": top_k.values,
}
# Convert representations for each layer from tuples to single Tensor.
for i in range(len(out.attentions)):
batched_outputs[f"layer_{i:d}_attention"] = out.attentions[i]
for i in range(len(out.hidden_states)):
batched_outputs[f"layer_{i:d}_avg_embedding"] = tf.math.reduce_mean(
out.hidden_states[i], axis=1)
return batched_outputs
def _postprocess(self, preds):
"""Post-process single-example preds. Operates on numpy arrays."""
ntok = preds.pop("ntok")
ids = preds.pop("input_ids")[:ntok]
preds["tokens"] = self._detokenize(ids)
# Decode predicted top-k tokens.
# token_topk_preds will be a List[List[(word, prob)]]
# Initialize prediction for 0th token as N/A.
token_topk_preds = [[("N/A", 1.)]]
pred_ids = preds.pop("top_k_indices")[:ntok] # <int>[num_tokens, k]
pred_probs = preds.pop("top_k_probs")[:ntok] # <float32>[num_tokens, k]
for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):
token_pred_words = self._detokenize(token_pred_ids)
token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))
preds["pred_tokens"] = token_topk_preds
# Process attention.
for key in preds:
      if not re.match(r"layer_(\d+)_attention", key):
continue
# Select only real tokens, since most of this matrix is padding.
# <float32>[num_heads, max_seq_length, max_seq_length]
# -> <float32>[num_heads, num_tokens, num_tokens]
preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))
# Make a copy of this array to avoid memory leaks, since NumPy otherwise
# keeps a pointer around that prevents the source array from being GCed.
preds[key] = preds[key].copy()
return preds
##
# LIT API implementations
def max_minibatch_size(self) -> int:
# The lit.Model base class handles batching automatically in the
# implementation of predict(), and uses this value as the batch size.
return 6
def predict_minibatch(self, inputs):
"""Predict on a single minibatch of examples."""
# Preprocess inputs.
texts = [ex["text"] for ex in inputs]
encoded_inputs = self.tokenizer.batch_encode_plus(
texts,
return_tensors="tf",
add_special_tokens=True,
add_prefix_space=True,
padding="longest",
truncation="longest_first")
# Get the predictions.
batched_outputs = self._pred(encoded_inputs)
# Convert to numpy for post-processing.
detached_outputs = {k: v.numpy() for k, v in batched_outputs.items()}
# Split up batched outputs, then post-process each example.
unbatched_outputs = utils.unbatch_preds(detached_outputs)
return map(self._postprocess, unbatched_outputs)
def input_spec(self):
return {"text": lit_types.TextSegment()}
def output_spec(self):
spec = {
# the "parent" keyword tells LIT which field in the input spec we should
# compare this to when computing metrics.
"pred_tokens": lit_types.TokenTopKPreds(align="tokens"),
"tokens": lit_types.Tokens(parent="text"), # all tokens
}
# Add attention and embeddings from each layer.
for i in range(self.num_layers):
spec[f"layer_{i:d}_attention"] = lit_types.AttentionHeads(
align_in="tokens", align_out="tokens")
spec[f"layer_{i:d}_avg_embedding"] = lit_types.Embeddings()
return spec
|
jpaton/xen-4.1-LJX1 | refs/heads/master | tools/python/xen/xend/server/SrvRoot.py | 49 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
from xen.web.SrvDir import SrvDir
class SrvRoot(SrvDir):
"""The root of the xend server.
"""
"""Server sub-components. Each entry is (name, class), where
'name' is the entry name and 'class' is the name of its class.
"""
#todo Get this list from the XendOptions config.
subdirs = [
('node', 'SrvNode' ),
('domain', 'SrvDomainDir' ),
('vnet', 'SrvVnetDir' ),
]
def __init__(self):
SrvDir.__init__(self)
for (name, klass) in self.subdirs:
self.add(name, klass)
for (name, klass) in self.subdirs:
self.get(name)
def __repr__(self):
return "<SrvRoot %x %s>" %(id(self), self.table.keys())
|
Fireblend/chromium-crosswalk | refs/heads/master | media/tools/layout_tests/layouttest_analyzer_helpers.py | 120 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions for the layout test analyzer."""
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import fileinput
import os
import pickle
import re
import smtplib
import socket
import sys
import time
from bug import Bug
from test_expectations_history import TestExpectationsHistory
DEFAULT_TEST_EXPECTATION_PATH = ('trunk/LayoutTests/TestExpectations')
LEGACY_DEFAULT_TEST_EXPECTATION_PATH = (
'trunk/LayoutTests/platform/chromium/test_expectations.txt')
REVISION_LOG_URL = ('http://build.chromium.org/f/chromium/perf/dashboard/ui/'
'changelog_blink.html?url=/trunk/LayoutTests/%s&range=%d:%d')
DEFAULT_REVISION_VIEW_URL = 'http://src.chromium.org/viewvc/blink?revision=%s'
class AnalyzerResultMap:
"""A class to deal with joined result produed by the analyzer.
The join is done between layouttests and the test_expectations object
(based on the test expectation file). The instance variable |result_map|
contains the following keys: 'whole','skip','nonskip'. The value of 'whole'
contains information about all layouttests. The value of 'skip' contains
information about skipped layouttests where it has 'SKIP' in its entry in
the test expectation file. The value of 'nonskip' contains all information
about non skipped layout tests, which are in the test expectation file but
not skipped. The information is exactly same as the one parsed by the
analyzer.
"""
def __init__(self, test_info_map):
"""Initialize the AnalyzerResultMap based on test_info_map.
Test_info_map contains all layouttest information. The job here is to
classify them as 'whole', 'skip' or 'nonskip' based on that information.
Args:
test_info_map: the result map of |layouttests.JoinWithTestExpectation|.
The key of the map is test name such as 'media/media-foo.html'.
The value of the map is a map that contains the following keys:
'desc'(description), 'te_info' (test expectation information),
which is a list of test expectation information map. The key of the
test expectation information map is test expectation keywords such
as "SKIP" and other keywords (for full list of keywords, please
refer to |test_expectations.ALL_TE_KEYWORDS|).
"""
self.result_map = {}
self.result_map['whole'] = {}
self.result_map['skip'] = {}
self.result_map['nonskip'] = {}
if test_info_map:
for (k, value) in test_info_map.iteritems():
self.result_map['whole'][k] = value
if 'te_info' in value:
# Don't count SLOW PASS, WONTFIX, or ANDROID tests as failures.
if any([True for x in value['te_info'] if set(x.keys()) ==
set(['SLOW', 'PASS', 'Bugs', 'Comments', 'Platforms']) or
'WONTFIX' in x or x['Platforms'] == ['ANDROID']]):
continue
if any([True for x in value['te_info'] if 'SKIP' in x]):
self.result_map['skip'][k] = value
else:
self.result_map['nonskip'][k] = value
@staticmethod
def GetDiffString(diff_map_element, type_str):
"""Get difference string out of diff map element.
The difference string shows difference between two analyzer results
(for example, a result for now and a result for sometime in the past)
in HTML format (with colors). This is used for generating email messages.
Args:
diff_map_element: An element of the compared map generated by
|CompareResultMaps()|. The element has two lists of test cases. One
is for test names that are in the current result but NOT in the
previous result. The other is for test names that are in the previous
results but NOT in the current result. Please refer to comments in
|CompareResultMaps()| for details.
type_str: a string indicating the test group to which |diff_map_element|
belongs; used for color determination. Must be 'whole', 'skip', or
'nonskip'.
Returns:
a string in HTML format (with colors) to show difference between two
analyzer results.
"""
if not diff_map_element[0] and not diff_map_element[1]:
return 'No Change'
color = ''
diff = len(diff_map_element[0]) - len(diff_map_element[1])
if diff > 0 and type_str != 'whole':
color = 'red'
else:
color = 'green'
diff_sign = ''
if diff > 0:
diff_sign = '+'
if not diff:
whole_str = 'No Change'
else:
whole_str = '<font color="%s">%s%d</font>' % (color, diff_sign, diff)
colors = ['red', 'green']
if type_str == 'whole':
      # Bug 107773 - when the number of tests increases, the test names were
      # shown in red; they should be green, since an increase is a good thing.
colors = ['green', 'red']
str1 = ''
for (name, _) in diff_map_element[0]:
str1 += '<font color="%s">%s,</font>' % (colors[0], name)
str2 = ''
for (name, _) in diff_map_element[1]:
str2 += '<font color="%s">%s,</font>' % (colors[1], name)
if str1 or str2:
whole_str += ':'
if str1:
whole_str += str1
if str2:
whole_str += str2
# Remove the last occurrence of ','.
whole_str = ''.join(whole_str.rsplit(',', 1))
return whole_str
def GetPassingRate(self):
"""Get passing rate.
Returns:
layout test passing rate of this result in percent.
Raises:
      ValueError when the number of tests in test group "whole" is equal
or less than that of "skip".
"""
delta = len(self.result_map['whole'].keys()) - (
len(self.result_map['skip'].keys()))
if delta <= 0:
raise ValueError('The number of tests in test group "whole" is equal or '
'less than that of "skip"')
return 100 - len(self.result_map['nonskip'].keys()) * 100.0 / delta
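  # Worked example with illustrative figures (the ones from the CSV sample in
  # ConvertToCSVText below: 204 total, 22 skipped, 12 failing non-skipped):
  #   delta = 204 - 22 = 182
  #   passing rate = 100 - 12 * 100.0 / 182 ~= 93.4 (percent)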
def ConvertToCSVText(self, current_time):
"""Convert |self.result_map| into stats and issues text in CSV format.
Both are used as inputs for Google spreadsheet.
Args:
current_time: a string depicting a time in year-month-day-hour
format (e.g., 2011-11-08-16).
Returns:
a tuple of stats and issues_txt
stats: analyzer result in CSV format that shows:
(current_time, the number of tests, the number of skipped tests,
the number of failing tests, passing rate)
For example,
"2011-11-10-15,204,22,12,94"
issues_txt: issues listed in CSV format that shows:
(BUGWK or BUGCR, bug number, the test expectation entry,
the name of the test)
For example,
"BUGWK,71543,TIMEOUT PASS,media/media-element-play-after-eos.html,
BUGCR,97657,IMAGE CPU MAC TIMEOUT PASS,media/audio-repaint.html,"
"""
stats = ','.join([current_time, str(len(self.result_map['whole'].keys())),
str(len(self.result_map['skip'].keys())),
str(len(self.result_map['nonskip'].keys())),
str(self.GetPassingRate())])
issues_txt = ''
for bug_txt, test_info_list in (
self.GetListOfBugsForNonSkippedTests().iteritems()):
matches = re.match(r'(BUG(CR|WK))(\d+)', bug_txt)
bug_suffix = ''
bug_no = ''
if matches:
bug_suffix = matches.group(1)
bug_no = matches.group(3)
issues_txt += bug_suffix + ',' + bug_no + ','
for test_info in test_info_list:
test_name, te_info = test_info
issues_txt += ' '.join(te_info.keys()) + ',' + test_name + ','
issues_txt += '\n'
return stats, issues_txt
def ConvertToString(self, prev_time, diff_map, issue_detail_mode):
"""Convert this result to HTML display for email.
Args:
prev_time: the previous time string that are compared against.
diff_map: the compared map generated by |CompareResultMaps()|.
issue_detail_mode: includes the issue details in the output string if
this is True.
Returns:
a analyzer result string in HTML format.
"""
return_str = ''
if diff_map:
return_str += (
'<b>Statistics (Diff Compared to %s):</b><ul>'
'<li>The number of tests: %d (%s)</li>'
'<li>The number of failing skipped tests: %d (%s)</li>'
'<li>The number of failing non-skipped tests: %d (%s)</li>'
'<li>Passing rate: %.2f %%</li></ul>') % (
prev_time, len(self.result_map['whole'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['whole'], 'whole'),
len(self.result_map['skip'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['skip'], 'skip'),
len(self.result_map['nonskip'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['nonskip'], 'nonskip'),
self.GetPassingRate())
if issue_detail_mode:
return_str += '<b>Current issues about failing non-skipped tests:</b>'
for (bug_txt, test_info_list) in (
self.GetListOfBugsForNonSkippedTests().iteritems()):
return_str += '<ul>%s' % Bug(bug_txt)
for test_info in test_info_list:
(test_name, te_info) = test_info
gpu_link = ''
if 'GPU' in te_info:
gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
dashboard_link = ('http://test-results.appspot.com/dashboards/'
'flakiness_dashboard.html#%stests=%s') % (
gpu_link, test_name)
return_str += '<li><a href="%s">%s</a> (%s) </li>' % (
dashboard_link, test_name, ' '.join(
[key for key in te_info.keys() if key != 'Platforms']))
return_str += '</ul>\n'
return return_str
def CompareToOtherResultMap(self, other_result_map):
"""Compare this result map with the other to see if there are any diff.
The comparison is done for layouttests which belong to 'whole', 'skip',
or 'nonskip'.
Args:
other_result_map: another result map to be compared against the result
map of the current object.
Returns:
a map that has 'whole', 'skip' and 'nonskip' as keys.
Please refer to |diff_map| in |SendStatusEmail()|.
"""
comp_result_map = {}
for name in ['whole', 'skip', 'nonskip']:
if name == 'nonskip':
# Look into expectation to get diff only for non-skipped tests.
lookIntoTestExpectationInfo = True
else:
# Otherwise, only test names are compared to get diff.
lookIntoTestExpectationInfo = False
comp_result_map[name] = GetDiffBetweenMaps(
self.result_map[name], other_result_map.result_map[name],
lookIntoTestExpectationInfo)
return comp_result_map
@staticmethod
def Load(file_path):
"""Load the object from |file_path| using pickle library.
Args:
file_path: the string path to the file from which to read the result.
Returns:
a AnalyzerResultMap object read from |file_path|.
"""
file_object = open(file_path)
analyzer_result_map = pickle.load(file_object)
file_object.close()
return analyzer_result_map
def Save(self, file_path):
"""Save the object to |file_path| using pickle library.
Args:
file_path: the string path to the file in which to store the result.
"""
file_object = open(file_path, 'wb')
pickle.dump(self, file_object)
file_object.close()
def GetListOfBugsForNonSkippedTests(self):
"""Get a list of bugs for non-skipped layout tests.
This is used for generating email content.
Returns:
a mapping from bug modifier text (e.g., BUGCR1111) to a test name and
main test information string which excludes comments and bugs.
This is used for grouping test names by bug.
"""
bug_map = {}
for (name, value) in self.result_map['nonskip'].iteritems():
for te_info in value['te_info']:
main_te_info = {}
for k in te_info.keys():
if k != 'Comments' and k != 'Bugs':
main_te_info[k] = True
if 'Bugs' in te_info:
for bug in te_info['Bugs']:
if bug not in bug_map:
bug_map[bug] = []
bug_map[bug].append((name, main_te_info))
return bug_map
def SendStatusEmail(prev_time, analyzer_result_map, diff_map,
receiver_email_address, test_group_name,
appended_text_to_email, email_content, rev_str,
email_only_change_mode):
"""Send status email.
Args:
prev_time: the date string such as '2011-10-09-11'. This format has been
used in this analyzer.
analyzer_result_map: current analyzer result.
diff_map: a map that has 'whole', 'skip' and 'nonskip' as keys.
The values of the map are the result of |GetDiffBetweenMaps()|.
The element has two lists of test cases. One (with index 0) is for
test names that are in the current result but NOT in the previous
result. The other (with index 1) is for test names that are in the
previous results but NOT in the current result.
For example (test expectation information is omitted for
simplicity),
comp_result_map['whole'][0] = ['foo1.html']
comp_result_map['whole'][1] = ['foo2.html']
This means that current result has 'foo1.html' but it is NOT in the
previous result. This also means the previous result has 'foo2.html'
but it is NOT in the current result.
receiver_email_address: receiver's email address.
test_group_name: string representing the test group name (e.g., 'media').
appended_text_to_email: a text which is appended at the end of the status
email.
email_content: an email content string that will be shown on the dashboard.
rev_str: a revision string that contains revision information that is sent
out in the status email. It is obtained by calling
|GetRevisionString()|.
email_only_change_mode: send email only when there is a change if this is
True. Otherwise, always send email after each run.
"""
if rev_str:
email_content += '<br><b>Revision Information:</b>'
email_content += rev_str
localtime = time.asctime(time.localtime(time.time()))
change_str = ''
if email_only_change_mode:
change_str = 'Status Change '
subject = 'Layout Test Analyzer Result %s(%s): %s' % (change_str,
test_group_name,
localtime)
SendEmail('[email protected]', [receiver_email_address],
subject, email_content + appended_text_to_email)
def GetRevisionString(prev_time, current_time, diff_map):
"""Get a string for revision information during the specified time period.
Args:
prev_time: the previous time as a floating point number expressed
in seconds since the epoch, in UTC.
current_time: the current time as a floating point number expressed
in seconds since the epoch, in UTC. It is typically obtained by
time.time() function.
diff_map: a map that has 'whole', 'skip' and 'nonskip' as keys.
Please refer to |diff_map| in |SendStatusEmail()|.
Returns:
a tuple of strings:
1) full string containing links, author, date, and line for each
change in the test expectation file.
2) shorter string containing only links to the change. Used for
trend graph annotations.
3) last revision number for the given test group.
4) last revision date for the given test group.
"""
if not diff_map:
return ('', '', '', '')
testname_map = {}
for test_group in ['skip', 'nonskip']:
for i in range(2):
for (k, _) in diff_map[test_group][i]:
testname_map[k] = True
rev_infos = TestExpectationsHistory.GetDiffBetweenTimes(prev_time,
current_time,
testname_map.keys())
rev_str = ''
simple_rev_str = ''
rev = ''
rev_date = ''
if rev_infos:
# Get latest revision number and date.
rev = rev_infos[-1][1]
rev_date = rev_infos[-1][3]
for rev_info in rev_infos:
(old_rev, new_rev, author, date, _, target_lines) = rev_info
# test_expectations.txt was renamed to TestExpectations at r119317.
new_path = DEFAULT_TEST_EXPECTATION_PATH
if new_rev < 119317:
new_path = LEGACY_DEFAULT_TEST_EXPECTATION_PATH
old_path = DEFAULT_TEST_EXPECTATION_PATH
if old_rev < 119317:
old_path = LEGACY_DEFAULT_TEST_EXPECTATION_PATH
link = REVISION_LOG_URL % (new_path, old_rev, new_rev)
rev_str += '<ul><a href="%s">%s->%s</a>\n' % (link, old_rev, new_rev)
simple_rev_str = '<a href="%s">%s->%s</a>,' % (link, old_rev, new_rev)
rev_str += '<li>%s</li>\n' % author
rev_str += '<li>%s</li>\n<ul>' % date
for line in target_lines:
# Find *.html pattern (test name) and replace it with the link to
# flakiness dashboard.
test_name_pattern = r'(\S+.html)'
match = re.search(test_name_pattern, line)
if match:
test_name = match.group(1)
gpu_link = ''
if 'GPU' in line:
gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
dashboard_link = ('http://test-results.appspot.com/dashboards/'
'flakiness_dashboard.html#%stests=%s') % (
gpu_link, test_name)
line = line.replace(test_name, '<a href="%s">%s</a>' % (
dashboard_link, test_name))
# Find bug text and replace it with the link to the bug.
bug = Bug(line)
if bug.bug_txt:
line = '<li>%s</li>\n' % line.replace(bug.bug_txt, str(bug))
rev_str += line
rev_str += '</ul></ul>'
return (rev_str, simple_rev_str, rev, rev_date)
def SendEmail(sender_email_address, receivers_email_addresses, subject,
message):
"""Send email using localhost's mail server.
Args:
sender_email_address: sender's email address.
receivers_email_addresses: receiver's email addresses.
subject: subject string.
message: email message.
"""
try:
html_top = """
<html>
<head></head>
<body>
"""
html_bot = """
</body>
</html>
"""
html = html_top + message + html_bot
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender_email_address
msg['To'] = receivers_email_addresses[0]
part1 = MIMEText(html, 'html')
smtp_obj = smtplib.SMTP('localhost')
msg.attach(part1)
smtp_obj.sendmail(sender_email_address, receivers_email_addresses,
msg.as_string())
print 'Successfully sent email'
except smtplib.SMTPException, ex:
print 'Authentication failed:', ex
print 'Error: unable to send email'
except (socket.gaierror, socket.error, socket.herror), ex:
print ex
print 'Error: unable to send email'
def FindLatestTime(time_list):
"""Find latest time from |time_list|.
The current status is compared to the status of the latest file in
|RESULT_DIR|.
Args:
time_list: a list of time string in the form of 'Year-Month-Day-Hour'
(e.g., 2011-10-23-23). Strings not in this format are ignored.
Returns:
a string representing latest time among the time_list or None if
|time_list| is empty or no valid date string in |time_list|.
"""
if not time_list:
return None
latest_date = None
for time_element in time_list:
try:
item_date = datetime.strptime(time_element, '%Y-%m-%d-%H')
if latest_date is None or latest_date < item_date:
latest_date = item_date
except ValueError:
# Do nothing.
pass
if latest_date:
return latest_date.strftime('%Y-%m-%d-%H')
else:
return None
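# Example (hypothetical input): FindLatestTime(['2011-10-23-23', 'README',
# '2011-10-24-01']) returns '2011-10-24-01'; entries that do not parse with
# the '%Y-%m-%d-%H' format are simply skipped.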
def ReplaceLineInFile(file_path, search_exp, replace_line):
"""Replace line which has |search_exp| with |replace_line| within a file.
Args:
file_path: the file that is being replaced.
search_exp: search expression to find a line to be replaced.
replace_line: the new line.
"""
for line in fileinput.input(file_path, inplace=1):
if search_exp in line:
line = replace_line
sys.stdout.write(line)
def FindLatestResult(result_dir):
"""Find the latest result in |result_dir| and read and return them.
  This is used to compare the current analyzer result against the most
  recent known result.
Args:
result_dir: the result directory.
Returns:
A tuple of filename (latest_time) and the latest analyzer result.
Returns None if there is no file or no file that matches the file
patterns used ('%Y-%m-%d-%H').
"""
dir_list = os.listdir(result_dir)
file_name = FindLatestTime(dir_list)
if not file_name:
return None
file_path = os.path.join(result_dir, file_name)
return (file_name, AnalyzerResultMap.Load(file_path))
def GetDiffBetweenMaps(map1, map2, lookIntoTestExpectationInfo=False):
"""Get difference between maps.
Args:
map1: analyzer result map to be compared.
map2: analyzer result map to be compared.
lookIntoTestExpectationInfo: a boolean to indicate whether to compare
test expectation information in addition to just the test case names.
Returns:
a tuple of |name1_list| and |name2_list|. |Name1_list| contains all test
name and the test expectation information in |map1| but not in |map2|.
|Name2_list| contains all test name and the test expectation
information in |map2| but not in |map1|.
"""
def GetDiffBetweenMapsHelper(map1, map2, lookIntoTestExpectationInfo):
"""A helper function for GetDiffBetweenMaps.
Args:
map1: analyzer result map to be compared.
map2: analyzer result map to be compared.
lookIntoTestExpectationInfo: a boolean to indicate whether to compare
test expectation information in addition to just the test case names.
Returns:
a list of tuples (name, te_info) that are in |map1| but not in |map2|.
"""
name_list = []
for (name, value1) in map1.iteritems():
if name in map2:
if lookIntoTestExpectationInfo and 'te_info' in value1:
list1 = value1['te_info']
list2 = map2[name]['te_info']
te_diff = [item for item in list1 if not item in list2]
if te_diff:
name_list.append((name, te_diff))
else:
name_list.append((name, value1))
return name_list
return (GetDiffBetweenMapsHelper(map1, map2, lookIntoTestExpectationInfo),
GetDiffBetweenMapsHelper(map2, map1, lookIntoTestExpectationInfo))
|
rahushen/ansible | refs/heads/devel | lib/ansible/modules/cloud/vultr/vr_startup_script.py | 39 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vr_startup_script
short_description: Manages startup scripts on Vultr.
description:
- Create, update and remove startup scripts.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- The script name.
required: true
script_type:
description:
      - The script type; it cannot be changed once created.
default: boot
choices: [ boot, pxe ]
aliases: [ type ]
script:
description:
- The script source code.
- Required if (state=present).
state:
description:
- State of the script.
default: present
choices: [ present, absent ]
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: ensure a pxe script exists, source from a file
local_action:
module: vr_startup_script
name: my_web_script
script_type: pxe
script: "{{ lookup('file', 'path/to/script') }}"
- name: ensure a boot script exists
local_action:
module: vr_startup_script
name: vr_startup_script
script: "#!/bin/bash\necho Hello World > /root/hello"
- name: ensure a script is absent
local_action:
module: vr_startup_script
name: my_web_script
state: absent
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: string
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: string
sample: "https://api.vultr.com"
vultr_startup_script:
description: Response from Vultr API
returned: success
type: complex
contains:
id:
description: ID of the startup script.
returned: success
type: string
sample: 249395
name:
description: Name of the startup script.
returned: success
type: string
sample: my startup script
script:
description: The source code of the startup script.
returned: success
type: string
sample: "#!/bin/bash\necho Hello World > /root/hello"
script_type:
description: The type of the startup script.
returned: success
type: string
sample: pxe
date_created:
description: Date the startup script was created.
returned: success
type: string
sample: "2017-08-26 12:47:48"
date_modified:
description: Date the startup script was modified.
returned: success
type: string
sample: "2017-08-26 12:47:48"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrStartupScript(Vultr):
def __init__(self, module):
super(AnsibleVultrStartupScript, self).__init__(module, "vultr_startup_script")
self.returns = {
'SCRIPTID': dict(key='id'),
'type': dict(key='script_type'),
'name': dict(key='name'),
'script': dict(),
'date_created': dict(),
'date_modified': dict(),
}
def get_script(self):
scripts = self.api_query(path="/v1/startupscript/list")
name = self.module.params.get('name')
if scripts:
for script_id, script_data in scripts.items():
if script_data.get('name') == name:
return script_data
return {}
def present_script(self):
script = self.get_script()
if not script:
script = self._create_script(script)
else:
script = self._update_script(script)
return script
def _create_script(self, script):
self.result['changed'] = True
data = {
'name': self.module.params.get('name'),
'script': self.module.params.get('script'),
'type': self.module.params.get('script_type'),
}
self.result['diff']['before'] = {}
self.result['diff']['after'] = data
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/create",
method="POST",
data=data
)
script = self.get_script()
return script
def _update_script(self, script):
if script['script'] != self.module.params.get('script'):
self.result['changed'] = True
data = {
'SCRIPTID': script['SCRIPTID'],
'script': self.module.params.get('script'),
}
self.result['diff']['before'] = script
self.result['diff']['after'] = script.copy()
self.result['diff']['after'].update(data)
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/update",
method="POST",
data=data
)
script = self.get_script()
return script
def absent_script(self):
script = self.get_script()
if script:
self.result['changed'] = True
data = {
'SCRIPTID': script['SCRIPTID'],
}
self.result['diff']['before'] = script
self.result['diff']['after'] = {}
if not self.module.check_mode:
self.api_query(
path="/v1/startupscript/destroy",
method="POST",
data=data
)
return script
def main():
argument_spec = vultr_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
script=dict(),
script_type=dict(default='boot', choices=['boot', 'pxe'], aliases=['type']),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
('state', 'present', ['script']),
],
supports_check_mode=True,
)
vr_script = AnsibleVultrStartupScript(module)
if module.params.get('state') == "absent":
script = vr_script.absent_script()
else:
script = vr_script.present_script()
result = vr_script.get_result(script)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
talonchandler/dipsim | refs/heads/master | notes/2017-10-10-voxel-reconstruction/figures/assemble.py | 1 | import matplotlib.pyplot as plt
import numpy as np
from dipsim import util
# Choose files
name_head = '/Users/Talon/Dropbox/20170725_Bob_Actin_results/Cell1_LSimaging_registerred/SPIM'
names = ['A_reg_P1.tif', 'A_reg_P2.tif', 'A_reg_P3.tif', 'A_reg_P4.tif',
'B_reg_P1.tif', 'B_reg_P2.tif', 'B_reg_P3.tif', 'B_reg_P4.tif']
input_files = [name_head + name for name in names]
input_files = np.array(input_files).reshape(2, 4)
# Choose labels
col_labels = [0, 45, 90, 135]
col_labels = ['$'+str(lab)+'^{\circ}$' for lab in col_labels]
row_labels = ['View A: 1.1 NA', 'View B: 0.71 NA']
# Plot array
util.plot_array(input_files, 'octet.pdf',
row_labels=row_labels, col_labels=col_labels,
line_start=(215, 635), line_end=(265, 585), zslice=175,
roi_upper_left=(215, 585), roi_wh=(50, 50))
util.plot_array(input_files, 'roi-octet.pdf',
row_labels=row_labels, col_labels=col_labels,
line_start=(0, 49), line_end=(49, 0), zslice=175,
roi_upper_left=(215, 585), roi_wh=(50, 50), plot_roi=True)
|
MooglyGuy/mame | refs/heads/master | 3rdparty/catch/scripts/approvalTests.py | 38 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import subprocess
import re
import difflib
import scriptCommon
from scriptCommon import catchPath
rootPath = os.path.join(catchPath, 'projects/SelfTest/Baselines')
filelocParser = re.compile(r'''
.*/
(.+\.[ch]pp) # filename
(?::|\() # : is starting separator between filename and line number on Linux, ( on Windows
([0-9]*) # line number
\)? # Windows also has an ending separator, )
''', re.VERBOSE)
lineNumberParser = re.compile(r' line="[0-9]*"')
hexParser = re.compile(r'\b(0[xX][0-9a-fA-F]+)\b')
durationsParser = re.compile(r' time="[0-9]*\.[0-9]*"')
timestampsParser = re.compile(r' timestamp="\d{4}-\d{2}-\d{2}T\d{2}\:\d{2}\:\d{2}Z"')
versionParser = re.compile(r'Catch v[0-9]+\.[0-9]+\.[0-9]+(-develop\.[0-9]+)?')
nullParser = re.compile(r'\b(__null|nullptr)\b')
exeNameParser = re.compile(r'''
\b
(CatchSelfTest|SelfTest) # Expected executable name
(?:.exe)? # Executable name contains .exe on Windows.
\b
''', re.VERBOSE)
# This is a hack until something more reasonable is figured out
specialCaseParser = re.compile(r'file\((\d+)\)')
if len(sys.argv) == 2:
cmdPath = sys.argv[1]
else:
cmdPath = os.path.join(catchPath, scriptCommon.getBuildExecutable())
overallResult = 0
def diffFiles(fileA, fileB):
with open(fileA, 'r') as file:
aLines = file.readlines()
with open(fileB, 'r') as file:
bLines = file.readlines()
shortenedFilenameA = fileA.rsplit(os.sep, 1)[-1]
shortenedFilenameB = fileB.rsplit(os.sep, 1)[-1]
diff = difflib.unified_diff(aLines, bLines, fromfile=shortenedFilenameA, tofile=shortenedFilenameB, n=0)
return [line for line in diff if line[0] in ('+', '-')]
def filterLine(line):
if catchPath in line:
# make paths relative to Catch root
line = line.replace(catchPath + os.sep, '')
# go from \ in windows paths to /
line = line.replace('\\', '/')
# strip source line numbers
m = filelocParser.match(line)
if m:
# note that this also strips directories, leaving only the filename
filename, lnum = m.groups()
lnum = ":<line number>" if lnum else ""
line = filename + lnum + line[m.end():]
else:
line = lineNumberParser.sub(" ", line)
# strip Catch version number
line = versionParser.sub("<version>", line)
# replace *null* with 0
line = nullParser.sub("0", line)
# strip executable name
line = exeNameParser.sub("<exe-name>", line)
# strip hexadecimal numbers (presumably pointer values)
line = hexParser.sub("0x<hex digits>", line)
# strip durations and timestamps
line = durationsParser.sub(' time="{duration}"', line)
line = timestampsParser.sub(' timestamp="{iso8601-timestamp}"', line)
line = specialCaseParser.sub('file:\g<1>', line)
return line
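# Illustrative effect of the normalisation above (paths and values invented):
#   ".../projects/SelfTest/Foo.cpp:123: passed" -> "Foo.cpp:<line number>: passed"
#   "0x7fff5694"                                -> "0x<hex digits>"
#   ' time="0.034"'                             -> ' time="{duration}"'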
def approve(baseName, args):
global overallResult
args[0:0] = [cmdPath]
if not os.path.exists(cmdPath):
raise Exception("Executable doesn't exist at " + cmdPath)
baselinesPath = os.path.join(rootPath, '{0}.approved.txt'.format(baseName))
rawResultsPath = os.path.join(rootPath, '_{0}.tmp'.format(baseName))
filteredResultsPath = os.path.join(rootPath, '{0}.unapproved.txt'.format(baseName))
f = open(rawResultsPath, 'w')
subprocess.call(args, stdout=f, stderr=f)
f.close()
rawFile = open(rawResultsPath, 'r')
filteredFile = open(filteredResultsPath, 'w')
for line in rawFile:
filteredFile.write(filterLine(line).rstrip() + "\n")
filteredFile.close()
rawFile.close()
os.remove(rawResultsPath)
print()
print(baseName + ":")
if os.path.exists(baselinesPath):
diffResult = diffFiles(baselinesPath, filteredResultsPath)
if diffResult:
print(''.join(diffResult))
print(" \n****************************\n \033[91mResults differed")
if len(diffResult) > overallResult:
overallResult = len(diffResult)
else:
os.remove(filteredResultsPath)
print(" \033[92mResults matched")
print("\033[0m")
else:
print(" first approval")
if overallResult == 0:
overallResult = 1
print("Running approvals against executable:")
print(" " + cmdPath)
# Standard console reporter
approve("console.std", ["~[c++11]~[!nonportable]", "--order", "lex"])
# console reporter, include passes, warn about No Assertions
approve("console.sw", ["~[c++11]~[!nonportable]", "-s", "-w", "NoAssertions", "--order", "lex"])
# console reporter, include passes, warn about No Assertions, limit failures to first 4
approve("console.swa4", ["~[c++11]~[!nonportable]", "-s", "-w", "NoAssertions", "-x", "4", "--order", "lex"])
# junit reporter, include passes, warn about No Assertions
approve("junit.sw", ["~[c++11]~[!nonportable]", "-s", "-w", "NoAssertions", "-r", "junit", "--order", "lex"])
# xml reporter, include passes, warn about No Assertions
approve("xml.sw", ["~[c++11]~[!nonportable]", "-s", "-w", "NoAssertions", "-r", "xml", "--order", "lex"])
if overallResult != 0:
print("If these differenecs are expected run approve.py to approve new baselines")
exit(overallResult)
|
cortesi/mitmproxy | refs/heads/master | examples/simple/add_header.py | 6 | from mitmproxy import http
def response(flow: http.HTTPFlow) -> None:
flow.response.headers["newheader"] = "foo"
|
mationic/pyload | refs/heads/stable | module/plugins/hoster/UlozTo.py | 6 | # -*- coding: utf-8 -*-
import re
import time
from module.common.json_layer import json_loads
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
def convert_decimal_prefix(m):
#: Decimal prefixes used in filesize and traffic
return ("%%.%df" % {'k': 3, 'M': 6, 'G': 9}[m.group(2)] % float(m.group(1))).replace('.', '')
class UlozTo(SimpleHoster):
__name__ = "UlozTo"
__type__ = "hoster"
__version__ = "1.15"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj\.cz|zachowajto\.pl)/(?:live/)?(?P<ID>\w+/[^/?]*)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Uloz.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
NAME_PATTERN = r'(<p>File <strong>|<title>)(?P<N>.+?)(<| \|)'
SIZE_PATTERN = r'<span id="fileSize">.*?(?P<S>[\d.,]+\s[kMG]?B)</span>'
OFFLINE_PATTERN = r'<title>404 - Page not found</title>|<h1 class="h1">File (has been deleted|was banned)</h1>'
URL_REPLACEMENTS = [(r'(?<=http://)([^/]+)', "www.ulozto.net")]
SIZE_REPLACEMENTS = [(r'([\d.]+)\s([kMG])B', convert_decimal_prefix)]
CHECK_TRAFFIC = True
ADULT_PATTERN = r'<form action="(.+?)" method="post" id="frm-askAgeForm">'
PASSWD_PATTERN = r'<div class="passwordProtectedFile">'
VIPLINK_PATTERN = r'<a href=".+?\?disclaimer=1" class="linkVip">'
TOKEN_PATTERN = r'<input type="hidden" name="_token_" .*?value="(.+?)"'
def setup(self):
self.chunk_limit = 16 if self.premium else 1
self.multiDL = True
self.resume_download = True
def handle_free(self, pyfile):
action, inputs = self.parse_html_form('id="frm-downloadDialog-freeDownloadForm"')
if not action or not inputs:
self.error(_("Free download form not found"))
self.log_debug("inputs.keys = " + str(inputs.keys()))
#: Get and decrypt captcha
if all(key in inputs for key in ("captcha_value", "captcha_id", "captcha_key")):
#: Old version - last seen 9.12.2013
self.log_debug('Using "old" version')
captcha_value = self.captcha.decrypt("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
self.log_debug("CAPTCHA ID: " + inputs['captcha_id'] + ", CAPTCHA VALUE: " + captcha_value)
inputs.update({'captcha_id': inputs['captcha_id'], 'captcha_key': inputs['captcha_key'], 'captcha_value': captcha_value})
elif all(key in inputs for key in ("captcha_value", "timestamp", "salt", "hash")):
#: New version - better to get new parameters (like captcha reload) because of image url - since 6.12.2013
self.log_debug('Using "new" version')
xapca = self.load("http://www.ulozto.net/reloadXapca.php",
get={'rnd': str(int(time.time()))})
xapca = xapca.replace('sound":"', 'sound":"http:').replace('image":"', 'image":"http:')
self.log_debug("xapca = " + str(xapca))
data = json_loads(xapca)
captcha_value = self.captcha.decrypt(str(data['image']))
self.log_debug("CAPTCHA HASH: " + data['hash'], "CAPTCHA SALT: " + str(data['salt']), "CAPTCHA VALUE: " + captcha_value)
inputs.update({'timestamp': data['timestamp'], 'salt': data['salt'], 'hash': data['hash'], 'captcha_value': captcha_value})
else:
self.error(_("CAPTCHA form changed"))
self.download("http://www.ulozto.net" + action, post=inputs)
def handle_premium(self, pyfile):
self.download(pyfile.url, get={'do': "directDownload"})
def check_errors(self):
if re.search(self.ADULT_PATTERN, self.html):
self.log_info(_("Adult content confirmation needed"))
m = re.search(self.TOKEN_PATTERN, self.html)
if m is None:
self.error(_("TOKEN_PATTERN not found"))
self.html = self.load(pyfile.url,
get={'do': "askAgeForm-submit"},
post={'agree': "Confirm", '_token_': m.group(1)})
if self.PASSWD_PATTERN in self.html:
password = self.get_password()
if password:
self.log_info(_("Password protected link, trying ") + password)
self.html = self.load(pyfile.url,
get={'do': "passwordProtectedForm-submit"},
post={'password': password, 'password_send': 'Send'})
if self.PASSWD_PATTERN in self.html:
self.fail(_("Incorrect password"))
else:
self.fail(_("No password found"))
if re.search(self.VIPLINK_PATTERN, self.html):
self.html = self.load(pyfile.url, get={'disclaimer': "1"})
return super(UlozTo, self).check_errors()
def check_file(self):
check = self.check_download({
'wrong_captcha': ">An error ocurred while verifying the user",
'offline' : re.compile(self.OFFLINE_PATTERN),
'passwd' : self.PASSWD_PATTERN,
            'server_error' : 'src="http://img.ulozto.cz/error403/vykricnik.jpg"', #: Parallel dl, server overload etc.
'not_found' : "<title>Ulož.to</title>"
})
if check == "wrong_captcha":
self.captcha.invalid()
self.retry(msg=_("Wrong captcha code"))
elif check == "offline":
self.offline()
elif check == "passwd":
self.fail(_("Wrong password"))
elif check == "server_error":
self.log_error(_("Server error, try downloading later"))
self.multiDL = False
self.wait(1 * 60 * 60, True)
self.retry()
elif check == "not_found":
self.fail(_("Server error, file not downloadable"))
return super(UlozTo, self).check_file()
getInfo = create_getInfo(UlozTo)
|
pramasoul/micropython | refs/heads/master | tests/basics/gen_yield_from_ducktype.py | 107 | class MyGen:
def __init__(self):
self.v = 0
def __iter__(self):
return self
def __next__(self):
self.v += 1
if self.v > 5:
raise StopIteration
return self.v
def gen():
yield from MyGen()
def gen2():
yield from gen()
print(list(gen()))
print(list(gen2()))
class Incrementer:
def __iter__(self):
return self
def __next__(self):
return self.send(None)
def send(self, val):
if val is None:
return "Incrementer initialized"
return val + 1
def gen3():
yield from Incrementer()
g = gen3()
print(next(g))
print(g.send(5))
print(g.send(100))
#
# Test proper handling of StopIteration vs other exceptions
#
class MyIter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration(42)
def gen4():
global ret
ret = yield from MyIter()
1//0
ret = None
try:
print(list(gen4()))
except ZeroDivisionError:
print("ZeroDivisionError")
print(ret)
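# Reading of the block above: MyIter.__next__ raises StopIteration(42)
# immediately, so the `yield from` expression evaluates to 42 and is stored in
# the global `ret`; the following 1//0 then raises ZeroDivisionError, which the
# caller catches. Expected output here: "ZeroDivisionError" followed by 42.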
|
arun6582/django | refs/heads/master | tests/basic/tests.py | 22 | import threading
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "'foo' is an invalid keyword argument for this function"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(
Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45),
)
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported_edge_case(self):
# In MySQL, microsecond-level precision isn't always available. You'll
# lose microsecond-level precision once the data is saved.
a = Article.objects.create(
headline='Article',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertEqual(
Article.objects.get(pk=a.pk).pub_date,
datetime(2008, 12, 31, 23, 59, 59),
)
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance('', EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
with self.assertRaises(TypeError):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline='foo', pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = 'bar'
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, 'foo')
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
class ModelLookupTest(TestCase):
def setUp(self):
# Create an Article.
self.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
self.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors check only one lookup
# in a single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'_insert',
'_update',
'raw',
'union',
'intersection',
'difference',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaises(DatabaseError):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. We patch Article's manager, because
# proxy models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaises(DatabaseError):
asos.save(force_update=True)
with self.assertRaises(DatabaseError):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def _truncate_ms(self, val):
# MySQL < 5.6.4 removes microseconds from the datetimes which can cause
# problems when comparing the original value to that loaded from DB
return val - timedelta(microseconds=val.microsecond)
def test_refresh(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
with self.assertRaises(TypeError):
s.refresh_from_db(unknown_kwarg=10)
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query is
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = self._truncate_ms(datetime.now())
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
|
laperry1/android_external_chromium_org | refs/heads/cm-12.1 | third_party/pexpect/pxssh.py | 171 | """This class extends pexpect.spawn to specialize setting up SSH connections.
This adds methods for login, logout, and expecting the shell prompt.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from pexpect import *
import pexpect
import time
import os
__all__ = ['ExceptionPxssh', 'pxssh']
# Exception classes used by this module.
class ExceptionPxssh(ExceptionPexpect):
"""Raised for pxssh exceptions.
"""
class pxssh (spawn):
"""This class extends pexpect.spawn to specialize setting up SSH
connections. This adds methods for login, logout, and expecting the shell
prompt. It does various tricky things to handle many situations in the SSH
login process. For example, if the session is your first login, then pxssh
automatically accepts the remote certificate; or if you have public key
authentication set up then pxssh won't wait for the password prompt.
pxssh uses the shell prompt to synchronize output from the remote host. In
order to make this more robust it sets the shell prompt to something more
unique than just $ or #. This should work on most Bourne/Bash or Csh style
shells.
Example that runs a few commands on a remote server and prints the result::
import pxssh
import getpass
try:
s = pxssh.pxssh()
hostname = raw_input('hostname: ')
username = raw_input('username: ')
password = getpass.getpass('password: ')
s.login (hostname, username, password)
s.sendline ('uptime') # run a command
s.prompt() # match the prompt
print s.before # print everything before the prompt.
s.sendline ('ls -l')
s.prompt()
print s.before
s.sendline ('df')
s.prompt()
print s.before
s.logout()
except pxssh.ExceptionPxssh, e:
print "pxssh failed on login."
print str(e)
Note that if you have ssh-agent running while doing development with pxssh
then this can lead to a lot of confusion. Many X display managers (xdm,
gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
dialog box popup asking for a password during development. You should turn
off any key agents during testing. The 'force_password' attribute will turn
off public key authentication. This will only work if the remote SSH server
is configured to allow password logins. Example of using 'force_password'
attribute::
s = pxssh.pxssh()
s.force_password = True
hostname = raw_input('hostname: ')
username = raw_input('username: ')
password = getpass.getpass('password: ')
s.login (hostname, username, password)
"""
def __init__ (self, timeout=30, maxread=2000, searchwindowsize=None, logfile=None, cwd=None, env=None):
spawn.__init__(self, None, timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
self.name = '<pxssh>'
#SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
#slightly different string than the regular expression to match it. This
#is because when you set the prompt the command will echo back, but we
#don't want to match the echoed command. So if we make the set command
#slightly different than the regex we eliminate the problem. To make the
#set command different we add a backslash in front of $. The $ doesn't
#need to be escaped, but it doesn't hurt and serves to make the set
#prompt command different than the regex.
# used to match the command-line prompt
self.UNIQUE_PROMPT = "\[PEXPECT\][\$\#] "
self.PROMPT = self.UNIQUE_PROMPT
# used to set shell command-line prompt to UNIQUE_PROMPT.
self.PROMPT_SET_SH = "PS1='[PEXPECT]\$ '"
self.PROMPT_SET_CSH = "set prompt='[PEXPECT]\$ '"
self.SSH_OPTS = ("-o'RSAAuthentication=no'"
+ " -o 'PubkeyAuthentication=no'")
# Disabling host key checking makes you vulnerable to MITM attacks.
# + " -o 'StrictHostKeyChecking=no'"
# + " -o 'UserKnownHostsFile /dev/null' ")
# Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
# displaying a GUI password dialog. I have not figured out how to
# disable only SSH_ASKPASS without also disabling X11 forwarding.
# Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
#self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
self.force_password = False
self.auto_prompt_reset = True
def levenshtein_distance(self, a,b):
"""This calculates the Levenshtein distance between a and b.
"""
n, m = len(a), len(b)
if n > m:
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0]*n
for j in range(1,n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def sync_original_prompt (self):
"""This attempts to find the prompt. Basically, press enter and record
the response; press enter again and record the response; if the two
responses are similar then assume we are at the original prompt. This
is a slow function. It can take over 10 seconds. """
# All of these timing pace values are magic.
# I came up with these based on what seemed reliable for
# connecting to a heavily loaded machine I have.
self.sendline()
time.sleep(0.1)
# If latency is worse than these values then this will fail.
try:
# Clear the buffer before getting the prompt.
self.read_nonblocking(size=10000,timeout=1)
except TIMEOUT:
pass
time.sleep(0.1)
self.sendline()
time.sleep(0.5)
x = self.read_nonblocking(size=1000,timeout=1)
time.sleep(0.1)
self.sendline()
time.sleep(0.5)
a = self.read_nonblocking(size=1000,timeout=1)
time.sleep(0.1)
self.sendline()
time.sleep(0.5)
b = self.read_nonblocking(size=1000,timeout=1)
ld = self.levenshtein_distance(a,b)
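# Heuristic used below: the two reads are assumed to show the same prompt
# when fewer than 40% of their characters differ.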
len_a = len(a)
if len_a == 0:
return False
if float(ld)/len_a < 0.4:
return True
return False
### TODO: This is getting messy and I'm pretty sure this isn't perfect.
### TODO: I need to draw a flow chart for this.
def login (self,server,username,password='',terminal_type='ansi',original_prompt=r"[#$]",login_timeout=10,port=None,auto_prompt_reset=True,ssh_key=None):
"""This logs the user into the given server. It uses the
'original_prompt' to try to find the prompt right after login. When it
finds the prompt it immediately tries to reset the prompt to something
more easily matched. The default 'original_prompt' is very optimistic
and is easily fooled. It's more reliable to try to match the original
prompt as exactly as possible to prevent false matches by server
strings such as the "Message Of The Day". On many systems you can
disable the MOTD on the remote server by creating a zero-length file
called "~/.hushlogin" on the remote server. If a prompt cannot be found
then this will not necessarily cause the login to fail. In the case of
a timeout when looking for the prompt we assume that the original
prompt was so weird that we could not match it, so we use a few tricks
to guess when we have reached the prompt. Then we hope for the best and
blindly try to reset the prompt to something more unique. If that fails
then login() raises an ExceptionPxssh exception.
In some situations it is not possible or desirable to reset the
original prompt. In this case, set 'auto_prompt_reset' to False to
inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
uses a unique prompt in the prompt() method. If the original prompt is
not reset then this will disable the prompt() method unless you
manually set the PROMPT attribute. """
ssh_options = '-q'
if self.force_password:
ssh_options = ssh_options + ' ' + self.SSH_OPTS
if port is not None:
ssh_options = ssh_options + ' -p %s'%(str(port))
if ssh_key is not None:
try:
os.path.isfile(ssh_key)
except:
raise ExceptionPxssh ('private ssh key does not exist')
ssh_options = ssh_options + ' -i %s' % (ssh_key)
cmd = "ssh %s -l %s %s" % (ssh_options, username, server)
# This does not distinguish between a remote server 'password' prompt
# and a local ssh 'passphrase' prompt (for unlocking a private key).
spawn._spawn(self, cmd)
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT, "(?i)connection closed by remote host"], timeout=login_timeout)
# First phase
if i==0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
self.sendline("yes")
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
if i==2: # password or passphrase
self.sendline(password)
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
if i==4:
self.sendline(terminal_type)
i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT])
# Second phase
if i==0:
# This is weird. This should not happen twice in a row.
self.close()
raise ExceptionPxssh ('Weird error. Got "are you sure" prompt twice.')
elif i==1: # can occur if you have a public key pair set to authenticate.
### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
pass
elif i==2: # password prompt again
# For incorrect passwords, some ssh servers will
# ask for the password again, others return 'denied' right away.
# If we get the password prompt again then this means
# we didn't get the password right the first time.
self.close()
raise ExceptionPxssh ('password refused')
elif i==3: # permission denied -- password was bad.
self.close()
raise ExceptionPxssh ('permission denied')
elif i==4: # terminal type again? WTF?
self.close()
raise ExceptionPxssh ('Weird error. Got "terminal type" prompt twice.')
elif i==5: # Timeout
#This is tricky... I presume that we are at the command-line prompt.
#It may be that the shell prompt was so weird that we couldn't match
#it. Or it may be that we couldn't log in for some other reason. I
#can't be sure, but it's safe to guess that we did login because if
#I presume wrong and we are not logged in then this should be caught
#later when I try to set the shell prompt.
pass
elif i==6: # Connection closed by remote host
self.close()
raise ExceptionPxssh ('connection closed')
else: # Unexpected
self.close()
raise ExceptionPxssh ('unexpected login response')
if not self.sync_original_prompt():
self.close()
raise ExceptionPxssh ('could not synchronize with original prompt')
# We appear to be in.
# set shell prompt to something unique.
if auto_prompt_reset:
if not self.set_unique_prompt():
self.close()
raise ExceptionPxssh ('could not set shell prompt\n'+self.before)
return True
def logout (self):
"""This sends exit to the remote shell. If there are stopped jobs then
this automatically sends exit twice. """
self.sendline("exit")
index = self.expect([EOF, "(?i)there are stopped jobs"])
if index==1:
self.sendline("exit")
self.expect(EOF)
self.close()
def prompt (self, timeout=-1):
"""This matches the shell prompt. This is little more than a short-cut
to the expect() method. This returns True if the shell prompt was
matched. This returns False if a timeout was raised. Note that if you
called login() with auto_prompt_reset set to False then before calling
prompt() you must set the PROMPT attribute to a regex that prompt()
will use for matching the prompt. Calling prompt() will erase the
contents of the 'before' attribute even if no prompt is ever matched.
If timeout is not given or it is set to -1 then self.timeout is used.
"""
if timeout == -1:
timeout = self.timeout
i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
if i==1:
return False
return True
def set_unique_prompt (self):
"""This sets the remote prompt to something more unique than # or $.
This makes it easier for the prompt() method to match the shell prompt
unambiguously. This method is called automatically by the login()
method, but you may want to call it manually if you somehow reset the
shell prompt. For example, if you 'su' to a different user then you
will need to manually reset the prompt. This sends shell commands to
the remote host to set the prompt, so this assumes the remote host is
ready to receive commands.
Alternatively, you may use your own prompt pattern. Just set the PROMPT
attribute to a regular expression that matches it. In this case you
should call login() with auto_prompt_reset=False; then set the PROMPT
attribute. After that the prompt() method will try to match your prompt
pattern."""
self.sendline ("unset PROMPT_COMMAND")
self.sendline (self.PROMPT_SET_SH) # sh-style
i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
if i == 0: # csh-style
self.sendline (self.PROMPT_SET_CSH)
i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)
if i == 0:
return False
return True
# vi:ts=4:sw=4:expandtab:ft=python:
|
Zanzibar82/plugin.video.vvvvid | refs/heads/master | resources/lib/utils/keyfactory.py | 208 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parsePEMKey, parseAsPublicKey
"""
from .compat import *
from .rsakey import RSAKey
from .python_rsakey import Python_RSAKey
from tlslite.utils import cryptomath
if cryptomath.m2cryptoLoaded:
from .openssl_rsakey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from .pycrypto_rsakey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
"""Generate an RSA key with the specified bit length.
@type bits: int
@param bits: Desired bit length of the new key's modulus.
@rtype: L{tlslite.utils.rsakey.RSAKey}
@return: A new RSA private key.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey.generate(bits)
elif implementation == "python":
return Python_RSAKey.generate(bits)
raise ValueError("No acceptable implementations")
#Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
implementations=["openssl", "python"]):
"""Parse a PEM-format key.
The PEM format is used by OpenSSL and other tools. The
format is typically used to store both the public and private
components of a key. For example::
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
-----END RSA PRIVATE KEY-----
To generate a key like this with OpenSSL, run::
openssl genrsa 2048 > key.pem
This format also supports password-encrypted private keys. TLS
Lite can only handle password-encrypted private keys when OpenSSL
and M2Crypto are installed. In this case, passwordCallback will be
invoked to query the user for the password.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the
private key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will
be discarded, so this function will always return a public key.
@type passwordCallback: callable
@param passwordCallback: This function will be called, with no
arguments, if the PEM-encoded private key is password-encrypted.
The callback should return the password string. If the password is
incorrect, SyntaxError will be raised. If no callback is passed
and the key is password-encrypted, a prompt will be displayed at
the console.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
key = OpenSSL_RSAKey.parse(s, passwordCallback)
break
elif implementation == "python":
key = Python_RSAKey.parsePEM(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
"""Parse a PEM-formatted public key.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@rtype: L{tlslite.utils.rsakey.RSAKey}
@return: An RSA public key.
@raise SyntaxError: If the key is not properly formatted.
"""
return parsePEMKey(s, public=True)
def parsePrivateKey(s):
"""Parse a PEM-formatted private key.
@type s: str
@param s: A string containing a PEM-encoded private key.
@rtype: L{tlslite.utils.rsakey.RSAKey}
@return: An RSA private key.
@raise SyntaxError: If the key is not properly formatted.
"""
return parsePEMKey(s, private=True)
def _createPublicKey(key):
"""
Create a new public key. Discard any private component,
and return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
return _createPublicRSAKey(key.n, key.e)
def _createPrivateKey(key):
"""
Create a new private key. Return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
if not key.hasPrivateKey():
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
"python"]):
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey(n, e)
elif implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e)
elif implementation == "python":
return Python_RSAKey(n, e)
raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
implementations = ["pycrypto", "python"]):
for implementation in implementations:
if implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
elif implementation == "python":
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
raise ValueError("No acceptable implementations")
|
jbassen/edx-platform | refs/heads/master | pavelib/paver_tests/__init__.py | 12133432 | |
zofuthan/edx-platform | refs/heads/master | cms/djangoapps/xblock_config/migrations/__init__.py | 12133432 | |
adrian-the-git/mezzanine | refs/heads/master | mezzanine/generic/templatetags/__init__.py | 12133432 | |
veltzer/pycmdtools | refs/heads/master | pycmdtools/__init__.py | 12133432 | |
midma101/m0du1ar | refs/heads/master | .venv/lib/python2.7/site-packages/pkg_resources/_vendor/__init__.py | 12133432 | |
MQQiang/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_unittest.py | 197 | import unittest.test
from test import support
def test_main():
# used by regrtest
support.run_unittest(unittest.test.suite())
support.reap_children()
def load_tests(*_):
# used by unittest
return unittest.test.suite()
if __name__ == "__main__":
test_main()
|
frouty/odoo_oph | refs/heads/dev_70 | addons/claim_from_delivery/__openerp__.py | 172 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Claim on Deliveries',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Warehouse Management',
'depends' : ['base', 'crm_claim', 'stock'],
'demo' : [],
'description': """
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
""",
'data' : [
'claim_delivery_view.xml',
'claim_delivery_data.xml',],
'auto_install': False,
'installable': True,
'images': ['images/1_claim_link_delivery_order.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
AOSPA-L/android_external_skia | refs/heads/lollipop-mr1 | bench/check_bench_regressions.py | 160 | '''
Created on May 16, 2011
@author: bungeman
'''
import bench_util
import getopt
import httplib
import itertools
import json
import os
import re
import sys
import urllib
import urllib2
import xml.sax.saxutils
# Maximum expected number of characters we expect in an svn revision.
MAX_SVN_REV_LENGTH = 5
# Indices for getting elements from bench expectation files.
# See bench_expectations_<builder>.txt for details.
EXPECTED_IDX = -3
LB_IDX = -2
UB_IDX = -1
# Indices of the tuple of dictionaries containing slower and faster alerts.
SLOWER = 0
FASTER = 1
# URL prefix for the bench dashboard page. Showing recent 15 days of data.
DASHBOARD_URL_PREFIX = 'http://go/skpdash/#15'
def usage():
"""Prints simple usage information."""
print '-a <representation_alg> bench representation algorithm to use. '
print ' Defaults to "25th". See bench_util.py for details.'
print '-b <builder> name of the builder whose bench data we are checking.'
print '-d <dir> a directory containing bench_<revision>_<scalar> files.'
print '-e <file> file containing expected bench builder values/ranges.'
print ' Will raise exception if actual bench values are out of range.'
print ' See bench_expectations_<builder>.txt for data format / examples.'
print '-r <revision> the git commit hash or svn revision for checking '
print ' bench values.'
class Label:
"""The information in a label.
(str, str, str, str, {str:str})"""
def __init__(self, bench, config, time_type, settings):
self.bench = bench
self.config = config
self.time_type = time_type
self.settings = settings
def __repr__(self):
return "Label(%s, %s, %s, %s)" % (
str(self.bench),
str(self.config),
str(self.time_type),
str(self.settings),
)
def __str__(self):
return "%s_%s_%s_%s" % (
str(self.bench),
str(self.config),
str(self.time_type),
str(self.settings),
)
def __eq__(self, other):
return (self.bench == other.bench and
self.config == other.config and
self.time_type == other.time_type and
self.settings == other.settings)
def __hash__(self):
return (hash(self.bench) ^
hash(self.config) ^
hash(self.time_type) ^
hash(frozenset(self.settings.iteritems())))
def create_bench_dict(revision_data_points):
"""Convert current revision data into a dictionary of line data.
Args:
revision_data_points: a list of bench data points
Returns:
a dictionary of this form:
keys = Label objects
values = the corresponding bench value
"""
bench_dict = {}
for point in revision_data_points:
point_name = Label(point.bench,point.config,point.time_type,
point.settings)
if point_name not in bench_dict:
bench_dict[point_name] = point.time
else:
raise Exception('Duplicate expectation entry: ' + str(point_name))
return bench_dict
def read_expectations(expectations, filename):
"""Reads expectations data from file and put in expectations dict."""
for expectation in open(filename).readlines():
elements = expectation.strip().split(',')
if not elements[0] or elements[0].startswith('#'):
continue
if len(elements) != 5:
raise Exception("Invalid expectation line format: %s" %
expectation)
bench_entry = elements[0] + ',' + elements[1]
if bench_entry in expectations:
raise Exception("Dup entries for bench expectation %s" %
bench_entry)
# [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB, EXPECTED)
expectations[bench_entry] = (float(elements[LB_IDX]),
float(elements[UB_IDX]),
float(elements[EXPECTED_IDX]))
def check_expectations(lines, expectations, key_suffix):
"""Check if any bench results are outside of expected range.
For each input line in lines, checks the expectations dictionary to see if
the bench is out of the given range.
Args:
lines: dictionary mapping Label objects to the bench values.
expectations: dictionary returned by read_expectations().
key_suffix: string of <Platform>-<Alg> containing the bot platform and the
bench representation algorithm.
Returns:
No return value.
Raises:
Exception containing bench data that are out of range, if any.
"""
# The platform for this bot, to pass to the dashboard plot.
platform = key_suffix[ : key_suffix.rfind('-')]
# Tuple of dictionaries recording exceptions that are slower and faster,
# respectively. Each dictionary maps off_ratio (ratio of actual to expected)
# to a list of corresponding exception messages.
exceptions = ({}, {})
for line in lines:
line_str = str(line)
line_str = line_str[ : line_str.find('_{')]
# Extracts bench and config from line_str, which is in the format
# <bench-picture-name>.skp_<config>_
bench, config = line_str.strip('_').split('.skp_')
bench_platform_key = line_str + ',' + key_suffix
if bench_platform_key not in expectations:
continue
this_bench_value = lines[line]
this_min, this_max, this_expected = expectations[bench_platform_key]
if this_bench_value < this_min or this_bench_value > this_max:
off_ratio = this_bench_value / this_expected
exception = 'Bench %s out of range [%s, %s] (%s vs %s, %s%%).' % (
bench_platform_key, this_min, this_max, this_bench_value,
this_expected, (off_ratio - 1) * 100)
exception += '\n' + '~'.join([
DASHBOARD_URL_PREFIX, bench, platform, config])
if off_ratio > 1: # Bench is slower.
exceptions[SLOWER].setdefault(off_ratio, []).append(exception)
else:
exceptions[FASTER].setdefault(off_ratio, []).append(exception)
outputs = []
for i in [SLOWER, FASTER]:
if exceptions[i]:
ratios = exceptions[i].keys()
ratios.sort(reverse=True)
li = []
for ratio in ratios:
li.extend(exceptions[i][ratio])
header = '%s benches got slower (sorted by %% difference):' % len(li)
if i == FASTER:
header = header.replace('slower', 'faster')
outputs.extend(['', header] + li)
if outputs:
# Directly raising Exception will have stderr outputs tied to the line
# number of the script, so use sys.stderr.write() instead.
# Add a trailing newline to suppress newline checking errors.
sys.stderr.write('\n'.join(['Exception:'] + outputs + ['\n']))
exit(1)
def main():
"""Parses command line and checks bench expectations."""
try:
opts, _ = getopt.getopt(sys.argv[1:],
"a:b:d:e:r:",
"default-setting=")
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
directory = None
bench_expectations = {}
rep = '25th' # bench representation algorithm, default to 25th
rev = None # git commit hash or svn revision number
bot = None
try:
for option, value in opts:
if option == "-a":
rep = value
elif option == "-b":
bot = value
elif option == "-d":
directory = value
elif option == "-e":
read_expectations(bench_expectations, value)
elif option == "-r":
rev = value
else:
usage()
assert False, "unhandled option"
except ValueError:
usage()
sys.exit(2)
if directory is None or bot is None or rev is None:
usage()
sys.exit(2)
platform_and_alg = bot + '-' + rep
data_points = bench_util.parse_skp_bench_data(directory, rev, rep)
bench_dict = create_bench_dict(data_points)
if bench_expectations:
check_expectations(bench_dict, bench_expectations, platform_and_alg)
if __name__ == "__main__":
main()
|
avoinsystems/odoo | refs/heads/8.0 | addons/sale_journal/__init__.py | 443 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_journal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rosmo/ansible | refs/heads/devel | lib/ansible/modules/network/aci/mso_schema_site_anp_epg.py | 18 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_anp_epg
short_description: Manage site-local Endpoint Groups (EPGs) in schema template
description:
- Manage site-local EPGs in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
epg:
description:
- The name of the EPG to manage.
type: str
aliases: [ name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_site_anp
- module: mso_schema_site_anp_epg_subnet
- module: mso_schema_template_anp_epg
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new site EPG
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
state: present
delegate_to: localhost
- name: Remove a site EPG
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
state: absent
delegate_to: localhost
- name: Query a specific site EPGs
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
state: query
delegate_to: localhost
register: query_result
- name: Query all site EPGs
mso_schema_site_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, issubset
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
site=dict(type='str', required=True),
template=dict(type='str', required=True),
anp=dict(type='str', required=True),
epg=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['epg']],
['state', 'present', ['epg']],
],
)
schema = module.params['schema']
site = module.params['site']
template = module.params['template']
anp = module.params['anp']
epg = module.params['epg']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
schema_id = schema_obj['id']
# Get site
site_id = mso.lookup_site(site)
sites = [(s['siteId'], s['templateName']) for s in schema_obj['sites']]
if (site_id, template) not in sites:
mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(site, template, ', '.join(sites)))
# Schema-access uses indexes
site_idx = sites.index((site_id, template))
# Path-based access uses site_id-template
site_template = '{0}-{1}'.format(site_id, template)
# Get ANP
anp_ref = mso.anp_ref(schema_id=schema_id, template=template, anp=anp)
anps = [a['anpRef'] for a in schema_obj['sites'][site_idx]['anps']]
if anp_ref not in anps:
mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
anp_idx = anps.index(anp_ref)
# Get EPG
epg_ref = mso.epg_ref(schema_id=schema_id, template=template, anp=anp, epg=epg)
epgs = [e['epgRef'] for e in schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs']]
if epg is not None and epg_ref in epgs:
epg_idx = epgs.index(epg_ref)
epg_path = '/sites/{0}/anps/{1}/epgs/{2}'.format(site_template, anp, epg)
mso.existing = schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]
if state == 'query':
if epg is None:
mso.existing = schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs']
elif not mso.existing:
mso.fail_json(msg="EPG '{epg}' not found".format(epg=epg))
mso.exit_json()
epgs_path = '/sites/{0}/anps/{1}/epgs'.format(site_template, anp)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=epg_path))
elif state == 'present':
payload = dict(
epgRef=dict(
schemaId=schema_id,
templateName=template,
anpName=anp,
epgName=epg,
),
)
mso.sanitize(payload, collate=True)
if not mso.existing:
ops.append(dict(op='add', path=epgs_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
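# --- Illustrative sketch (not part of the module) ---
# main() talks to the MSO API with JSON-patch style operations sent in a single
# PATCH request to 'schemas/<id>'. The values below are hypothetical; only the
# structure mirrors what is assembled above:
#
#   ops = [dict(op='add',
#               path='/sites/Site1Id-Template1/anps/ANP1/epgs/-',
#               value=dict(epgRef=dict(schemaId='abc123', templateName='Template1',
#                                      anpName='ANP1', epgName='EPG1')))]
#   mso.request('schemas/abc123', method='PATCH', data=ops)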
|
ckane/crits | refs/heads/master | crits/core/audit.py | 23 | import datetime
from mongoengine import Document, StringField, ObjectIdField
from django.conf import settings
from crits.core.crits_mongoengine import CritsDocument, CritsSchemaDocument
from crits.core.fields import CritsDateTimeField
class AuditLog(CritsDocument, CritsSchemaDocument, Document):
"""
Audit Log Class
"""
meta = {
"allow_inheritance": False,
"crits_type": "AuditLog",
"collection": settings.COL_AUDIT_LOG,
"latest_schema_version": 1,
"schema_doc": {
'value': 'Value of the audit log entry',
'user': 'User the entry is about.',
'date': 'Date of the entry',
'type': 'Type of the audit entry',
'method': 'Method of the audit entry'
}
}
value = StringField()
user = StringField()
date = CritsDateTimeField(default=datetime.datetime.now)
target_type = StringField(db_field='type')
target_id = ObjectIdField()
method = StringField()
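# --- Illustrative usage sketch (not part of CRITs) ---
# Assuming a MongoDB connection has already been established through
# mongoengine, an audit entry could be recorded like this (field values are
# hypothetical examples):
#
#   entry = AuditLog(user='analyst', value='added indicator',
#                    target_type='Indicator', method='object_add')
#   entry.save()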
|
ejegg/FractalEditorSite | refs/heads/master | fractaleditor/urls.py | 1 | from django.conf.urls import include, url
import django.views.static
import fractaleditor.views
import fractals.views
import os.path
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
project_root = os.path.dirname(os.path.dirname(__file__))
urlpatterns = [
url(r'^fractal/', include('fractals.urls')),
url(r'^$', fractaleditor.views.home, name='home'),
url(r'^app$', fractals.views.app_link),
url(r'^(?P<path>android-chrome-[0-9x]+\.png|apple-touch-icon-[0-9a-z]+\.png|' +
r'browserconfig.xml|favicon-[0-9x]+\.png|favicon.ico|manifest.json|' +
r'safari-pinned-tab.svg|mstile-[0-9x]+.png|.well-known/.*)$',
django.views.static.serve, {
'document_root': project_root + '/static'
}),
url(r'^(?P<path>static/.*)', django.views.static.serve, {
'document_root': project_root
}),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
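# Illustrative note (not part of the original file): with the patterns above, a
# request such as /favicon.ico or /android-chrome-192x192.png is handled by
# django.views.static.serve with document_root set to <project_root>/static,
# while /static/<anything> is served relative to the project root itself.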
|
bzbarsky/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/assertion/__init__.py | 176 | """
support for presenting detailed information in failing assertions.
"""
import py
import os
import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--assert',
action="store",
dest="assertmode",
choices=("rewrite", "reinterp", "plain",),
default="rewrite",
metavar="MODE",
help="""control assertion debugging tools. 'plain'
performs no assertion debugging. 'reinterp'
reinterprets assert statements after they failed
to provide assertion expression information.
'rewrite' (the default) rewrites assert
statements in test modules on import to
provide assert expression information. """)
group.addoption('--no-assert',
action="store_true",
default=False,
dest="noassert",
help="DEPRECATED equivalent to --assert=plain")
group.addoption('--nomagic', '--no-magic',
action="store_true",
default=False,
help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
def pytest_configure(config):
mode = config.getvalue("assertmode")
if config.getvalue("noassert") or config.getvalue("nomagic"):
mode = "plain"
if mode == "rewrite":
try:
import ast # noqa
except ImportError:
mode = "reinterp"
else:
# Both Jython and CPython 2.6.0 have AST bugs that make the
# assertion rewriting hook malfunction.
if (sys.platform.startswith('java') or
sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
hook = rewrite.AssertionRewritingHook() # noqa
sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
config._assertstate.trace("configured with mode set to %r" % (mode,))
def undo():
hook = config._assertstate.hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
hook = session.config._assertstate.hook
if hook is not None:
hook.set_session(session)
def _running_on_ci():
"""Check if we're currently running on a CI system."""
env_vars = ['CI', 'BUILD_NUMBER']
return any(var in os.environ for var in env_vars)
def pytest_runtest_setup(item):
"""Setup the pytest_assertrepr_compare hook
The newinterpret and rewrite modules will use util._reprcompare if
it exists to use custom reporting via the
pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
def callbinrepr(op, left, right):
"""Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are dropped unless -vv was used or
running on a CI.
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
for new_expl in hook_result:
if new_expl:
if (sum(len(p) for p in new_expl[1:]) > 80*8 and
item.config.option.verbose < 2 and
not _running_on_ci()):
show_max = 10
truncated_lines = len(new_expl) - show_max
new_expl[show_max:] = [py.builtin._totext(
'Detailed information truncated (%d more lines)'
', use "-vv" to show' % truncated_lines)]
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = py.builtin._totext("\n~").join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
hook = session.config._assertstate.hook
if hook is not None:
hook.session = None
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
assert False
except AssertionError:
pass
else:
if mode == "rewrite":
specifically = ("assertions which are not in test modules "
"will be ignored")
else:
specifically = "failing tests may report as passing"
sys.stderr.write("WARNING: " + specifically +
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare
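# --- Illustrative sketch (not part of this module) ---
# Projects can customize failure explanations by implementing the hook that
# callbinrepr() above dispatches to. A minimal conftest.py version might look
# like this (the Money class is a made-up example):
#
#   def pytest_assertrepr_compare(config, op, left, right):
#       if isinstance(left, Money) and isinstance(right, Money) and op == "==":
#           return ["Comparing Money instances:",
#                   "   values: %s != %s" % (left.value, right.value)]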
|
tumbl3w33d/ansible | refs/heads/devel | test/units/modules/storage/netapp/test_na_ontap_vscan_on_demand_task.py | 43 | ''' unit tests for Ansible module: na_ontap_vscan_on_demand_task '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vscan_on_demand_task \
import NetAppOntapVscanOnDemandTask as onDemand_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'task':
xml = self.build_onDemand_pool_info(self.params)
self.xml_out = xml
return xml
@staticmethod
def build_onDemand_pool_info(onDemand_details):
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'vscan-on-demand-task-info': {
'task-name': onDemand_details['task_name'],
'report-directory': onDemand_details['report_directory'],
'scan-paths': {
'string': onDemand_details['scan_paths']
}
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
    ''' Unit tests for na_ontap_vscan_on_demand_task '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_onDemand = {
'state': 'present',
'vserver': 'test_vserver',
'report_directory': '/',
'task_name': '/',
'scan_paths': '/'
}
def mock_args(self):
return {
'state': self.mock_onDemand['state'],
'vserver': self.mock_onDemand['vserver'],
'report_directory': self.mock_onDemand['report_directory'],
'task_name': self.mock_onDemand['task_name'],
'scan_paths': self.mock_onDemand['scan_paths'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_demand_mock_object(self, kind=None):
scanner_obj = onDemand_module()
scanner_obj.asup_log_for_cserver = Mock(return_value=None)
if kind is None:
scanner_obj.server = MockONTAPConnection()
else:
scanner_obj.server = MockONTAPConnection(kind='task', data=self.mock_onDemand)
return scanner_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
onDemand_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_demand_task(self):
set_module_args(self.mock_args())
result = self.get_demand_mock_object().get_demand_task()
assert not result
def test_get_existing_demand_task(self):
set_module_args(self.mock_args())
result = self.get_demand_mock_object('task').get_demand_task()
assert result
def test_successfully_create(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_demand_mock_object().apply()
assert exc.value.args[0]['changed']
def test_create_idempotency(self):
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_demand_mock_object('task').apply()
assert not exc.value.args[0]['changed']
def test_successfully_delete(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_demand_mock_object('task').apply()
assert exc.value.args[0]['changed']
def test_delete_idempotency(self):
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_demand_mock_object().apply()
assert not exc.value.args[0]['changed']
|
felipetomm/POX-Django | refs/heads/master | pox/topology/topology.py | 41 | # Copyright 2011 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Topology module is the root of an object model composed of entities
like switches, hosts, links, etc. This object model is populated by other
modules. For example, openflow.topology populates the topology object
with OpenFlow switches.
Note that this means that you often want to invoke something like:
$ ./pox.py topology openflow.discovery openflow.topology
"""
from pox.lib.revent import *
from pox.core import core
from pox.lib.addresses import *
import traceback
import pickle
class EntityEvent (Event):
def __init__ (self, entity):
Event.__init__(self)
self.entity = entity
class EntityJoin (EntityEvent):
"""
An entity has been added.
Note that if there is a more specific join event defined for a particular
entity, (e.g., SwitchJoin), this event will not be fired.
TODO: or we could always raise EntityJoins along with SwitchJoins, which
seems more intuitive to me.
"""
pass
class EntityLeave (EntityEvent):
"""
An entity has been removed
Note that if there is a more specific leave event defined for a particular
entity, (e.g., SwitchLeave), this event will not be fired.
TODO: or we could always raise EntityLeaves along with SwitchLeaves, which
seems more intuitive to me.
"""
pass
class SwitchEvent (EntityEvent): pass
class SwitchJoin (SwitchEvent):
"""
As opposed to ConnectionUp, SwitchJoin occurs over large time scales
(e.g. an administrator physically moving a switch).
"""
def __init__ (self, switch):
SwitchEvent.__init__(self, switch)
self.switch = switch
class SwitchLeave (SwitchEvent):
"""
As opposed to ConnectionDown, SwitchLeave occurs over large time scales
(e.g. an administrator physically moving a switch).
"""
pass
class SwitchConnectionUp(SwitchEvent):
def __init__(self, switch, connection):
SwitchEvent.__init__(self, switch)
self.switch = switch
self.connection = connection
class SwitchConnectionDown(SwitchEvent): pass
class HostEvent (EntityEvent): pass
class HostJoin (HostEvent): pass
class HostLeave (HostEvent): pass
class Update (Event):
"""
Fired by Topology whenever anything has changed
"""
def __init__ (self, event=None):
Event.__init__(self)
self.event = event
class Entity (object):
"""
Note that the Entity class is intentionally simple; It only serves as a
convenient SuperClass type.
It's up to subclasses to implement specific functionality (e.g.
OpenFlow1.0 switch functionality). The purpose of this design decision
is to prevent protocol specific details from being leaked into this
  module... but this design decision does /not/ imply that pox.topology
serves to define a generic interface to abstract entity types.
NOTE: /all/ subclasses must call this superconstructor, since
  the unique self.id field is used by Topology
"""
# This is a counter used so that we can get unique IDs for entities.
# Some entities don't need this because they have more meaningful
# identifiers.
_next_id = 101
_all_ids = set()
_tb = {}
def __init__ (self, id=None):
if id:
if id in Entity._all_ids:
print("".join(traceback.format_list(self._tb[id])))
raise Exception("ID %s already taken" % str(id))
else:
while Entity._next_id in Entity._all_ids:
Entity._next_id += 1
id = Entity._next_id
self._tb[id] = traceback.extract_stack()
Entity._all_ids.add(id)
self.id = id
def serialize(self):
return pickle.dumps(self, protocol = 0)
  @classmethod
  def deserialize(cls, data):
    # Inverse of serialize(): rebuild an entity from its pickled form
    return pickle.loads(data)
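# --- Illustrative sketch (not part of POX) ---
# Entity hands out unique integer ids when none is supplied and rejects
# duplicates (the counter starts at 101, so the exact values are examples):
#
#   a = Entity()      # auto-assigned id, e.g. 101
#   b = Entity()      # next free id, e.g. 102
#   Entity(a.id)      # raises Exception("ID ... already taken")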
class Host (Entity):
"""
A generic Host entity.
"""
def __init__(self,id=None):
Entity.__init__(self, id)
class Switch (Entity):
"""
Subclassed by protocol-specific switch classes,
e.g. pox.openflow.topology.OpenFlowSwitch
"""
def __init__(self, id=None):
# Switches often have something more meaningful to use as an ID
# (e.g., a DPID or MAC address), so they take it as a parameter.
Entity.__init__(self, id)
class Port (Entity):
def __init__ (self, num, hwAddr, name):
Entity.__init__(self)
self.number = num
self.hwAddr = EthAddr(hwAddr)
self.name = name
class Controller (Entity):
def __init__(self, name, handshake_complete=False):
self.id = name
# TODO: python aliases?
self.name = name
self.handshake_complete = handshake_complete
def handshake_completed(self):
self.handshake_complete = True
class Topology (EventMixin):
_eventMixin_events = [
SwitchJoin,
SwitchLeave,
HostJoin,
HostLeave,
EntityJoin,
EntityLeave,
Update
]
_core_name = "topology" # We want to be core.topology
def __init__ (self, name="topology"):
EventMixin.__init__(self)
self._entities = {}
self.name = name
self.log = core.getLogger(name)
# If a client registers a handler for these events after they have
# already occurred, we promise to re-issue them to the newly joined
# client.
self._event_promises = {
SwitchJoin : self._fulfill_SwitchJoin_promise
}
def getEntityByID (self, ID, fail=False):
"""
Raises an exception if fail is True and the entity doesn't exist
See also: The 'entity' property.
"""
if fail:
return self._entities[ID]
else:
return self._entities.get(ID, None)
def removeEntity (self, entity):
del self._entities[entity.id]
self.log.info(str(entity) + " left")
if isinstance(entity, Switch):
self.raiseEvent(SwitchLeave, entity)
elif isinstance(entity, Host):
self.raiseEvent(HostLeave, entity)
else:
self.raiseEvent(EntityLeave, entity)
def addEntity (self, entity):
""" Will raise an exception if entity.id already exists """
if entity.id in self._entities:
raise RuntimeError("Entity exists")
self._entities[entity.id] = entity
self.log.debug(str(entity) + " (id: " + str(entity.id) + ") joined")
if isinstance(entity, Switch):
self.raiseEvent(SwitchJoin, entity)
elif isinstance(entity, Host):
self.raiseEvent(HostJoin, entity)
else:
self.raiseEvent(EntityJoin, entity)
def getEntitiesOfType (self, t=Entity, subtypes=True):
if subtypes is False:
return [x for x in self._entities.itervalues() if type(x) is t]
else:
return [x for x in self._entities.itervalues() if isinstance(x, t)]
def addListener(self, eventType, handler, once=False, weak=False,
priority=None, byName=False):
"""
We interpose on EventMixin.addListener to check if the eventType is
in our promise list. If so, trigger the handler for all previously
triggered events.
"""
if eventType in self._event_promises:
self._event_promises[eventType](handler)
return EventMixin.addListener(self, eventType, handler, once=once,
weak=weak, priority=priority,
byName=byName)
def raiseEvent (self, event, *args, **kw):
"""
Whenever we raise any event, we also raise an Update, so we extend
the implementation in EventMixin.
"""
rv = EventMixin.raiseEvent(self, event, *args, **kw)
if type(event) is not Update:
EventMixin.raiseEvent(self, Update(event))
return rv
def serialize (self):
"""
Picklize our current entities.
    Returns a hash: { id -> pickled entity }
"""
id2entity = {}
for id in self._entities:
entity = self._entities[id]
id2entity[id] = entity.serialize()
return id2entity
def deserializeAndMerge (self, id2entity):
"""
Given the output of topology.serialize(), deserialize each entity, and:
      - insert a new entity if it didn't already exist here, or
- update a pre-existing entry if it already existed
"""
for entity_id in id2entity.keys():
pickled_entity = id2entity[entity_id].encode('ascii', 'ignore')
entity = pickle.loads(pickled_entity)
entity.id = entity_id.encode('ascii', 'ignore')
try:
# Try to parse it as an int
entity.id = int(entity.id)
except ValueError:
pass
existing_entity = self.getEntityByID(entity.id)
if existing_entity:
self.log.debug("New metadata for %s: %s " % (str(existing_entity), str(entity)))
        # TODO: define an Entity.merge method (need to do something about this update!)
else:
self.addEntity(entity)
def _fulfill_SwitchJoin_promise(self, handler):
""" Trigger the SwitchJoin handler for all pre-existing switches """
for switch in self.getEntitiesOfType(Switch, True):
handler(SwitchJoin(switch))
def __len__(self):
return len(self._entities)
def __str__(self):
# TODO: display me graphically
strings = []
strings.append("topology (%d total entities)" % len(self._entities))
for id,entity in self._entities.iteritems():
strings.append("%s %s" % (str(id), str(entity)))
return '\n'.join(strings)
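# --- Illustrative usage sketch (not part of POX) ---
# Other components register listeners on core.topology; thanks to the promise
# mechanism in addListener() above, a SwitchJoin handler added late is still
# invoked for switches that already joined. Hypothetical handler:
#
#   def _handle_SwitchJoin(event):
#       print("switch joined:", event.switch.id)
#   core.topology.addListener(SwitchJoin, _handle_SwitchJoin)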
|
osm-fr/osmose-backend | refs/heads/master | osmose_config.py | 3 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Etienne Chové <[email protected]> 2009 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
# language: http://fr.wikipedia.org/wiki/Liste_des_codes_ISO_639-1
# COUNTRY: http://fr.wikipedia.org/wiki/ISO_3166-1
import os
from collections import OrderedDict
import modules.config
from typing import Dict, Optional
###########################################################################
GEOFABRIK = u"http://download.geofabrik.de/"
OSMFR = u"http://download.openstreetmap.fr/extracts/"
OSMCH = u"https://planet.osm.ch/"
class template_config:
clean_at_end = True
updt_url = modules.config.url_frontend_update
dir_work = modules.config.dir_work
dir_tmp = modules.config.dir_tmp
dir_cache = modules.config.dir_cache
dir_scripts = modules.config.dir_osmose
bin_osmosis = modules.config.bin_osmosis
bin_pyosmium_up_to_date = modules.config.bin_pyosmium_up_to_date
osmosis_pre_scripts = [
dir_scripts + "/osmosis/pgsnapshot_schema_0.6.sql",
# dir_scripts + "/osmosis/osmosis-0.47.4/script/pgsnapshot_schema_0.6_bbox.sql",
dir_scripts + "/osmosis/osmosis-0.47.4/script/pgsnapshot_schema_0.6_linestring.sql",
dir_scripts + "/osmosis/CreateMetainfo.sql",
]
osmosis_import_scripts = [
dir_scripts + "/osmosis/ImportDatabase.sql",
]
osmosis_post_scripts = [
dir_scripts + "/osmosis/CreateTagsIndex.sql",
dir_scripts + "/osmosis/CreateFunctions.sql",
]
osmosis_change_init_post_scripts = [ # Scripts to run on database initialisation
dir_scripts + "/osmosis/pgsimple_schema_0.6_action_drop.sql",
dir_scripts + "/osmosis/osmosis-0.47.4/script/pgsnapshot_schema_0.6_action.sql",
]
osmosis_change_post_scripts = [ # Scripts to run each time the database is updated
dir_scripts + "/osmosis/CreateTouched.sql",
]
osmosis_resume_init_post_scripts = [ # Scripts to run on database initialisation
dir_scripts + "/osmosis/pgsimple_schema_0.6_action_drop.sql",
dir_scripts + "/osmosis/osmosis-0.47.4/script/pgsnapshot_schema_0.6_action.sql",
]
osmosis_resume_post_scripts = [ # Scripts to run each time the database is updated
dir_scripts + "/osmosis/ActionFromTimestamp.sql",
dir_scripts + "/osmosis/CreateTouched.sql",
]
dir_results = modules.config.dir_results
dir_extracts = modules.config.dir_extracts
dir_diffs = modules.config.dir_diffs
db_base: Optional[str] = 'osmose'
db_user: Optional[str] = 'osmose'
db_password: Optional[str] = '-osmose-'
db_host: Optional[str] = os.environ.get('DB_HOST', None) # Use socket by default
db_schema: Optional[str] = None
db_schema_path: Optional[str] = None
db_persistent = False
source_url = 'https://github.com/osm-fr/osmose-backend/blob/master'
def __init__(self, country, polygon_id=None, analyser_options=None, download_repo=GEOFABRIK):
config[country] = self
self.country = country
self.polygon_id = polygon_id # ID of a relation for the country boundary
self.download = {}
self.download_repo = download_repo
self.analyser: OrderedDict[str, str] = OrderedDict()
if analyser_options:
self.analyser_options = analyser_options
else:
self.analyser_options = {}
        self.sql_post_scripts = [] # Scripts to run every time, just before launching analysers
self.db_extension_check = []
self.analyser_updt_url = {}
def init(self):
if "diff" in self.download:
self.download["diff_path"] = os.path.join(self.dir_diffs, self.country)
if "url" in self.download and not "dst" in self.download:
ext = os.path.splitext(self.download["url"])[1]
for e in [".osm.pbf", ".osm.bz2", ".osm.gz"]:
if self.download["url"].endswith(e):
ext = e
break
self.download["dst"] = self.dir_extracts + "/" + self.country + ext
config: Dict[str, template_config] = OrderedDict()
###########################################################################
class default_simple(template_config):
def __init__(self, country, polygon_id=None, analyser_options=None, download_url=None, download_repo=None):
template_config.__init__(self, country, polygon_id, analyser_options, download_repo)
self.db_extension_check += ["fuzzystrmatch", "unaccent"]
self.download = {
"url": download_url
}
self.analyser["sax"] = "xxx"
self.analyser["osmosis_roundabout_reverse"] = "xxx"
self.analyser["osmosis_roundabout_level"] = "xxx"
self.analyser["osmosis_soundex"] = "xxx"
self.analyser["osmosis_roundabout"] = "xxx"
self.analyser["osmosis_boundary_hole"] = "xxx"
self.analyser["osmosis_building_overlaps"] = "xxx"
self.analyser["osmosis_polygon"] = "xxx"
self.analyser["osmosis_highway_vs_building"] = "xxx"
self.analyser["osmosis_orphan_nodes_cluster"] = "xxx"
self.analyser["osmosis_powerline"] = "xxx"
self.analyser["osmosis_double_tagging"] = "xxx"
self.analyser["osmosis_relation_associatedStreet"] = "xxx"
self.analyser["osmosis_highway_link"] = "xxx"
self.analyser["osmosis_highway_broken_level_continuity"] = "xxx"
self.analyser["osmosis_relation_large"] = "xxx"
self.analyser["osmosis_polygon_overlaps"] = "xxx"
self.analyser["osmosis_useless"] = "xxx"
self.analyser["osmosis_relation_multipolygon"] = "xxx"
self.analyser["osmosis_boundary_intersect"] = "xxx"
self.analyser["osmosis_node_like_way"] = "xxx"
self.analyser["osmosis_boundary_administrative"] = "xxx"
self.analyser["osmosis_tag_typo"] = "xxx"
self.analyser["osmosis_cycleway_track"] = "xxx"
self.analyser["osmosis_highway_features"] = "xxx"
self.analyser["osmosis_building_shapes"] = "xxx"
self.analyser["osmosis_highway_deadend"] = "xxx"
self.analyser["osmosis_boundary_relation"] = "xxx"
self.analyser["osmosis_highway_traffic_signals"] = "xxx"
self.analyser["osmosis_relation_restriction"] = "xxx"
self.analyser["osmosis_highway_tunnel_bridge"] = "xxx"
self.analyser["osmosis_waterway"] = "xxx"
self.analyser["osmosis_duplicated_geotag"] = "xxx"
self.analyser["osmosis_highway_noexit"] = "xxx"
self.analyser["osmosis_parking_highway"] = "xxx"
self.analyser["osmosis_highway_bad_intersection"] = "xxx"
self.analyser["osmosis_water"] = "xxx"
self.analyser["osmosis_relation_public_transport"] = "xxx"
self.analyser["osmosis_highway_turn_lanes"] = "xxx"
self.analyser["osmosis_highway_almost_junction"] = "xxx"
self.analyser["osmosis_highway_without_ref"] = "xxx"
self.analyser["osmosis_building_3nodes"] = "xxx"
self.analyser["osmosis_wikipedia"] = "xxx"
self.analyser["osmosis_highway_name_close"] = "xxx"
self.analyser["osmosis_relation_route_access"] = "xxx"
self.analyser["osmosis_highway_floating_islands"] = "xxx"
self.analyser["merge_traffic_signs"] = "xxx"
self.analyser["merge_street_objects"] = "xxx"
self.analyser["merge_street_objects2"] = "xxx"
self.analyser["osmosis_relation_enforcement"] = "xxx"
self.analyser["osmosis_addr_interpolation"] = "xxx"
self.analyser["osmosis_camp_pitch_out_of_camp_site"] = "xxx"
self.analyser["osmosis_relation_open"] = "xxx"
class default_country_simple(default_simple):
def __init__(self, part, country, polygon_id=None, analyser_options=None,
download_repo=GEOFABRIK, download_country=None):
part = part + '/' if part is not None else ''
if not download_country:
download_country = country
country = country.replace("-", "_").replace("/", "_")
analyser_options = dict({"project": "openstreetmap"}, **analyser_options)
default_simple.__init__(self, country, polygon_id, analyser_options, download_repo=download_repo)
self.download.update({
"url": self.download_repo + part + download_country + "-latest.osm.pbf",
"poly": self.download_repo + part + download_country + ".poly",
})
if download_repo == GEOFABRIK:
self.download["diff"] = self.download_repo + part + download_country + "-updates/"
self.download["state.txt"] = self.download["diff"] + "state.txt"
if download_repo == OSMFR:
self.download["poly"] = self.download["poly"].replace("/extracts/", "/polygons/")
self.download["diff"] = self.download_repo + "../replication/" + part + download_country + "/minute/"
self.download["state.txt"] = self.download_repo + part + download_country + ".state.txt"
if download_repo == OSMCH:
self.download["url"] = self.download_repo + download_country + "-padded.osm.pbf"
self.download["poly"] = self.download_repo + "switzerland-padded.poly"
self.download["diff"] = self.download_repo + "replication/hour/"
self.download["state.txt"] = self.download["diff"] + "state.txt"
class default_country(default_country_simple):
def __init__(self, part, country, polygon_id=None, analyser_options=None,
download_repo=GEOFABRIK, download_country=None):
default_country_simple.__init__(self, part, country, polygon_id, analyser_options,
download_repo, download_country)
self.analyser["osmosis_highway_cul-de-sac_level"] = "xxx"
self.analyser["osmosis_way_approximate"] = "xxx"
self.analyser["osmosis_highway_area_access"] = "xxx"
def gen_country(area, path_base=None,
country_base=None, country_code=None, download_repo=GEOFABRIK, include=[], exclude=[], **analyser_options_default):
area_default = area
path_base_default = path_base
country_base_default = country_base
country_base_default = country_base_default or path_base
country_code_default = country_code
download_repo_default = download_repo
include_default = include
exclude_default = exclude
def init(self, path, polygon_id=None, country_code=country_code_default,
area=area_default, country=None, path_base=path_base_default, country_base=country_base_default, download_repo=download_repo_default, include=[], exclude=[], **analyser_options):
ao = {'country': country_code}
ao.update(analyser_options_default)
ao.update(analyser_options)
path = path if isinstance(path, list) else [path]
country = (country or path[-1]).replace('-', '_')
download_country = '/'.join(filter(lambda a: a is not None, [path_base] + path))
default_country.__init__(self, area, country_base + '_' + country, polygon_id, ao, download_repo, download_country)
for analyser in include_default + include:
self.analyser[analyser] = 'xxx'
for analyser in exclude_default + exclude:
del(self.analyser[analyser])
class gen(default_country):
__init__ = init
return gen
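# Illustrative sketch (not part of the original file): gen_country() returns a
# default_country subclass whose constructor registers an entry in the global
# `config` dict, so each call below both creates and registers a country
# configuration. With hypothetical names:
#
#   example = gen_country('europe', 'examplia', download_repo=OSMFR, language='xx', proj=32631)
#   example('north', 123456, 'EX-N')   # registers config['examplia_north']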
france_departement = gen_country('europe', 'france', download_repo=OSMFR, language='fr', proj=2154, municipality_ref='ref:INSEE',
phone_code='33', phone_len=9, phone_format=r'^([+]%s([- ./]*[0-9]){8}[0-9])|[0-9]{4}|[0-9]{6}$', phone_international='00', phone_local_prefix='0',
include=[
'osmosis_building_geodesie_FR',
'osmosis_natural_swimming-pool',
'osmosis_fantoir',
'osmosis_highway_motorway',
'osmosis_highway_zone',
'merge_milestone_FR_metropole',
'merge_shop_FR',
], **{'addr:city-admin_level': '8,9'})
france_departement("alsace/bas_rhin", 7415, "FR-67", include=[
'merge_defibrillators_FR_basrhin',
])
france_departement("alsace/haut_rhin", 7403, "FR-68")
include_aquitaine = [
# Aquitiane
'merge_tourism_FR_aquitaine_camp_caravan',
'merge_tourism_FR_aquitaine_museum',
'merge_sport_FR_aquitaine_equestrian',
'merge_library_FR_aquitaine',
'merge_winery_FR_aquitaine',
]
france_departement("aquitaine/dordogne", 7375, "FR-24", include=include_aquitaine)
france_departement("aquitaine/gironde", 7405, "FR-33", include=include_aquitaine + [
# Bordeaux
'merge_recycling_FR_bm',
'merge_parking_FR_bm',
'merge_bicycle_rental_FR_bm',
'merge_cycle_parking_FR_bm',
'merge_public_equipment_FR_bordeaux_toilets',
'merge_public_transport_FR_tbm',
# Gironde
'merge_public_transport_FR_transgironde',
])
france_departement("aquitaine/landes", 7376, "FR-40", include=include_aquitaine)
france_departement("aquitaine/lot_et_garonne", 1284995, "FR-47", include=include_aquitaine)
france_departement("aquitaine/pyrenees_atlantiques", 7450, "FR-64", include=include_aquitaine + [
# Pau
'merge_recycling_FR_capp_glass',
'merge_recycling_FR_capp_clothes',
'merge_parking_FR_capp',
'merge_bicycle_parking_FR_capp',
])
france_departement("auvergne/allier", 1450201, "FR-03")
france_departement("auvergne/cantal", 7381, "FR-15")
france_departement("auvergne/haute_loire", 7452, "FR-43")
france_departement("auvergne/puy_de_dome", 7406, "FR-63")
france_departement("basse_normandie/calvados", 7453, "FR-14")
france_departement("basse_normandie/manche", 7404, "FR-50")
france_departement("basse_normandie/orne", 7419, "FR-61")
france_departement("bourgogne/cote_d_or", 7424, "FR-21")
france_departement("bourgogne/nievre", 7448, "FR-58")
france_departement("bourgogne/saone_et_loire", 7397, "FR-71", include=[
# Saône-et-Loire
'merge_hydrants_FR',
])
france_departement("bourgogne/yonne", 7392, "FR-89")
france_departement("bretagne/cotes_d_armor", 7398, "FR-22")
france_departement("bretagne/ille_et_vilaine", 7465, "FR-35", include=[
# Rennes
'merge_public_equipment_FR_rennes_toilets',
'merge_public_transport_FR_star',
'merge_defibrillators_FR_montfort',
'merge_defibrillators_FR_saintmalo',
])
france_departement("bretagne/finistere", 102430, "FR-29")
france_departement("bretagne/morbihan", 7447, "FR-56", include=[
'merge_defibrillators_FR_lorient',
])
france_departement("centre/cher", 7456, "FR-18")
france_departement("centre/eure_et_loir", 7374, "FR-28")
france_departement("centre/indre", 7417, "FR-36")
france_departement("centre/indre_et_loire", 7408, "FR-37")
france_departement("centre/loir_et_cher", 7399, "FR-41")
france_departement("centre/loiret", 7440, "FR-45")
france_departement("champagne_ardenne/ardennes", 7395, "FR-08")
france_departement("champagne_ardenne/aube", 7441, "FR-10")
france_departement("champagne_ardenne/marne", 7379, "FR-51")
france_departement("champagne_ardenne/haute_marne", 7396, "FR-52")
france_departement("corse/corse_du_sud", 76932, "FR-2A")
france_departement("corse/haute_corse", 76931, "FR-2B")
france_departement("franche_comte/doubs", 7462, "FR-25")
france_departement("franche_comte/jura", 7460, "FR-39")
france_departement("franche_comte/haute_saone", 7423, "FR-70")
france_departement("franche_comte/territoire_de_belfort", 7410, "FR-90")
france_departement("haute_normandie/eure", 7435, "FR-27")
france_departement("haute_normandie/seine_maritime", 7426, "FR-76", include=[
# Le Havre
'merge_public_equipment_FR_lehavre_toilets',
])
include_ile_de_france = [
# Île-de-france
'merge_public_transport_FR_stif',
'merge_bicycle_rental_FR_IDF',
'merge_parking_FR_IDF',
]
france_departement("ile_de_france/paris", 71525, "FR-75", include=include_ile_de_france + [
# Paris
'merge_bicycle_parking_FR_paris',
'merge_defibrillators_FR_paris',
], exclude=[
'merge_shop_FR',
])
france_departement("ile_de_france/hauts_de_seine", 7449, "FR-92", include=include_ile_de_france + [
# Hauts-de-Seine
'merge_restriction_FR_92',
'merge_defibrillators_FR_issylesmoulineaux',
])
france_departement("ile_de_france/seine_saint_denis", 7389, "FR-93", include=include_ile_de_france)
france_departement("ile_de_france/val_de_marne", 7458, "FR-94", include=include_ile_de_france)
france_departement("ile_de_france/essonne", 7401, "FR-91", include=include_ile_de_france)
france_departement("ile_de_france/seine_et_marne", 7383, "FR-77", include=include_ile_de_france)
france_departement("ile_de_france/val_d_oise", 7433, "FR-95", include=include_ile_de_france)
france_departement("ile_de_france/yvelines", 7457, "FR-78", include=include_ile_de_france)
france_departement("languedoc_roussillon/aude", 7446, "FR-11")
france_departement("languedoc_roussillon/gard", 7461, "FR-30")
france_departement("languedoc_roussillon/herault", 7429, "FR-34", include=[
# Montpellier
'merge_public_equipment_FR_montpellier_toilets',
])
france_departement("languedoc_roussillon/lozere", 7421, "FR-48")
france_departement("languedoc_roussillon/pyrenees_orientales", 7466, "FR-66")
france_departement("limousin/correze", 7464, "FR-19")
france_departement("limousin/creuse", 7459, "FR-23")
france_departement("limousin/haute_vienne", 7418, "FR-87")
france_departement("lorraine/meurthe_et_moselle", 51856, "FR-54", include=[
# Nancy
'merge_public_transport_FR_stan',
])
france_departement("lorraine/meuse", 7382, "FR-55")
france_departement("lorraine/moselle", 51854, "FR-57")
france_departement("lorraine/vosges", 7384, "FR-88")
france_departement("midi_pyrenees/ariege", 7439, "FR-09")
france_departement("midi_pyrenees/aveyron", 7451, "FR-12")
france_departement("midi_pyrenees/haute_garonne", 7413, "FR-31", include=[
# Toulouse
'merge_public_equipment_FR_toulouse_toilets',
'merge_defibrillators_FR_toulouse',
'merge_defibrillators_FR_cugnaux',
])
france_departement("midi_pyrenees/gers", 7422, "FR-32", include=[
'merge_defibrillators_FR_gers',
])
france_departement("midi_pyrenees/lot", 7454, "FR-46")
france_departement("midi_pyrenees/hautes_pyrenees", 7467, "FR-65")
france_departement("midi_pyrenees/tarn", 7442, "FR-81")
france_departement("midi_pyrenees/tarn_et_garonne", 7388, "FR-82")
france_departement("nord_pas_de_calais/nord", 7400, "FR-59")
france_departement("nord_pas_de_calais/pas_de_calais", 7394, "FR-62")
france_departement("pays_de_la_loire/loire_atlantique", 7432, "FR-44", include=[
# Nantes
'merge_recycling_FR_nm_glass',
'merge_public_equipment_FR_nantes_toilets',
'merge_recycling_FR_csma',
'merge_waste_disposal_FR_csma',
])
france_departement("pays_de_la_loire/maine_et_loire", 7409, "FR-49", include=[
# Angers
'merge_public_equipment_FR_angers_toilets',
])
france_departement("pays_de_la_loire/mayenne", 7438, "FR-53")
france_departement("pays_de_la_loire/sarthe", 7443, "FR-72")
france_departement("pays_de_la_loire/vendee", 7402, "FR-85")
france_departement("picardie/aisne", 7411, "FR-02")
france_departement("picardie/oise", 7427, "FR-60")
france_departement("picardie/somme", 7463, "FR-80")
france_departement("poitou_charentes/charente", 7428, "FR-16")
france_departement("poitou_charentes/charente_maritime", 7431, "FR-17")
france_departement("poitou_charentes/deux_sevres", 7455, "FR-79")
france_departement("poitou_charentes/vienne", 7377, "FR-86")
france_departement("provence_alpes_cote_d_azur/alpes_de_haute_provence", 7380, "FR-04")
france_departement("provence_alpes_cote_d_azur/hautes_alpes", 7436, "FR-05", include=[
'merge_defibrillators_FR_hautesalpes',
])
france_departement("provence_alpes_cote_d_azur/alpes_maritimes", 7385, "FR-06")
france_departement("provence_alpes_cote_d_azur/bouches_du_rhone", 7393, "FR-13")
france_departement("provence_alpes_cote_d_azur/var", 7390, "FR-83")
france_departement("provence_alpes_cote_d_azur/vaucluse", 7445, "FR-84")
france_departement("rhone_alpes/ain", 7387, "FR-01")
france_departement("rhone_alpes/ardeche", 7430, "FR-07")
france_departement("rhone_alpes/drome", 7434, "FR-26")
france_departement("rhone_alpes/isere", 7437, "FR-38")
france_departement("rhone_alpes/loire", 7420, "FR-42")
france_departement("rhone_alpes/rhone", 7378, "FR-69", include=[
# Lyon
'merge_public_equipment_FR_lyon_toilets',
])
france_departement("rhone_alpes/savoie", 7425, "FR-73")
france_departement("rhone_alpes/haute_savoie", 7407, "FR-74", include=[
# Annecy
'merge_public_transport_FR_sibra',
])
france_departement_dom = gen_country(None, country_base='france', download_repo=OSMFR, language='fr', municipality_ref='ref:INSEE',
phone_len=9, phone_format=r'^([+]%s([- ./]*[0-9]){8}[0-9])|[0-9]{4}|[0-9]{6}$', phone_international='00', phone_local_prefix='0',
include=[
'osmosis_building_geodesie_FR',
'osmosis_natural_swimming-pool',
'osmosis_fantoir',
'osmosis_highway_motorway',
'osmosis_highway_zone',
'merge_heritage_FR_merimee',
'merge_poste_FR',
'merge_school_FR',
'merge_college_FR',
'merge_service_public_FR',
'merge_pitch_FR',
'merge_police_FR_gn',
'merge_police_FR_pn',
'merge_healthcare_FR_finess',
'merge_postal_code_FR',
'merge_post_box_FR',
'merge_shop_FR',
'merge_wastewater_plant_FR',
'merge_museum_FR',
'merge_radio_support_FR',
'merge_defibrillators_FR',
'merge_defibrillators_FR_aedmap',
'merge_cemetery_FR',
'merge_man_made_FR',
'merge_poi_FR',
'merge_natural_FR',
'merge_reservoir_FR',
'merge_water_FR',
], **{'addr:city-admin_level': '8,9'})
france_departement_dom(["central-america", "guadeloupe"], 1401835, "FR-GP", dep_code=971, proj=32620, phone_code="590")
france_departement_dom(["south-america", "guyane"], 1260551, "FR-GF", dep_code=973, language='fr_GF', proj=2972, phone_code="594")
france_departement_dom(["central-america", "martinique"], 1891495, "FR-MQ", dep_code=972, proj=32620, phone_code="596")
france_departement_dom(["africa", "mayotte"], 1259885, "FR-YT", dep_code=976, proj=32738, phone_code="262")
france_departement_dom(["africa", "reunion"], 1785276, "FR-RE", dep_code=974, proj=2975, phone_code="262")
france_com = gen_country(None, country_base='france', download_repo=OSMFR, language='fr', municipality_ref='ref:INSEE',
phone_len=9, phone_format=r'^([+]%s([- ./]*[0-9]){8}[0-9])|[0-9]{4}|[0-9]{6}$', phone_international='00', phone_local_prefix='0',
include=[
'merge_college_FR',
'merge_service_public_FR',
'merge_pitch_FR',
'merge_police_FR_gn',
'merge_police_FR_pn',
'merge_postal_code_FR',
'merge_radio_support_FR',
], **{'addr:city-admin_level': '8,9'})
france_com(["central-america", "saint_barthelemy"], 537967, "FR-BL", proj=2969, phone_code="590", country="saintbarthelemy")
france_com(["central-america", "saint_martin"], 1891583, "FR-MF", proj=2969, phone_code="590", country="saintmartin")
france_com(["north-america", "saint_pierre_et_miquelon"], 233377, "FR-PM", proj=32621, phone_code="508", country="saintpierreetmiquelon")
france_com(["oceania", "wallis_et_futuna"], 290162, "FR-WF", proj=32701, phone_code="681", country="wallisetfutuna")
france_com(["oceania", "polynesie"], 3412620, "FR-PF", language='fr_PF', proj=32706, phone_code="689", phone_format=None, phone_len=8, phone_len_short=6, phone_local_prefix=None, phone_international='00')
france_com(["australia-oceania", "new-caledonia"], 3407643, "NC", download_repo=GEOFABRIK, proj=3163, country="nouvellecaledonie",
phone_code="687", phone_len=6, phone_format=r"^[+]%s([- ./]*[0-9]){5}[0-9]$", phone_international='00')
default_country("merge", "france_taaf", 6063103, download_repo=OSMFR, analyser_options={"country": "TF", "language": "fr", "proj": 32738})
###########################################################################
france_local_db = template_config("france_local_db", 1403916, {"project": "openstreetmap", "country": "FR", "language": "fr", "proj": 2154})
france_local_db.db_persistent = True
france_local_db.db_base = "osm"
france_local_db.db_user = "osmose"
france_local_db.db_password = "clostAdtoi"
france_local_db.db_schema = "osmosis"
france_local_db.db_schema_path = "\"$user\",osmosis,public"
france_local_db.sql_post_scripts += [
france_local_db.dir_scripts + "/osmosis/CreateFunctions.sql",
france_local_db.dir_scripts + "/osmosis/CreateMergeAnalyserCache.sql",
]
france_local_db.download["diff_path"] = "/data/work/osmosis/" # path to find state.txt
france_local_db.analyser["merge_heritage_FR_merimee"] = "xxx"
france_local_db.analyser["merge_poste_FR"] = "xxx"
france_local_db.analyser["merge_school_FR"] = "xxx"
france_local_db.analyser["merge_railway_level_crossing_FR"] = "xxx"
france_local_db.analyser["merge_railway_railstation_FR"] = "xxx"
france_local_db.analyser["merge_tmc_point_FR"] = "xxx"
france_local_db.analyser["merge_geodesie"] = "xxx"
france_local_db.analyser["merge_college_FR"] = "xxx"
france_local_db.analyser["merge_service_public_FR"] = "xxx"
france_local_db.analyser["merge_pitch_FR"] = "xxx"
france_local_db.analyser["merge_police_FR_gn"] = "xxx"
france_local_db.analyser["merge_police_FR_pn"] = "xxx"
france_local_db.analyser["merge_fuel_FR"] = "xxx"
france_local_db.analyser["merge_healthcare_FR_finess"] = "xxx"
france_local_db.analyser["merge_postal_code_FR"] = "xxx"
france_local_db.analyser["merge_geodesie_support_FR"] = "xxx"
france_local_db.analyser["merge_post_box_FR"] = "xxx"
france_local_db.analyser["merge_power_plant_FR"] = "xxx"
france_local_db.analyser["merge_power_substation_FR"] = "xxx"
france_local_db.analyser["merge_power_tower_FR"] = "xxx"
france_local_db.analyser["merge_restriction_motorway_FR"] = "xxx"
france_local_db.analyser["merge_power_substation_minor_FR"] = "xxx"
france_local_db.analyser["merge_wastewater_plant_FR"] = "xxx"
france_local_db.analyser["merge_museum_FR"] = "xxx"
france_local_db.analyser["merge_radio_support_FR"] = "xxx"
france_local_db.analyser["merge_carpool_FR"] = "xxx"
france_local_db.analyser["merge_charging_station_FR"] = "xxx"
france_local_db.analyser["merge_parking_FR_BNLS"] = "xxx"
france_local_db.analyser["merge_tourism_FR"] = "xxx"
france_local_db.analyser["merge_cemetery_FR"] = "xxx"
france_local_db.analyser["merge_man_made_FR"] = "xxx"
france_local_db.analyser["merge_poi_FR"] = "xxx"
france_local_db.analyser["merge_natural_FR"] = "xxx"
france_local_db.analyser["merge_reservoir_FR"] = "xxx"
france_local_db.analyser["merge_water_FR"] = "xxx"
france_local_db.analyser["merge_defibrillators_FR"] = "xxx"
france_local_db.analyser["merge_defibrillators_FR_aedmap"] = "xxx"
#########################################################################
default_country("europe", "albania", 53292, {"country": "AL", "language": "sq", "proj": 32634})
default_country("europe", "andorra", 9407, {"country": "AD", "language": "ca", "proj": 2154})
default_country("europe", "belarus", 59065, {"country": "BY", "language": ["be", "ru"], "proj": 32635}, download_repo=GEOFABRIK)
default_country("europe", "bosnia-herzegovina", 2528142, {"country": "BA", "language": ["bs", "hr", "sr"], "proj": 32633}, download_repo=GEOFABRIK)
default_country("europe", "bulgaria", 186382, {"country": "BG", "language": "bg", "proj": 32635}, download_repo=GEOFABRIK)
default_country("europe", "croatia", 214885, {"country": "HR", "language": "hr", "proj": 32633}, download_repo=GEOFABRIK)
default_country("europe", "estonia", 79510, {"country": "EE", "language": "et", "proj": 32634}, download_repo=GEOFABRIK)
default_country("europe", "cyprus", 307787, {"country": "CY", "language": ["el", "tr", "en"], "driving_side": "left", "proj": 32636})
default_country("europe", "faroe-islands", 52939, {"country": "FO", "language": "fo", "proj": 2169})
default_country("europe", "greece", 192307, {"country": "GR", "language": "el","proj": 32635}, download_repo=GEOFABRIK)
default_country("europe", "guernesey", 270009, {"country": "GG", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32630}, download_repo=OSMFR)
default_country("europe", "hungary", 21335, {"country": "HU", "language": "hu", "proj": 32633}, download_repo=GEOFABRIK)
default_country("europe", "ireland", 62273, {"country": "IE", "driving_side": "left", "language": ["en", "ga"], "proj": 32629}, download_repo=OSMFR)
default_country("europe", "isle-of-man", 62269, {"country": "IM", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32630})
default_country("europe", "jersey", 367988, {"country": "JE", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32630}, download_repo=OSMFR)
default_country("europe", "kosovo", 2088990, {"country": "XK", "language": ["sq", "sr-Latn"], "proj": 32634, "multilingual-style": "xk"})
default_country("europe", "liechtenstein", 1155955, {"country": "LI", "language": "de", "proj": 32632})
lithuania = default_country("europe", "lithuania", 72596, {"country": "LT", "language": "lt", "proj": 32635, "osmosis_way_approximate": {"highway": ("motorway", "trunk", "primary", "secondary", "tertiary")}}, download_repo=GEOFABRIK)
del(lithuania.analyser["osmosis_highway_cul-de-sac_level"]) # follow official highway classification
del(lithuania.analyser["osmosis_highway_broken_level_continuity"]) # follow official highway classification
default_country("europe", "latvia", 72594, {"country": "LV","language": "lv", "proj": 32634}, download_repo=GEOFABRIK)
luxembourg = default_country("europe", "luxembourg", 2171347, {"country": "LU", "language": "fr_LU", "proj": 2169, "boundary_detail_level": 6})
luxembourg.analyser["merge_emergency_points_LU"] = "xxx"
default_country("europe", "malta", 365307, {"country": "MT", "language": "en", "driving_side": "left", "proj": 32633})
default_country("europe", "macedonia", 53293, {"country": "MK", "language": "sq", "proj": 32634})
default_country("europe", "moldova", 58974, {"country": "MD", "language": "ro", "proj": 32635}, download_repo=GEOFABRIK)
default_country("europe", "monaco", 1124039, {"country": "MC", "language": "fr", "proj": 2154, "phone_code": '377', "phone_len": 8, "phone_format": r'^[+]%s([- ./]*[469])([- ./]*[0-9]){6}[0-9]$', "phone_international": '00'}, download_repo=OSMFR)
default_country("europe", "montenegro", 53296, {"country": "ME", "proj": 32634})
default_country("europe", "romania", 90689, {"country": "RO", "language": "ro", "proj": 31700})
default_country("europe", "san_marino", 54624, {"country": "SM", "language": "it", "proj": 23032}, download_repo=OSMFR)
default_country("europe", "serbia", 1741311, {"country": "RS", "language": "sr", "proj": 32634}, download_repo=GEOFABRIK)
default_country("europe", "slovenia", 218657, {"country": "SI", "language": ["sl", "hu", "it"], "proj": 32633}, download_repo=GEOFABRIK)
default_country("europe", "turkey", 174737, {"country": "TR", "language": "tr", "proj": 32636}, download_repo=GEOFABRIK)
default_country("europe", "vatican_city", 36989, {"country": "VA", "language": "it", "proj": 23032}, download_repo=OSMFR)
default_country("europe", "united_kingdom_akrotiri_and_dhekelia", 3263728, {"country": "GB", "language": ["en", "he"], "driving_side": "left", "proj": 32636}, download_country="cyprus") # British Sovereign Base in Cyprus
default_country("europe", "united_kingdom_gibraltar", 1278736, {"country": "GI", "language": "en", "proj": 32630}, download_repo=OSMFR, download_country="gibraltar")
default_country("europe", "united_kingdom_northern_ireland", 156393, {"country": "GB-NIR", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32629}, download_repo=OSMFR, download_country="united_kingdom/northern_ireland")
default_country("europe", "united_kingdom_wales", 58437, {"country": "GB-WLS", "language": ["en", "cy"], "driving_side": "left", "speed_limit_unit": "mph", "proj": 32630}, download_repo=GEOFABRIK, download_country="great-britain/wales")
default_country("europe", "united_kingdom_scotland", 58446, {"country": "GB-SCT", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32630}, download_repo=GEOFABRIK, download_country="great-britain/scotland")
iceland = default_country("europe","iceland", 299133, {"country": "IS", "language": "is", "proj": 32627}) # 299133
iceland.download["url"] = ""
default_country("europe", "denmark", 50046, {"country": "DK", "language": "da","proj": 32632, "phone_code": '45', "phone_len": 8, "phone_international": '00'}, download_repo=GEOFABRIK)
#########################################################################
be_part = gen_country('europe', 'belgium', download_repo=OSMFR, proj=32631, municipality_ref='ref:INS',
phone_code='32', phone_len=[8, 9], phone_len_short=4, phone_international='00', phone_local_prefix='0')
be_part('brussels_capital_region', 54094, 'BE-BRU', language=['fr', 'nl'], **{'multilingual-style': 'be'})
be_part('flanders', 53134, 'BE-VLG', language='nl')
be_part('wallonia_french_community', 2620920, 'BE-WAL', language='fr')
be_part('wallonia_german_community', 2425209, 'BE-WAL', language='de')
#########################################################################
se_part = gen_country('europe', 'sweden', download_repo=OSMFR, proj=32633, language='sv')
se_part('stockholm', 54391, 'SE-AB')
se_part('vasterbotten', 52825, 'SE-AC')
se_part('norrbotten', 52824, 'SE-BD')
se_part('uppsala', 54220, 'SE-C')
se_part('sodermanland', 54386, 'SE-D')
se_part('ostergotland', 940675, 'SE-E')
se_part('jonkoping', 54374, 'SE-F')
se_part('kronoberg', 54412, 'SE-G')
se_part('kalmar', 54417, 'SE-H')
se_part('gotland', 941530, 'SE-I')
se_part('blekinge', 54413, 'SE-K')
se_part('skane', 54409, 'SE-M')
se_part('halland', 54403, 'SE-N')
se_part('vastra_gotaland', 54367, 'SE-O')
se_part('varmland', 54223, 'SE-S')
se_part('orebro', 54222, 'SE-T')
se_part('vastmanland', 54221, 'SE-U')
se_part('dalarna', 52834, 'SE-W')
se_part('gavleborg', 52832, 'SE-X')
se_part('vasternorrland', 52827, 'SE-Y')
se_part('jamtland', 52826, 'SE-Z')
#########################################################################
ch_part = gen_country('europe', 'switzerland', download_repo=OSMFR, proj=2056, municipality_ref=['swisstopo:BFS_NUMMER', 'swisstopo:BEZIRKSNUM'],
phone_code='41', phone_len=9, phone_international='00', phone_local_prefix='0')
ch_part('aargau', 1686359, 'CH-AG', language='de')
ch_part('appenzell_ausserrhoden', 1686649, 'CH-AR', language='de')
ch_part('appenzell_innerrhoden', 1686666, 'CH-AI', language='de')
ch_part('basel_landschaft', 1686366, 'CH-BL', language='de')
ch_part('basel_stadt', 1699639, 'CH-BS', language='de')
ch_part('bern', 1686344, 'CH-BE', language=['de', 'fr'])
ch_part('fribourg', 1698314, 'CH-FR', language=['fr', 'de'])
ch_part('geneva', 1702419, 'CH-GE', language='fr')
ch_part('glarus', 1685673, 'CH-GL', language='de')
ch_part('grisons', 1686631, 'CH-GR', language=['de', 'it', 'rm'])
ch_part('jura', 1697347, 'CH-JU', language='fr')
ch_part('lucerne', 1685677, 'CH-LU', language='de')
ch_part('neuchatel', 1702420, 'CH-NE', language='fr')
ch_part('nidwalden', 1686449, 'CH-NW', language='de')
ch_part('obwalden', 1686448, 'CH-OW', language='de')
ch_part('schaffhausen', 1696112, 'CH-SH', language='de')
ch_part('schwyz', 1688583, 'CH-SZ', language='de')
ch_part('solothurn', 1701133, 'CH-SO', language='de')
ch_part('saint_gallen', 1687006, 'CH-SG', language='de')
ch_part('thurgau', 1693811, 'CH-TG', language='de')
ch_part('ticino', 1687730, 'CH-TI', language='it')
ch_part('uri', 1693971, 'CH-UR', language='de')
ch_part('valais', 1686699, 'CH-VS', language=['fr', 'de'])
ch_part('vaud', 1702421, 'CH-VD', language='fr')
ch_part('zug', 1686447, 'CH-ZG', language='de')
ch_part('zurich', 1690227, 'CH-ZH', language='de')
#########################################################################
fi_part = gen_country('europe', 'finland', download_repo=OSMFR, proj=32635)
fi_part('lapland', 2541341, 'FI-10', language="fi")
fi_part('north_ostrobothnia', 1724360, 'FI-14', language="fi")
fi_part('kainuu', 1997164, 'FI-05', language="fi")
fi_part('north_karelia', 1999428, 'FI-13', language="fi")
fi_part('north_savo', 918898, 'FI-15', language="fi")
fi_part('south_savo', 918897, 'FI-04', language="fi")
fi_part('south_karelia', 2067231, 'FI-02', language="fi")
fi_part('central_finland', 1701740, 'FI-08', language="fi")
fi_part('south_ostrobothnia', 1702263, 'FI-03', language="fi")
fi_part('ostrobothnia', 2000320, 'FI-12', language=["fi", "sv"])
fi_part('central_ostrobothnia', 1702330, 'FI-07', language=["fi", "sv"])
fi_part('pirkanmaa', 1701741, 'FI-11', language="fi")
fi_part('satakunta', 2000361, 'FI-17', language="fi")
fi_part('paijat_hame', 1703362, 'FI-16', language="fi")
fi_part('kanta_hame', 1473990, 'FI-06', language="fi")
fi_part('kymenlaakso', 2102313, 'FI-09', language="fi")
fi_part('uusimaa', 37355, 'FI-18', language=["fi", "sv"])
fi_part('southwest_finland', 38092, 'FI-19', language=["fi", "sv"])
fi_part('aland', 1650407, 'AX', language="sv")
#########################################################################
default_country("europe", "portugal", 295480, {"country": "PT", "language": "pt", "proj": 32629}, download_repo=GEOFABRIK)
pt_part = gen_country('europe', 'portugal', download_repo=OSMFR, language='pt')
pt_part('azores', 6451096, 'PT', proj=32627)
pt_part('madeira', 6451097, 'PT', proj=32628)
#########################################################################
ua_oblasts = gen_country('europe', 'ukraine', download_repo=OSMFR, language='uk', proj=32636)
ua_oblasts('cherkasy_oblast', 91278, 'UA-71')
ua_oblasts('chernihiv_oblast', 71249, 'UA-74')
ua_oblasts('chernivtsi_oblast', 72526, 'UA-77')
ua_oblasts('dnipropetrovsk_oblast', 101746, 'UA-12')
ua_oblasts('donetsk_oblast', 71973, 'UA-14')
ua_oblasts('ivano-frankivsk_oblast', 72488, 'UA-26')
ua_oblasts('kharkiv_oblast', 71254, 'UA-63')
ua_oblasts('kherson_oblast', 71022, 'UA-65')
ua_oblasts('khmelnytskyi_oblast', 90742, 'UA-68')
ua_oblasts('kiev_oblast', 71248, 'UA-32')
ua_oblasts('kiev', 421866, 'UA-30')
ua_oblasts('kirovohrad_oblast', 101859, 'UA-35')
ua_oblasts('luhansk_oblast', 71971, 'UA-09')
ua_oblasts('lviv_oblast', 72380, 'UA-46')
ua_oblasts('mykolaiv_oblast', 72635, 'UA-48')
ua_oblasts('odessa_oblast', 72634, 'UA-51')
ua_oblasts('poltava_oblast', 91294, 'UA-53')
ua_oblasts('rivne_oblast', 71236, 'UA-56')
ua_oblasts('sumy_oblast', 71250, 'UA-59')
ua_oblasts('ternopil_oblast', 72525, 'UA-61')
ua_oblasts('vinnytsia_oblast', 90726, 'UA-05')
ua_oblasts('volyn_oblast', 71064, 'UA-07')
ua_oblasts('zakarpattia_oblast', 72489, 'UA-21')
ua_oblasts('zaporizhia_oblast', 71980, 'UA-23')
ua_oblasts('zhytomyr_oblast', 71245, 'UA-18')
#########################################################################
no_county = gen_country('europe', 'norway', download_repo=OSMFR, language='no', proj=32632)
no_county('nordland', 408105, 'NO-18')
no_county('troms', 407717, 'NO-19')
no_county('finnmark', 406389, 'NO-20')
no_county('troendelag', 406567, 'NO-23')
no_county('moere_og_romsdal', 406868, 'NO-15')
no_county('sogn_og_fjordane', 407787, 'NO-14')
no_county('hordaland', 404144, 'NO-12')
no_county('rogaland', 405836, 'NO-11')
no_county('aust-agder', 406015, 'NO-09')
no_county('vest-agder', 405929, 'NO-10')
no_county('oslo', 406091, 'NO-03')
no_county('akershus', 406106, 'NO-02')
no_county('oestfold', 406060, 'NO-01')
no_county('vestfold', 404589, 'NO-07')
no_county('telemark', 405156, 'NO-08')
no_county('buskerud', 412297, 'NO-06')
no_county('oppland', 412377, 'NO-05')
no_county('hedmark', 412436, 'NO-04')
no_county('svalbard', 1337397, 'SJ')
no_county('jan_mayen', 1337126, 'SJ')
#########################################################################
default_country_simple("", "antarctica", None, {"proj": 3031}, download_repo=GEOFABRIK)
#########################################################################
default_country("north-america", "greenland", 2184073, {"country": "GL", "language": "kl", "proj": 3184})
default_country("north-america", "united_kingdom_bermuda", 1993208, {"country": "BM", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR, download_country="bermuda")
#########################################################################
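# Mexican states (OSM France extracts); the street name proximity check is excluded
# because of the complicated local street numbering.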
mexico_state = gen_country('north-america', 'mexico', download_repo=OSMFR, language='es', proj=32614, exclude=[
"osmosis_highway_name_close" # Complicated Street Numbering
])
mexico_state("aguascalientes", 2610002, "MX-AGU")
mexico_state("baja_california", 2589601, "MX-BCN")
mexico_state("baja_california_sur", 2589611, "MX-BCS")
mexico_state("campeche", 2568834, "MX-CAM")
mexico_state("chiapas", 2556679, "MX-CHP")
mexico_state("chihuahua", 1673425, "MX-CHH")
mexico_state("coahuila", 1661524, "MX-COA")
mexico_state("colima", 2340912, "MX-COL")
mexico_state("durango", 2399740, "MX-DUR")
mexico_state("guanajuato", 2340909, "MX-GUA")
mexico_state("guerrero", 2439316, "MX-GRO")
mexico_state("hidalgo", 1376490, "MX-HID")
mexico_state("jalisco", 2340910, "MX-JAL")
mexico_state("mexico_city", 1376330, "MX-CMX")
mexico_state("michoacan", 2340636, "MX-MIC")
mexico_state("morelos", 1376332, "MX-MOR")
mexico_state("nayarit", 7695827, "MX-NAY")
mexico_state("nuevo_leon", 1661523, "MX-NLE")
mexico_state("oaxaca", 2529822, "MX-OAX")
mexico_state("puebla", 1376491, "MX-PUE")
mexico_state("queretaro", 2340903, "MX-QUE")
mexico_state("quintana_roo", 2614434, "MX-ROO")
mexico_state("san_luis_potosi", 4086617, "MX-SLP")
mexico_state("sinaloa", 2455086, "MX-SIN")
mexico_state("sonora", 1673426, "MX-SON")
mexico_state("state_of_mexico", 1376489, "MX-MEX")
mexico_state("tabasco", 2556680, "MX-TAB")
mexico_state("tamaulipas", 2415518, "MX-TAM")
mexico_state("tlaxcala", 1375274, "MX-TLA")
mexico_state("veracruz", 2415761, "MX-VER")
mexico_state("yucatan", 2614435, "MX-YUC")
mexico_state("zacatecas", 2399704, "MX-ZAC")
#########################################################################
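# US states, each with its own projection; California is additionally split by county below.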
us_state = gen_country('north-america/us', country_base='usa', language='en', speed_limit_unit='mph')
us_state("alabama", 161950, "US-AL", proj=26916)
us_state("alaska", 1116270, "US-AK", proj=26905)
us_state("arizona", 162018, "US-AZ", proj=26912)
us_state("arkansas", 161646, "US-AR", proj=26715)
us_ca_county = gen_country('north-america/us-west/california', country_base='usa_california', download_repo=OSMFR, language='en', proj=26910)
us_ca_county("alameda", 396499, "US-CA-ALA")
us_ca_county("alpine", 396497, "US-CA-ALP")
us_ca_county("amador", 396490, "US-CA-AMA")
us_ca_county("butte", 396508, "US-CA-BUT")
us_ca_county("calaveras", 396470, "US-CA-CAL")
us_ca_county("colusa", 396476, "US-CA-COL")
us_ca_county("contra_costa", 396462, "US-CA-CON")
us_ca_county("del_norte", 396503, "US-CA-DEL")
us_ca_county("el_dorado", 396481, "US-CA-ELD")
us_ca_county("fresno", 396492, "US-CA-FRE")
us_ca_county("glenn", 396493, "US-CA-GLE")
us_ca_county("humboldt", 396458, "US-CA-HUM")
us_ca_county("imperial", 396515, "US-CA-IMP")
us_ca_county("inyo", 396491, "US-CA-INY")
us_ca_county("kern", 396494, "US-CA-KER")
us_ca_county("kings", 396480, "US-CA-KIN")
us_ca_county("lake", 396502, "US-CA-LAK")
us_ca_county("lassen", 396469, "US-CA-LAS")
us_ca_county("los_angeles", 396479, "US-CA-LOS")
us_ca_county("madera", 396488, "US-CA-MAD")
us_ca_county("marin", 396461, "US-CA-MRN")
us_ca_county("mariposa", 396465, "US-CA-MP")
us_ca_county("mendocino", 396489, "US-CA-MEN")
us_ca_county("merced", 396504, "US-CA-MER")
us_ca_county("modoc", 396506, "US-CA-MOD")
us_ca_county("mono", 396472, "US-CA-MNO")
us_ca_county("monterey", 396485, "US-CA-MNT")
us_ca_county("napa", 396463, "US-CA-NAP")
us_ca_county("nevada", 396464, "US-CA-NEV")
us_ca_county("orange", 396466, "US-CA-ORA")
us_ca_county("placer", 396511, "US-CA-PLA")
us_ca_county("plumas", 396477, "US-CA-PLU")
us_ca_county("riverside", 396495, "US-CA-RIV")
us_ca_county("sacramento", 396460, "US-CA-SAC")
us_ca_county("san_benito", 396500, "US-CA-SBT")
us_ca_county("san_bernardino", 396509, "US-CA-SBD")
us_ca_county("san_diego", 396482, "US-CA-SDG")
us_ca_county("san_francisco", 396487, "US-CA-SFO")
us_ca_county("san_joaquin", 396467, "US-CA-SJQ")
us_ca_county("san_luis_obispo", 396496, "US-CA-SLO")
us_ca_county("san_mateo", 396498, "US-CA-SMT")
us_ca_county("santa_barbara", 396510, "US-CA-SBA")
us_ca_county("santa_clara", 396501, "US-CA-SCL")
us_ca_county("santa_cruz", 7870163, "US-CA-SCZ")
us_ca_county("shasta", 396512, "US-CA-SHA")
us_ca_county("sierra", 396474, "US-CA-SIE")
us_ca_county("siskiyou", 396483, "US-CA-SIS")
us_ca_county("solano", 396513, "US-CA-SOL")
us_ca_county("sonoma", 396468, "US-CA-SON")
us_ca_county("stanislaus", 396514, "US-CA-STA")
us_ca_county("sutter", 396478, "US-CA-SUT")
us_ca_county("tehama", 396486, "US-CA-TEH")
us_ca_county("trinity", 396484, "US-CA-TRI")
us_ca_county("tulare", 396459, "US-CA-TUL")
us_ca_county("tuolumne", 396471, "US-CA-TUO")
us_ca_county("ventura", 396505, "US-CA-VEN")
us_ca_county("yolo", 396507, "US-CA-YOL")
us_ca_county("yuba", 396475, "US-CA-YUB")
us_state("colorado", 161961, "US-CO", proj=26713)
us_state("connecticut", 165794, "US-CT", proj=3507)
us_state("delaware", 162110, "US-DE", proj=3509)
us_state("district-of-columbia", 162069, "US-DC", proj=3559)
us_state("florida", 162050, "US-FL", proj=3513)
us_state("georgia", 161957, "US-GA", proj=26917)
us_state("hawaii", 166563, "US-HI", proj=2783) # note: projection for hawaii is the one used for center islands, not for the whole
us_state("idaho", 162116, "US-ID", proj=3741)
us_state("illinois", 122586, "US-IL", proj=3746)
us_state("indiana", 161816, "US-IN", proj=3745)
us_state("iowa", 161650, "US-IA", proj=3745)
us_state("kansas", 161644, "US-KS", proj=3744)
us_state("kentucky", 161655, "US-KY", proj=3088)
us_state("louisiana", 224922, "US-LA", proj=3745, exclude=[
    'osmosis_waterway',  # Too many swamps, not suitable
])
us_state("maine", 63512, "US-ME", proj=3749)
us_state("maryland", 162112, "US-MD", proj=26985)
us_state("massachusetts", 61315, "US-MA", proj=2805)
us_state("michigan", 165789, "US-MI", proj=3746)
us_state("minnesota", 165471, "US-MN", proj=26992)
us_state("mississippi", 161943, "US-MS", proj=3816)
us_state("missouri", 161638, "US-MO", proj=3601)
us_state("montana", 162115, "US-MT", proj=3604)
us_state("nebraska", 161648, "US-NE", proj=3606)
us_state("nevada", 165473, "US-NV", proj=3607)
us_state("new-hampshire", 67213, "US-NH", proj=3613)
us_state("new-jersey", 224951, "US-NJ", proj=3615)
us_state("new-mexico", 162014, "US-NM", proj=3617)
us_state("new-york", 61320, "US-NY", proj=3623)
us_state("north-carolina", 224045, "US-NC", proj=3631)
us_state("north-dakota", 161653, "US-ND", proj=3633)
us_state("ohio", 162061, "US-OH", proj=26917)
us_state("oklahoma", 161645, "US-OK", proj=3639)
us_state("oregon", 165476, "US-OR", proj=3643)
us_state("pennsylvania", 162109, "US-PA", proj=3651)
us_state("rhode-island", 392915, "US-RI", proj=3653)
us_state("south-carolina", 224040, "US-SC", proj=3655)
us_state("south-dakota", 161652, "US-SD", proj=3659)
us_state("tennessee", 161838, "US-TN", proj=3661)
us_state("texas", 114690, "US-TX", proj=3082)
us_state("utah", 161993, "US-UT", proj=3675)
us_state("vermont", 60759, "US-VT", proj=3684)
us_state("virginia", 224042, "US-VA", proj=3968)
us_state("washington", 165479, "US-WA", proj=3725)
us_state("west-virginia",162068, "US-WV", proj=3747)
us_state("wisconsin", 165466, "US-WI", proj=3695)
us_state("wyoming", 161991, "US-WY", proj=26913)
default_country("oceania", "usa_guam", 306001, {"country": "GU", "language": "en", "proj": 32654}, download_repo=OSMFR, download_country="guam")
default_country("oceania", "usa_northern_mariana_islands", 306004, {"country": "MP", "language": "en", "proj": 32654}, download_repo=OSMFR, download_country="northern_mariana_islands")
default_country("oceania", "usa_american_samoa", 2177187, {"country": "AS", "language": "en", "proj": 32601}, download_repo=OSMFR, download_country="american_samoa")
#########################################################################
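# Options shared by the Canadian provinces and territories; Ontario and Quebec are further split into regions.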
canada_options = {'download_repo': OSMFR, 'addr:street_distance': 2000,
'phone_code': '1', 'phone_len': 10, 'phone_format': r"^[+]%s[- ][0-9]{3}[- ][0-9]{3}[- ][0-9]{4}$", 'suffix_separators': "x",
'exclude': [
'osmosis_waterway',
]}
canada_province = gen_country('north-america', 'canada', language='en', **canada_options)
canada_province("alberta", 391186, "CA-AB", proj=32610)
canada_province("british_columbia", 390867, "CA-BC", proj=32609)
canada_province("manitoba", 390841, "CA-MB", proj=32615)
canada_province("new_brunswick", 68942, "CA-NB", proj=32619)
canada_province("newfoundland_and_labrador", 391196, "CA-NL", proj=32621)
canada_province("northwest_territories", 391220, "CA-NT", proj=32612)
canada_province("nova_scotia", 390558, "CA-NS", proj=32620)
canada_province("nunavut", 390840, "CA-NU", proj=32616)
canada_ontario_region = gen_country('north-america', 'canada/ontario', proj=32616, country_code='CA-ON', language='en', **canada_options)
canada_ontario_region('central_ontario', 9330364)
canada_ontario_region('eastern_ontario', 9330323)
canada_ontario_region('golden_horseshoe', 9330407)
canada_ontario_region('northeastern_ontario', 9330447)
canada_ontario_region('northwestern_ontario', 9330452)
canada_ontario_region('southwestern_ontario', 9330436)
canada_province("prince_edward_island", 391115, "CA-PE", proj=32620)
canada_quebec_region = gen_country('north-america', 'canada/quebec', proj=2138, country_code='CA-QC', language='fr', **canada_options)
canada_quebec_region('abitibi_temiscamingue', 8107213, 'CA-QC-ABT')
canada_quebec_region('bas_saint_laurent', 8137316, 'CA-QC-BSL')
canada_quebec_region('capitale_nationale', 8114679, 'CA-QC-CAPN')
canada_quebec_region('centre_du_quebec', 8100165, 'CA-QC-CQC')
canada_quebec_region('chaudiere_appalaches', 8138409, 'CA-QC-CHAPP')
canada_quebec_region('cote_nord', 8126390, 'CA-QC-CN')
canada_quebec_region('estrie', 8098121, 'CA-QC-ESTR')
canada_quebec_region('gaspesie_iles_de_la_madeleine', 7485821, 'CA-QC-GIM')
canada_quebec_region('lanaudiere', 8098959, 'CA-QC-LAN')
canada_quebec_region('laurentides', 8098885, 'CA-QC-LAUR')
canada_quebec_region('laval', 3532125, 'CA-QC-LAV')
canada_quebec_region('mauricie', 8098985, 'CA-QC-MAUR')
canada_quebec_region('monteregie', 8093332, 'CA-QC-MGIE')
canada_quebec_region('montreal', 1571328, 'CA-QC-MTL')
canada_quebec_region('nord_du_quebec', 8118159, 'CA-QC-NQC')
canada_quebec_region('outaouais', 8100164, 'CA-QC-OUT')
canada_quebec_region('saguenay_lac_saint_jean', 8120111, 'CA-QC-SLSJ')
canada_province("saskatchewan", 391178, "CA-SK", proj=32613)
canada_province("yukon", 391455, "CA-YT", proj=32608)
#########################################################################
default_country("africa", "algeria", 192756, {"country": "DZ", "language": ["ar", "fr"], "proj": 32631}, download_repo=OSMFR)
default_country("africa", "angola", 195267, {"country": "AO", "language": "pt", "proj": 32733}, download_repo=OSMFR)
default_country("africa", "benin", 192784, {"country": "BJ", "language": "fr", "proj": 32631, 'phone_code': '229', 'phone_len': 8, 'phone_international': '00'}, download_repo=OSMFR)
default_country("africa", "botswana", 1889339, {"country": "BW", "language": "en", "driving_side": "left", "proj": 32734})
default_country("africa", "burkina_faso", 192783, {"country": "BF", "language": "fr", "proj": 32630}, download_repo=OSMFR)
default_country("africa", "burundi", 195269, {"country": "BI", "language": "fr", "proj": 32735}, download_repo=OSMFR)
default_country("africa", "cameroon", 192830, {"country": "CM", "language": "fr", "proj": 32632}, download_repo=OSMFR)
default_country("africa", "cape_verde", 535774, {"country": "CV", "language": "pt", "proj": 32626}, download_repo=OSMFR)
default_country("africa", "central_african_republic", 192790, {"country": "CF", "language": "fr", "proj": 32634}, download_repo=OSMFR)
default_country("africa", "chad", 2361304, {"country": "TD", "language": ["ar", "fr"], "proj": 32634}, download_repo=OSMFR)
default_country("africa", "comoros", 535790, {"country": "KM", "language": ["ar", "fr"], "proj": 32738}, download_repo=OSMFR)
default_country("africa", "congo_brazzaville", 192794, {"country": "CG", "language": "fr", "proj": 32733}, download_repo=OSMFR)
default_country("africa", "congo_kinshasa", 192795, {"country": "CD", "language": "fr", "proj": 32734}, download_repo=OSMFR)
default_country("africa", "djibouti", 192801, {"country": "DJ", "language": ["fr", "ar"], "proj": 32638, "multilingual-style": "dj"}, download_repo=OSMFR)
default_country("africa", "egypt", 1473947, {"country": "EG", "language": "ar", "proj": 32635})
default_country("africa", "equatorial_guinea", 192791, {"country": "GQ", "language": "es", "proj": 32732}, download_repo=OSMFR)
default_country("africa", "eritrea", 296961, {"country": "ER", "proj": 32637}, download_repo=OSMFR)
default_country("africa", "ethiopia", 192800, {"country": "ET", "proj": 32638})
default_country("africa", "gabon", 192793, {"country": "GA", "language": "fr", "proj": 32732}, download_repo=OSMFR)
default_country("africa", "gambia", 192774, {"country": "GM", "language": "en", "proj": 32628}, download_repo=OSMFR)
default_country("africa", "ghana", 192781, {"country": "GH", "language": "en", "proj": 32630}, download_repo=OSMFR)
default_country("africa", "guinea", 192778, {"country": "GN", "language": "fr", "proj": 32628}, download_repo=OSMFR)
default_country("africa", "guinea-bissau", 192776, {"country": "GW", "language": "pt", "proj": 32628})
default_country("africa", "ivory_coast", 192779, {"country": "CI", "language": "fr", "proj": 32630}, download_repo=OSMFR)
default_country("africa", "kenya", 192798, {"country": "KE", "language": "en", "driving_side": "left", "proj": 32737}, download_repo=OSMFR)
default_country("africa", "lesotho", 2093234, {"country": "LS", "language": "en", "driving_side": "left", "proj": 32735}, download_repo=OSMFR)
default_country("africa", "liberia", 192780, {"country": "LR", "language": "en", "speed_limit_unit": "mph", "proj": 32629})
default_country("africa", "libya", 192758, {"country": "LY", "language": "ar", "proj": 32633})
default_country("africa", "madagascar", 447325, {"country": "MG", "language": ["fr", "mg"], "proj": 32738}, download_repo=GEOFABRIK)
default_country("africa", "malawi", 195290, {"country": "MW", "language": "en", "driving_side": "left", "proj": 32736}, download_repo=OSMFR)
default_country("africa", "mali", 192785, {"country": "ML", "language": "fr", "proj": 32630}, download_repo=OSMFR)
default_country("africa", "mauritania", 192763, {"country": "MR", "language": "ar", "proj": 32628}, download_repo=OSMFR)
default_country("africa", "mauritius", 535828, {"country": "MU", "language": ["en", "fr"], "driving_side": "left", "proj": 32740}, download_repo=OSMFR)
default_country("africa", "morocco", 3630439, {"country": "MA", "language": ["ar", "fr", "zgh", "ber"], "proj": 32629, "multilingual-style": "ma"})
default_country("africa", "mozambique", 195273, {"country": "MZ", "language": "pt", "driving_side": "left", "proj": 32736}, download_repo=OSMFR)
default_country("africa", "namibia", 195266, {"country": "NA", "language": "en", "driving_side": "left", "proj": 32733}, download_repo=OSMFR)
default_country("africa", "niger", 192786, {"country": "NE", "language": "fr", "proj": 32632}, download_repo=OSMFR)
default_country("africa", "nigeria", 192787, {"country": "NG", "language": "en", "proj": 32633})
default_country("africa", "norway_bouvet_island", 2425963, {"country": "BV", "language": "no", "proj": 32729}, download_repo=OSMFR, download_country="bouvet_island")
default_country("africa", "rwanda", 171496, {"country": "RW", "language": ["en", "fr"], "proj": 32735}, download_repo=OSMFR)
default_country("africa", "sao_tome_and_principe", 535880, {"country": "ST", "language": "pt", "proj": 32632}, download_repo=OSMFR)
default_country("africa", "senegal", 192775, {"country": "SN", "language": "fr", "proj": 32628}, download_repo=OSMFR)
default_country("africa", "seychelles", 536765, {"country": "SC", "language": ["en", "fr"], "driving_side": "left", "proj": 32739}, download_repo=OSMFR)
default_country("africa", "sierra-leone", 192777, {"country": "SL", "language": "en", "proj": 32629})
default_country("africa", "somalia", 192799, {"country": "SO", "language": "so", "proj": 32638})
default_country("africa", "south_africa", 87565, {"country": "ZA", "language": "en", "driving_side": "left", "proj": 32735}, download_repo=OSMFR)
default_country("africa", "south_sudan", 1656678, {"country": "SS", "language": "en", "proj": 32635}, download_repo=OSMFR)
default_country("africa", "sudan", 192789, {"country": "SD", "language": ["ar", "en"], "proj": 32636}, download_repo=OSMFR)
default_country("africa", "swaziland", 88210, {"country": "SZ", "language": "en", "driving_side": "left", "proj": 32736}, download_repo=OSMFR)
default_country("africa", "tanzania", 195270, {"country": "TZ", "language": "en", "driving_side": "left", "proj": 32736})
default_country("africa", "togo", 192782, {"country": "TG", "language": "fr", "proj": 32631}, download_repo=OSMFR)
default_country("africa", "tunisia", 192757, {"country": "TN", "language": ["ar", "fr"], "proj": 32632}, download_repo=OSMFR)
default_country("africa", "uganda", 192796, {"country": "UG", "language": "en", "driving_side": "left", "proj": 32636}, download_repo=OSMFR)
default_country("africa", "united_kingdom_saint_helena_ascension_tristan_da_cunha", 1964272, {"country": "SH", "language": "en", "driving_side": "left", "proj": 32729}, download_repo=OSMFR, download_country="saint_helena_ascension_tristan_da_cunha")
default_country("africa", "western_sahara", 2559126, {"country": "EH", "proj": 32629}, download_repo=OSMFR)
default_country("africa", "zambia", 195271, {"country": "ZM", "language": "en", "driving_side": "left", "proj": 32736}, download_repo=OSMFR)
default_country("africa", "zimbabwe", 195272, {"country": "ZW", "language": "en", "driving_side": "left", "proj": 32736}, download_repo=OSMFR)
config["chad"].analyser["osmosis_way_approximate"] = "xxx"
config["djibouti"].analyser["osmosis_way_approximate"] = "xxx"
config["kenya"].analyser["osmosis_way_approximate"] = "xxx"
config["madagascar"].analyser["osmosis_way_approximate"] = "xxx"
config["mali"].analyser["osmosis_way_approximate"] = "xxx"
config["senegal"].analyser["osmosis_way_approximate"] = "xxx"
config["togo"].analyser["osmosis_way_approximate"] = "xxx"
for country, c in config.items():
if c.download and "url" in c.download and "/africa/" in c.download["url"] and not ("mayotte" in c.download["url"] or "reunion" in c.download["url"]):
    del c.analyser["osmosis_building_shapes"]
#########################################################################
default_country("asia", "afghanistan", 303427, {"country": "AF", "proj": 32641}, download_repo=OSMFR)
default_country("asia", "armenia", 364066, {"country": "AM", "language": "hy", "proj": 32641}, download_repo=OSMFR)
default_country("asia", "azerbaijan", 364110, {"country": "AZ", "language": "az", "proj": 32638})
default_country("asia", "bangladesh", 184640, {"country": "BD", "language": "bn", "driving_side": "left", "proj": 32646})
default_country("asia", "bahrain", 378734, {"country": "BH", "language": "ar","proj": 32639}, download_repo=OSMFR)
default_country("asia", "bhutan", 184629, {"country": "BT", "language": ["dz", "en"], "proj": 32646}, download_repo=OSMFR)
default_country("asia", "brunei", 2103120, {"country": "BN", "driving_side": "left", "language": "ms", "proj": 32650}, download_repo=OSMFR)
default_country("asia", "cambodia", 49898, {"country": "KHM", "language": "km", "proj": 32648}, download_repo=OSMFR)
default_country("asia", "east_timor", 305142, {"country": "TL", "language": "pt", "proj": 32651}, download_repo=OSMFR)
default_country("asia", "georgia", 28699, {"country": "GE", "language": "ka", "proj": 32637}, download_repo=OSMFR)
default_country("asia", "israel", 1473946, {"country": "IL", "language": ["he", "ar"], "proj": 32636}, download_repo=OSMFR)
default_country("asia", "iran", 304938, {"country": "IR", "language": "fa","proj": 32640}, download_repo=GEOFABRIK)
default_country("asia", "iraq", 304934, {"country": "IQ", "language": "ar", "proj": 32638})
default_country("asia", "jordan", 184818, {"country": "JO", "language": "ar", "proj": 32637})
default_country("asia", "kazakhstan", 214665, {"country": "KZ", "proj": 32640}, download_repo=GEOFABRIK)
default_country("asia", "kuwait", 305099, {"country": "KW", "language": "ar","proj": 32639}, download_repo=OSMFR)
default_country("asia", "kyrgyzstan", 178009, {"country": "KG", "language": ["ky", "ru"], "proj": 32643})
default_country("asia", "laos", 49903, {"country": "LA", "language": ["lo", "en"], "proj": 32648}, download_repo=OSMFR)
default_country("asia", "lebanon", 184843, {"country": "LB", "language": "ar", "proj": 32636})
default_country("asia", "malaysia", 2108121, {"country": "MY", "language": "ms", "driving_side": "left", "proj": 32649}, download_repo=OSMFR)
default_country("asia", "maldives", 536773, {"country": "MV", "language": "dv", "proj": 32643}, download_repo=OSMFR)
default_country("asia", "mongolia", 161033, {"country": "MN", "language": "mn", "proj": 32648})
default_country("asia", "myanmar", 50371, {"country": "MM", "language": "my", "proj": 32646}, download_repo=OSMFR)
default_country("asia", "north_korea", 192734, {"country": "KP", "language": "ko", "proj": 32652}, download_country="north-korea")
default_country("asia", "nepal", 184633, {"country": "NP", "language": "ne", "driving_side": "left", "proj": 32645})
default_country("asia", "oman", 305138, {"country": "OM", "language": "ar","proj": 32640}, download_repo=OSMFR)
default_country("asia", "pakistan", 307573, {"country": "PK", "language": ["en", "ur"], "driving_side": "left", "proj": 32642})
default_country("asia", "palestine", 1703814, {"country": "PS", "language": "ar", "proj": 32636}, download_repo=OSMFR)
default_country("asia", "philippines", 2850940, {"country": "PH", "language": "en", "proj": 32651, 'phone_code': '63', 'phone_len': [7, 8], 'phone_international': '00'}, download_repo=GEOFABRIK)
default_country("asia", "qatar", 305095, {"country": "QA", "language": "ar","proj": 32639}, download_repo=OSMFR)
default_country("asia", "saudi_arabia", 307584, {"country": "SA", "language": "ar","proj": 32637}, download_repo=OSMFR)
default_country("asia", "singapore", 536780, {"country": "SG", "language": "en", "driving_side": "left", "proj": 32648}, download_repo=OSMFR)
default_country("asia", "sri-lanka", 536807, {"country": "LK", "language": ["en", "si", "ta"], "driving_side": "left", "proj": 32644})
default_country("asia", "south_korea", 307756, {"country": "KR", "language": "ko", "proj": 32652}, download_country="south-korea")
default_country("asia", "syria", 184840, {"country": "SY", "language": "ar", "proj": 32637})
default_country("asia", "tajikistan", 214626, {"country": "TJ", "language": "tg", "proj": 32642})
default_country("asia", "taiwan", 3777248, {"country": "TW", "language": ["zh_TW", "en"], "proj": 32651}, download_repo=GEOFABRIK)
default_country("asia", "thailand", 2067731, {"country": "TH", "language": "th", "proj": 32647, "driving_side": "left"})
default_country("asia", "turkmenistan", 223026, {"country": "TM", "language": "tk", "proj": 32640})
united_arab_emirates = default_country("asia", "united_arab_emirates", 307763, {"country": "AE", "language": "ar", "proj": 32640}, download_repo=OSMFR)
del united_arab_emirates.analyser["osmosis_highway_name_close"]  # Complicated Street Numbering
default_country("asia", "united_kingdom_british_indian_ocean_territory", 1993867, {"country": "IO", "language": "en", "driving_side": "left", "proj": 32742}, download_repo=OSMFR, download_country="british_indian_ocean_territory")
default_country("asia", "uzbekistan", 196240, {"country": "UZ", "proj": 32640}, download_repo=GEOFABRIK)
default_country("asia", "vietnam", 49915, {"country": "VN", "language": "vi", "proj": 32648}, download_repo=GEOFABRIK)
default_country("asia", "yemen", 305092, {"country": "YE", "language": "ar","proj": 32638}, download_repo=GEOFABRIK)
#########################################################################
id_province = gen_country('asia', 'indonesia', download_repo=OSMFR, language='id', proj=23837)
id_province("aceh", 2390836, "ID-AC")
id_province("bali", 1615621, "ID-BA")
id_province("bangka_belitung_islands", 3797243, "ID-BB")
id_province("banten", 2388356, "ID-BT")
id_province("bengkulu", 2390837, "ID-BE")
id_province("central_java", 2388357, "ID-JT")
id_province("central_kalimantan", 2388613, "ID-KT")
id_province("central_sulawesi", 2388664, "ID-ST")
id_province("east_java", 3438227, "ID-JI")
id_province("east_kalimantan", 5449459, "ID-KI")
id_province("east_nusa_tenggara", 2396778, "ID-NT")
id_province("gorontalo", 2388665, "ID-GO")
id_province("jakarta", 6362934, "ID-JK")
id_province("jambi", 2390838, "ID-JA")
id_province("lampung", 2390839, "ID-LA")
id_province("maluku", 2396795, "ID-MA")
id_province("north_kalimantan", 5449460, "ID-KU")
id_province("north_maluku", 2396796, "ID-MU")
id_province("north_sulawesi", 2388666, "ID-SA")
id_province("north_sumatra", 2390843, "ID-SU")
id_province("papua", 4521144, "ID-PA")
id_province("riau", 2390840, "ID-RI")
id_province("riau_islands", 3797244, "ID-KR")
id_province("southeast_sulawesi", 2388668, "ID-SG")
id_province("south_kalimantan", 2388615, "ID-KS")
id_province("south_sulawesi", 2388667, "ID-SN")
id_province("south_sumatra", 2390842, "ID-SS")
id_province("west_java", 2388361, "ID-JB")
id_province("west_kalimantan", 2388616, "ID-KB")
id_province("west_nusa_tenggara", 1615622, "ID-NB")
id_province("west_papua", 4521145, "ID-PB")
id_province("west_sulawesi", 2388669, "ID-SR")
id_province("west_sumatra", 2390841, "ID-SB")
id_province("yogyakarta", 5616105, "ID-YO")
#########################################################################
# central america
default_country("central-america", "belize", 287827, {"country": "BZ", "language": "en", "speed_limit_unit": "mph", "proj": 32616})
default_country("central-america", "costa_rica", 287667, {"country": "CR", "language": "es", "proj": 32617}, download_repo=OSMFR)
default_country("central-america", "el_salvador", 1520612, {"country": "SV", "language": "es", "proj": 32616}, download_repo=OSMFR)
default_country("central-america", "guatemala", 1521463, {"country": "GT", "language": "es", "proj": 32616})
default_country("central-america", "honduras", 287670, {"country": "HN", "language": "es", "proj": 32616}, download_repo=OSMFR)
default_country("central-america", "panama", 287668, {"country": "PA", "language": "es", "proj": 32617}, download_repo=OSMFR)
default_country("central-america", "trinidad_and_tobago", 555717, {"country": "TT", "language": "en", "driving_side": "left","proj": 32620}, download_repo=OSMFR)
# caribbean
default_country("central-america", "haiti", 307829, {"country": "HT", "language": "fr", "proj": 32618},
download_repo=GEOFABRIK, download_country="haiti-and-domrep")
config["haiti"].analyser["osmosis_way_approximate"] = "xxx"
default_country("central-america", "antigua_and_barbuda", 536900, {"country": "BB", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "barbados", 547511, {"country": "BB", "language": "en", "driving_side": "left", "proj": 32621}, download_repo=OSMFR)
default_country("central-america", "bahamas", 547469, {"country": "BS", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "cuba", 307833, {"country": "CU", "language": "es", "proj": 32617, "phone_code": "53", "phone_len": 8, "phone_international": "011", "phone_local_prefix": "0"})
default_country("central-america", "dominica", 307823, {"country": "DM", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "dominican_republic", 307828, {"country": "DO", "language": "es", "proj": 32619}, download_repo=GEOFABRIK, download_country="haiti-and-domrep")
default_country("central-america", "grenada", 550727, {"country": "GD", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "jamaica", 555017, {"country": "JM", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "nicaragua", 287666, {"country": "NI", "language": "es", "proj": 32616}, download_repo=OSMFR)
default_country("central-america", "saint_lucia", 550728, {"country": "LC", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "saint_vincent_and_the_grenadines", 550725, {"country": "VC", "language": "en", "proj": 32620}, download_repo=OSMFR)
default_country("central-america", "saint_kitts_and_nevis", 536899, {"country": "KN", "language": "en", "driving_side": "left", "proj": 2005}, download_repo=OSMFR)
default_country("central-america", "united_kingdom_anguilla", 2177161, {"country": "AI", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR, download_country="anguilla")
default_country("central-america", "united_kingdom_cayman_islands", 2185366, {"country": "KY", "language": "en", "driving_side": "left", "proj": 32617}, download_repo=OSMFR, download_country="cayman_islands")
default_country("central-america", "united_kingdom_montserrat", 537257, {"country": "MS", "language": "en", "driving_side": "left", "proj": 2005}, download_repo=OSMFR, download_country="montserrat")
default_country("central-america", "united_kingdom_turks_and_caicos_islands", 547479, {"country": "TC", "language": "en", "driving_side": "left", "proj": 32619}, download_repo=OSMFR, download_country="turks_and_caicos_islands")
default_country("central-america", "united_kingdom_virgin_islands", 285454, {"country": "VG", "language": "en", "driving_side": "left", "proj": 32620}, download_repo=OSMFR, download_country="british_virgin_islands")
default_country("central-america", "usa_puerto_rico", 4422604, {"country": "PR", "language": ["es", "en"], "proj": 32619, "boundary_detail_level": 6}, download_repo=OSMFR, download_country="puerto_rico")
default_country("central-america", "usa_virgin_islands", 286898, {"country": "VI", "language": "en", "driving_side": "left", "proj": 4437}, download_repo=OSMFR, download_country="usa_virgin_islands")
#########################################################################
default_country("australia-oceania", "new-zealand", 556706, {"country": "NZ", "language": "en", "proj": 32759, "driving_side": "left", "addr:street_distance": 2000})
default_country("oceania", "cook_islands", 2184233, {"country": "CK", "language": "en", "driving_side": "left", "proj": 32603}, download_repo=OSMFR)
default_country("oceania", "marshall_islands", 571771, {"country": "MH", "language": "en", "proj": 32660}, download_repo=OSMFR)
default_country("oceania", "nauru", 571804, {"country": "NR", "language": "en", "driving_side": "left", "proj": 32659}, download_repo=OSMFR)
default_country("oceania", "niue", 1558556, {"country": "NU", "language": "en", "driving_side": "left", "proj": 32602}, download_repo=OSMFR)
default_country("oceania", "palau", 571805, {"country": "PW", "language": "en", "proj": 32653}, download_repo=OSMFR)
default_country("oceania", "micronesia", 571802, {"country": "FM", "language": "en", "speed_limit_unit": "mph", "proj": 32656}, download_repo=OSMFR)
default_country("oceania", "papua_new_guinea", 307866, {"country": "PG", "language": "en","proj": 32755}, download_repo=OSMFR)
default_country("oceania", "samoa", 1872673, {"country": "WS", "language": "en", "driving_side": "left", "speed_limit_unit": "mph", "proj": 32602}, download_repo=OSMFR)
default_country("oceania", "solomon_islands", 1857436, {"country": "SB", "language": "en", "driving_side": "left", "proj": 32657}, download_repo=OSMFR)
default_country("oceania", "new_zealand_tokelau", 2186600, {"country": "TK", "language": "en", "driving_side": "left", "proj": 32602}, download_repo=OSMFR, download_country="tokelau")
default_country("oceania", "tonga", 2186665, {"country": "TO", "language": "en", "driving_side": "left", "proj": 32601}, download_repo=OSMFR)
default_country("oceania", "tuvalu", 2177266, {"country": "TV", "language": "en", "driving_side": "left", "proj": 32660}, download_repo=OSMFR)
default_country("oceania", "united_kingdom_pitcairn", 2185375, {"country": "PN", "language": "en", "driving_side": "left", "proj": 32709}, download_repo=OSMFR, download_country="pitcairn")
default_country("oceania", "vanuatu", 2177246, {"country": "VU", "language": ["en", "fr"], "proj": 32658}, download_repo=OSMFR)
#########################################################################
default_country("merge", "fiji", 571747, {"country": "FJ", "language": "en", "driving_side": "left", "proj": 32660}, download_repo=OSMFR)
default_country("merge", "kiribati", 571178, {"country": "KL", "language": "en", "driving_side": "left", "proj": 32660}, download_repo=OSMFR)
#########################################################################
au_state = gen_country('oceania', 'australia', download_repo=OSMFR, language='en', driving_side='left')
au_state("australian_capital_territory", 2354197, "AU-ACT", proj=32755)
au_state("new_south_wales", 2316593, "AU-NSW", proj=32755)
au_state("northern_territory", 2316594, "AU-NT", proj=32753)
au_state("western_australia", 2316598, "AU-WA", proj=32750)
au_state("south_australia", 2316596, "AU-SA", proj=32753)
au_state("victoria", 2316741, "AU-VIC", proj=32755)
au_state("queensland", 2316595, "AU-QLD", proj=32755)
au_state("tasmania", 2369652, "AU-TAS", proj=32755)
au_state("christmas_island", 2177207, "CX", proj=32648)
au_state("cocos_islands", 82636, "CC", proj=32646)
au_state("coral_sea_islands", 3225677, "AU", proj=32655)
au_state("norfolk_island", 2574988, "NF", proj=32658)
#########################################################################
default_country("south-america", "bolivia", 252645, {"country": "BO", "language": "es", "proj": 32720})
default_country("south-america", "chile", 167454, {"country": "CL", "language": "es", "proj": 32718})
colombia = default_country("south-america", "colombia", 120027, {"country": "CO", "language": "es", "proj": 32618})
del colombia.analyser["osmosis_highway_name_close"]  # Complicated Street Numbering
default_country("south-america", "ecuador", 108089, {"country": "EC", "language": "es", "proj": 32727})
default_country("south-america", "guyana", 287083, {"country": "GY", "language": "en", "driving_side": "left", "proj": 32621}, download_repo=OSMFR)
default_country("south-america", "paraguay", 287077, {"country": "PY", "language": "es", "proj": 32721}, download_repo=OSMFR)
default_country("south-america", "peru", 288247, {"country": "PE", "language": "es", "proj": 32718})
default_country("south-america", "suriname", 287082, {"country": "SR", "language": "nl", "driving_side": "left", "proj": 32621}, download_repo=OSMFR)
default_country("south-america", "united_kingdom_falkland", 2185374, {"country": "FK", "language": "en", "driving_side": "left", "proj": 32721}, download_repo=OSMFR, download_country="falkland")
default_country("south-america", "united_kingdom_south_georgia_and_south_sandwich", 1983628, {"country": "GS", "language": "en", "driving_side": "left", "proj": 32725}, download_repo=OSMFR, download_country="south_georgia_and_south_sandwich")
default_country("south-america", "uruguay", 287072, {"country": "UY", "language": "es", "proj": 32721})
default_country("south-america", "venezuela", 272644, {"country": "VE", "language": "es", "proj": 32620}, download_repo=OSMFR)
#########################################################################
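# Argentine provinces (OSM France extracts), with the local phone number settings.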
ar_state = gen_country('south-america', 'argentina', download_repo=OSMFR, language='es', proj=32720,
    phone_code='54', phone_local_prefix='0', phone_len=10, phone_international='00', suffix_separators='INT')
ar_state('buenos_aires_city', 1224652, 'AR-C')
ar_state('buenos_aires', 1632167, 'AR-B')
ar_state('catamarca', 153545, 'AR-K')
ar_state('chaco', 153554, 'AR-H')
ar_state('chubut', 153548, 'AR-CU')
ar_state('cordoba', 3592494, 'AR-X')
ar_state('corrientes', 153552, 'AR-W')
ar_state('entre_rios', 153551, 'AR-E')
ar_state('formosa', 2849847, 'AR-P')
ar_state('jujuy', 153556, 'AR-Y')
ar_state('la_pampa', 153541, 'AR-L')
ar_state('la_rioja', 153536, 'AR-F')
ar_state('mendoza', 153540, 'AR-M')
ar_state('misiones', 153553, 'AR-N')
ar_state('neuquen', 1606727, 'AR-Q')
ar_state('rio_negro', 153547, 'AR-R')
ar_state('salta', 2405230, 'AR-A')
ar_state('san_juan', 153539, 'AR-J')
ar_state('san_luis', 153538, 'AR-D')
ar_state('santa_cruz', 153549, 'AR-Z')
ar_state('santa_fe', 153543, 'AR-S')
ar_state('santiago_del_estero', 153544, 'AR-G')
ar_state('tierra_del_fuego', 153550, 'AR-V')
ar_state('tucuman', 153558, 'AR-T')
#########################################################################
br_region = gen_country('south-america', 'brazil', download_repo=OSMFR, language='pt', proj=32722, exclude=[
'osmosis_highway_name_close', # Complicated Street Numbering
])
br_region(["north", "acre"], 326266, "BR-AC")
br_region(["northeast", "alagoas"], 303781, "BR-AL")
br_region(["north", "amapa"], 331463, "BR-AP")
br_region(["north", "amazonas"], 332476, "BR-AM")
br_region(["northeast", "bahia"], 362413, "BR-BA")
br_region(["northeast", "ceara"], 302635, "BR-CE")
br_region(["central-west", "distrito-federal"], 421151, "BR-DF")
br_region(["southeast", "espirito-santo"], 54882, "BR-ES")
br_region(["central-west", "goias"], 334443, "BR-GO")
br_region(["northeast", "maranhao"], 332924, "BR-MA")
br_region(["central-west", "mato-grosso"], 333597, "BR-MT")
br_region(["central-west", "mato-grosso-do-sul"], 334051, "BR-MS")
br_region(["southeast", "minas-gerais"], 315173, "BR-MG")
br_region(["north", "para"], 185579, "BR-PA")
br_region(["northeast", "paraiba"], 301464, "BR-PB")
br_region(["south", "parana"], 297640, "BR-PR")
br_region(["northeast", "pernambuco"], 303702, "BR-PE")
br_region(["northeast", "piaui"], 302819, "BR-PI")
br_region(["southeast", "rio-de-janeiro"], 57963, "BR-RJ")
br_region(["northeast", "rio-grande-do-norte"], 301079, "BR-RN")
br_region(["south", "rio-grande-do-sul"], 242620, "BR-RS")
br_region(["north", "rondonia"], 325866, "BR-RO")
br_region(["north", "roraima"], 326287, "BR-RR")
br_region(["south", "santa-catarina"], 296584, "BR-SC")
br_region(["southeast", "sao-paulo"], 298204, "BR-SP")
br_region(["northeast", "sergipe"], 303940, "BR-SE")
br_region(["north", "tocantins"], 336819, "BR-TO")
#########################################################################
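# Brazilian states, grouped under their macro-region directory (north, northeast, central-west, southeast, south);
# the street name proximity check is excluded as for Mexico.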
it_region = gen_country('europe', 'italy', download_repo=OSMFR, language='it', proj=23032, municipality_ref='ref:ISTAT',
phone_code='39', phone_len=[6, 11], phone_len_short=[3, 4], phone_international='00', phone_format=r"^(?:(?:[+]%s[- ]*[03])|[18])[0-9]+(?:[- ][0-9]+)?(?:(?:[- ][0-9]+)|$)$", include=[
'merge_fuel_IT',
'merge_pharmacy_IT',
'merge_parapharmacy_IT',
])
it_region("abruzzo", 53937, "IT-65")
it_region("basilicata", 40137, "IT-77")
it_region("calabria", 1783980, "IT-78")
it_region("campania", 40218, "IT-72")
it_region("emilia_romagna", 42611, "IT-45")
it_region("friuli_venezia_giulia", 179296, "IT-36")
it_region("lazio", 40784, "IT-62")
it_region("liguria", 301482, "IT-42")
it_region("lombardia", 44879, "IT-25")
it_region("marche", 53060, "IT-57")
it_region("molise", 41256, "IT-67")
it_region("piemonte", 44874, "IT-21")
it_region("puglia", 40095, "IT-75")
it_region("sardegna", 7361997, "IT-88")
it_region("sicilia", 39152, "IT-82")
it_region("toscana", 41977, "IT-52")
it_region("trentino_alto_adige", 45757, "IT-32", language=["it","de"])
it_region("umbria", 42004, "IT-55")
it_region("valle_aosta", 45155, "IT-23")
it_region("veneto", 43648, "IT-34")
#########################################################################
nl_province = gen_country('europe', 'netherlands', download_repo=OSMFR, language='nl', proj=23032)
nl_province("zuid_holland", 47772, "NL-ZH")
nl_province("zeeland", 47806, "NL-ZE")
nl_province("noord_brabant", 47696, "NL-NB")
nl_province("limburg", 47793, "NL-LI")
nl_province("gelderland", 47554, "NL-GE")
nl_province("overijssel", 47608, "NL-OV")
nl_province("drenthe", 47540, "NL-DR")
nl_province("friesland", 47381, "NL-FR", language=["nl", "fy"])
nl_province("groningen", 47826, "NL-GR")
nl_province("flevoland", 47407, "NL-FL")
nl_province("utrecht", 47667, "NL-UT")
nl_province("noord_holland", 47654, "NL-NH")
nl_province("aruba", 1231749, "AW", area="central-america", path_base=None, proj=32620)
nl_province("curacao", 1216719, "CW", area="central-america", path_base=None, proj=32620)
nl_province("sint_maarten", 1231790, "SX", area="central-america", path_base=None, proj=32620)
nl_province("caribbean", 1216720, "NL", area="central-america", path_base=None, proj=32620)
#########################################################################
cz_kraj = gen_country('europe', 'czech_republic', download_repo=OSMFR, language='cs', proj=32633)
cz_kraj("praha", 435514, "CZ-PR")
cz_kraj("stredocesky", 442397, "CZ-ST")
cz_kraj("jihocesky", 442321, "CZ-JC")
cz_kraj("plzensky", 442466, "CZ-PL")
cz_kraj("karlovarsky", 442314, "CZ-KA")
cz_kraj("ustecky", 442452, "CZ-US")
cz_kraj("liberecky", 442455, "CZ-LI")
cz_kraj("kralovehradecky", 442463, "CZ-KR")
cz_kraj("pardubicky", 442460, "CZ-PA")
cz_kraj("vysocina", 442453, "CZ-VY")
cz_kraj("jihomoravsky", 442311, "CZ-JM")
cz_kraj("olomoucky", 442459, "CZ-OL")
cz_kraj("moravskoslezsky", 442461, "CZ-MO")
cz_kraj("zlinsky", 442449, "CZ-ZL")
#########################################################################
pl_province = gen_country('europe', 'poland', download_repo=OSMFR, language='pl', proj=32634,
phone_code='48', phone_len=9, phone_international='00')
pl_province("dolnoslaskie", 224457, "PL-DS")
pl_province("kujawsko_pomorskie", 223407, "PL-KP")
pl_province("lubelskie", 130919, "PL-LU")
pl_province("lubuskie", 130969, "PL-LB")
pl_province("lodzkie", 224458, "PL-LD")
pl_province("malopolskie", 224459, "PL-MA")
pl_province("mazowieckie", 130935, "PL-MZ")
pl_province("opolskie", 224460, "PL-OP")
pl_province("podkarpackie", 130957, "PL-PK")
pl_province("podlaskie", 224461, "PL-PD")
pl_province("pomorskie", 130975, "PL-PM")
pl_province("slaskie", 224462, "PL-SL")
pl_province("swietokrzyskie", 130914, "PL-SK")
pl_province("warminsko_mazurskie", 223408, "PL-WN")
pl_province("wielkopolskie", 130971, "PL-WP")
pl_province("zachodniopomorskie", 104401, "PL-ZP")
#########################################################################
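# German Länder; Baden-Württemberg, Bayern and Nordrhein-Westfalen are processed per Regierungsbezirk (loops below).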
de_state = gen_country('europe', 'germany', language='de', proj=32632, municipality_ref='de:regionalschluessel',
phone_code='49', phone_international='00', phone_local_prefix='0', phone_values_separators=[','],
include=[
'osmosis_highway_zone'
]
)
#de_state("baden-wuerttemberg", 62611, "DE-BW")
for (name, rel_id) in [("freiburg-regbez", 2106112),
("karlsruhe-regbez", 22027),
("stuttgart-regbez", 22041),
("tuebingen-regbez", 2811874)]:
de_state("baden-wuerttemberg/" + name, rel_id, "DE-BW", download_repo=GEOFABRIK)
#de_state("bayern", 2145268, "DE-BY")
for (name, rel_id) in [("mittelfranken", 17614),
("niederbayern", 17593),
("oberbayern", 2145274),
("oberfranken", 17592),
("oberpfalz", 17596),
("schwaben", 17657),
("unterfranken", 17585)]:
de_state("bayern/" + name, rel_id, "DE-BY", download_repo=GEOFABRIK)
de_state("berlin", 62422, "DE-BE")
de_state("brandenburg", 62504, "DE-BB")
de_state("bremen", 62718, "DE-HB")
de_state("hamburg", 62782, "DE-HH")
de_state("hessen", 62650, "DE-HE")
de_state("mecklenburg-vorpommern", 28322, "DE-MV")
de_state("niedersachsen", 454192, "DE-NI")
#de_state("nordrhein-westfalen", 62761, "DE-NW")
for (name, rel_id) in [("arnsberg", 73340),
("detmold", 73347),
("dusseldorf", 63306),
("koln", 72022),
("munster", 63594)]:
de_state("nordrhein_westfalen/" + name, rel_id, "DE-NW", download_repo=OSMFR)
de_state("rheinland-pfalz", 62341, "DE-RP")
de_state("saarland", 62372, "DE-SL")
de_state("sachsen-anhalt", 62607, "DE-ST")
de_state("sachsen", 62467, "DE-SN")
de_state("schleswig-holstein", 51529, "DE-SH")
de_state("thueringen", 62366, "DE-TH")
#########################################################################
at_state = gen_country('europe', 'austria', download_repo=OSMFR, language='de', proj=32633)
at_state("niederosterreich", 77189, "AT-3")
at_state("burgenland", 76909, "AT-1")
at_state("karnten", 52345, "AT-2")
at_state("oberosterreich", 102303, "AT-4")
at_state("salzburg", 86539, "AT-5")
at_state("steiermark", 35183, "AT-6")
at_state("tirol", 52343, "AT-7")
at_state("wien", 109166, "AT-9")
at_state("vorarlberg", 74942, "AT-8")
#########################################################################
es_comm = gen_country('europe', 'spain', download_repo=OSMFR, language='es', proj=32629, municipality_ref='ine:municipio', phone_code='34', phone_len=9, phone_len_short=[3, 4, 5], phone_international='00')
es_comm("andalucia", 349044, "ES-AN", proj=32629)
es_comm("aragon", 349045, "ES-AR", proj=32630)
es_comm("asturias", 349033, "ES-AS", proj=32629)
es_comm("illes_balears", 348981, "ES-IB", proj=32630, language="ca")
es_comm("cantabria", 349013, "ES-CB", proj=32630)
es_comm("castilla_la_mancha", 349052, "ES-CM", proj=32630)
es_comm("castilla_y_leon", 349041, "ES-CL", proj=32629)
es_comm("catalunya", 349053, "ES-CT", proj=32630, language="ca")
es_comm("comunitat_valenciana", 349043, "ES-VC", proj=32630, language=["es", "ca"])
es_comm("extremadura", 349050, "ES-EX", proj=32629)
es_comm("galicia", 349036, "ES-GA", proj=32629, language=["es", "gl"])
es_comm("la_rioja", 348991, "ES-RI", proj=32630)
es_comm("comunidad_de_madrid", 349055, "ES-MD", proj=32630)
es_comm("comunidad_foral_de_navarra", 349027, "ES-NC", proj=32630)
es_comm("euskadi", 349042, "ES-PV", proj=32630, language=["es", "eu"])
es_comm("region_de_murcia", 349047, "ES-MC", proj=32630)
es_comm("canarias", 349048, "ES-CN", proj=32628, area="africa")
es_comm("ceuta", 1154756, "ES-CE", proj=32630, area="africa")
es_comm("melilla", 1154757, "ES-ML", proj=32628, area="africa")
#########################################################################
en_region = gen_country('europe', 'united_kingdom/england', download_repo=OSMFR, country_code='GB-ENG', language='en', proj=32630, driving_side='left', speed_limit_unit='mph')
en_region("east_midlands", 151279)
en_region("east", 151336)
en_region("greater_london", 175342)
en_region("north_east", 151164)
en_region("north_west", 151261)
en_region("south_east", 151304)
en_region("south_west", 151339, language=["en", "kw"])
en_region("west_midlands", 151283)
en_region("yorkshire_and_the_humber", 151012)
#########################################################################
sk_kraj = gen_country('europe', 'slovakia', download_repo=OSMFR, language='sk', proj=32634)
sk_kraj("trnavsky", 388266, "SK-TA")
sk_kraj("trenciansky", 388267, "SK-TC")
sk_kraj("presovsky", 388271, "SK-PV")
sk_kraj("nitriansky", 388268, "SK-NI")
sk_kraj("kosicky", 388272, "SK-KI")
sk_kraj("zilinsky", 388269, "SK-ZI")
sk_kraj("banskobystricky", 388270, "SK-BC")
sk_kraj("bratislavsky", 388265, "SK-BL")
#########################################################################
india_state = gen_country('asia', 'india', download_repo=OSMFR, language=['hi', 'en'], proj=32644, driving_side='left')
india_state("andhra_pradesh", 2022095, "IN-AP", proj=32644)
india_state("arunachal_pradesh",2027346, "IN-AR", proj=32646)
india_state("assam", 2025886, "IN-AS", proj=32646)
india_state("bihar", 1958982, "IN-BR", proj=32645)
india_state("chhattisgarh", 1972004, "IN-CT", proj=32644)
india_state("goa", 11251493, "IN-GA", proj=32643)
india_state("gujarat", 1949080, "IN-GJ", proj=32643)
india_state("haryana", 1942601, "IN-HR", proj=32643)
india_state("himachal_pradesh", 364186, "IN-HP", proj=32643)
india_state("jammu_and_kashmir", 1943188, "IN-JK", proj=32643)
india_state("jharkhand", 1960191, "IN-JH", proj=32645)
india_state("karnataka", 2019939, "IN-KA", proj=32643)
india_state("kerala", 2018151, "IN-KL", proj=32643)
india_state("madhya_pradesh", 1950071, "IN-MP", proj=32643)
india_state("maharashtra", 1950884, "IN-MH", proj=32643)
india_state("manipur", 2027869, "IN-MN", proj=32646)
india_state("meghalaya", 2027521, "IN-ML", proj=32646)
india_state("mizoram", 2029046, "IN-MZ", proj=32646)
india_state("nagaland", 2027973, "IN-NL", proj=32646)
india_state("odisha", 1984022, "IN-OR", proj=32645)
india_state("punjab", 1942686, "IN-PB", proj=32643)
india_state("rajasthan", 1942920, "IN-RJ", proj=32643)
india_state("sikkim", 1791324, "IN-SK", proj=32645)
india_state("tamil_nadu", 96905, "IN-TN", proj=32644)
india_state("telangana", 3250963, "IN-TG", proj=32646)
india_state("tripura", 2026458, "IN-TR", proj=32644)
india_state("uttar_pradesh", 1942587, "IN-UP", proj=32644)
india_state("uttarakhand", 9987086, "IN-UT", proj=32644)
india_state("west_bengal", 1960177, "IN-WB", proj=32645)
india_state("andaman_and_nicobar_islands", 2025855, "IN-AN", proj=32646)
india_state("chandigarh", 1942809, "IN-CH", proj=32643)
india_state("dadra_and_nagar_haveli_and_daman_and_diu", 1952530, "IN-DH", proj=32643)
india_state("lakshadweep", 2027460, "IN-LD", proj=32643)
india_state("national_capital_territory_of_delhi", 1942586, "IN-DL", proj=32643)
india_state("puducherry", 107001, "IN-PY", proj=32643)
#########################################################################
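# Russian federal subjects, grouped by federal district, each in its own UTM zone.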
russia_region = gen_country(None, 'russia', download_repo=OSMFR, language='ru')
russia_region(["central_federal_district", "belgorod_oblast"], 83184, "RU-BEL", proj=32637)
russia_region(["central_federal_district", "bryansk_oblast"], 81997, "RU-BRY", proj=32636)
russia_region(["central_federal_district", "ivanovo_oblast"], 85617, "RU-IVA", proj=32637)
russia_region(["central_federal_district", "kaluga_oblast"], 81995, "RU-KLU", proj=32636)
russia_region(["central_federal_district", "kostroma_oblast"], 85963, "RU-KOS", proj=32637)
russia_region(["central_federal_district", "kursk_oblast"], 72223, "RU-KRS", proj=32637)
russia_region(["central_federal_district", "lipetsk_oblast"], 72169, "RU-LIP", proj=32637)
russia_region(["central_federal_district", "moscow_oblast"], 51490, "RU-MOS", proj=32637)
russia_region(["central_federal_district", "moscow"], 102269, "RU-MOW", proj=32637)
russia_region(["central_federal_district", "oryol_oblast"], 72224, "RU-ORL", proj=32637)
russia_region(["central_federal_district", "ryazan_oblast"], 71950, "RU-RYA", proj=32637)
russia_region(["central_federal_district", "smolensk_oblast"], 81996, "RU-SMO", proj=32636)
russia_region(["central_federal_district", "tambov_oblast"], 72180, "RU-TAM", proj=32637)
russia_region(["central_federal_district", "tula_oblast"], 81993, "RU-TUL", proj=32637)
russia_region(["central_federal_district", "tver_oblast"], 2095259, "RU-TVE", proj=32637)
russia_region(["central_federal_district", "vladimir_oblast"], 72197, "RU-VLA", proj=32637)
russia_region(["central_federal_district", "voronezh_oblast"], 72181, "RU-VOR", proj=32637)
russia_region(["central_federal_district", "yaroslavl_oblast"], 81994, "RU-YAR", proj=32637)
russia_region(["far_eastern_federal_district", "amur_oblast"], 147166, "RU-AMU", proj=32652)
russia_region(["far_eastern_federal_district", "chukotka_autonomous_okrug"], 151231, "RU-CHU", proj=32659)
russia_region(["far_eastern_federal_district", "jewish_autonomous_oblast"], 147167, "RU-YEV", proj=32653)
russia_region(["far_eastern_federal_district", "kamchatka_krai"], 151233, "RU-KAM", proj=32658)
russia_region(["far_eastern_federal_district", "khabarovsk_krai"], 151223, "RU-KHA", proj=32653)
russia_region(["far_eastern_federal_district", "magadan_oblast"], 151228, "RU-MAG", proj=32656)
russia_region(["far_eastern_federal_district", "primorsky_krai"], 151225, "RU-PRI", proj=32653)
russia_region(["far_eastern_federal_district", "sakha_republic"], 151234, "RU-SA", proj=32652)
russia_region(["far_eastern_federal_district", "sakhalin_oblast"], 394235, "RU-SAK", proj=32654)
russia_region(["north_caucasian_federal_district", "chechen_republic"], 109877, "RU-CE", proj=32638)
russia_region(["north_caucasian_federal_district", "dagestan_republic"], 109876, "RU-DA", proj=32638)
russia_region(["north_caucasian_federal_district", "ingushetia_republic"], 253252, "RU-IN", proj=32638)
russia_region(["north_caucasian_federal_district", "kabardino_balkar_republic"], 109879, "RU-KB", proj=32638)
russia_region(["north_caucasian_federal_district", "karachay_cherkess_republic"], 109878, "RU-KC", proj=32638)
russia_region(["north_caucasian_federal_district", "north_ossetia_alania_republic"], 110032, "RU-SE", proj=32638)
russia_region(["north_caucasian_federal_district", "stavropol_krai"], 108081, "RU-STA", proj=32638)
russia_region(["northwestern_federal_district", "arkhangelsk_oblast"], 140337, "RU-ARK", proj=32638)
russia_region(["northwestern_federal_district", "kaliningrad_oblast"], 103906, "RU-KGD", proj=32634)
russia_region(["northwestern_federal_district", "karelia_republic"], 393980, "RU-KR", proj=32636)
russia_region(["northwestern_federal_district", "komi_republic"], 115136, "RU-KO", proj=32640)
russia_region(["northwestern_federal_district", "leningrad_oblast"], 176095, "RU-LEN", proj=32636)
russia_region(["northwestern_federal_district", "murmansk_oblast"], 2099216, "RU-MUR", proj=32636)
russia_region(["northwestern_federal_district", "nenets_autonomous_okrug"], 274048, "RU-NEN", proj=32639)
russia_region(["northwestern_federal_district", "novgorod_oblast"], 89331, "RU-NGR", proj=32636)
russia_region(["northwestern_federal_district", "pskov_oblast"], 155262, "RU-PSK", proj=32636)
russia_region(["northwestern_federal_district", "saint_petersburg"], 337422, "RU-SPE", proj=32636)
russia_region(["northwestern_federal_district", "vologda_oblast"], 115106, "RU-VLG", proj=32637)
russia_region(["siberian_federal_district", "altai_krai"], 144764, "RU-ALT", proj=32644)
russia_region(["siberian_federal_district", "altai_republic"], 145194, "RU-AL", proj=32645)
russia_region(["siberian_federal_district", "buryatia_republic"], 145729, "RU-BU", proj=32647)
russia_region(["siberian_federal_district", "irkutsk_oblast"], 145454, "RU-IRK", proj=32648)
russia_region(["siberian_federal_district", "kemerovo_oblast"], 144763, "RU-KEM", proj=32645)
russia_region(["siberian_federal_district", "khakassia_republic"], 190911, "RU-KK", proj=32646)
russia_region(["siberian_federal_district", "krasnoyarsk_krai"], 190090, "RU-KYA", proj=32646)
russia_region(["siberian_federal_district", "novosibirsk_oblast"], 140294, "RU-NVS", proj=32644)
russia_region(["siberian_federal_district", "omsk_oblast"], 140292, "RU-OMS", proj=32643)
russia_region(["siberian_federal_district", "tomsk_oblast"], 140295, "RU-TOM", proj=32644)
russia_region(["siberian_federal_district", "tuva_republic"], 145195, "RU-TY", proj=32646)
russia_region(["siberian_federal_district", "zabaykalsky_krai"], 145730, "RU-ZAB", proj=32650)
russia_region(["southern_federal_district", "crimea_republic"], 3795586, "RU-CR", proj=32636)
russia_region(["southern_federal_district", "adygea_republic"], 253256, "RU-AD", proj=32637)
russia_region(["southern_federal_district", "astrakhan_oblast"], 112819, "RU-AST", proj=32638)
russia_region(["southern_federal_district", "kalmykia_republic"], 108083, "RU-KL", proj=32638)
russia_region(["southern_federal_district", "krasnodar_krai"], 108082, "RU-KDA", proj=32637)
russia_region(["southern_federal_district", "rostov_oblast"], 85606, "RU-ROS", proj=32637)
russia_region(["southern_federal_district", "sevastopol"], 1574364, "RU", proj=32636)
russia_region(["southern_federal_district", "volgograd_oblast"], 77665, "RU-VGG", proj=32638)
russia_region(["ural_federal_district", "chelyabinsk_oblast"], 77687, "RU-CHE", proj=32641)
russia_region(["ural_federal_district", "khanty_mansi_autonomous_okrug"], 140296, "RU-KHM", proj=32642)
russia_region(["ural_federal_district", "kurgan_oblast"], 140290, "RU-KGN", proj=32641)
russia_region(["ural_federal_district", "sverdlovsk_oblast"], 79379, "RU-SVE", proj=32641)
russia_region(["ural_federal_district", "tyumen_oblast"], 140291, "RU-TYU", proj=32642)
russia_region(["ural_federal_district", "yamalo_nenets_autonomous_okrug"], 191706, "RU-YAN", proj=32643)
russia_region(["volga_federal_district", "bashkortostan_republic"], 77677, "RU-BA", proj=32640)
russia_region(["volga_federal_district", "chuvash_republic"], 80513, "RU-CU", proj=32639)
russia_region(["volga_federal_district", "kirov_oblast"], 115100, "RU-KIR", proj=32639)
russia_region(["volga_federal_district", "mari_el_republic"], 115114, "RU-ME", proj=32639)
russia_region(["volga_federal_district", "mordovia_republic"], 72196, "RU-MO", proj=32638)
russia_region(["volga_federal_district", "nizhny_novgorod_oblast"], 72195, "RU-NIZ", proj=32638)
russia_region(["volga_federal_district", "orenburg_oblast"], 77669, "RU-ORE", proj=32640)
russia_region(["volga_federal_district", "penza_oblast"], 72182, "RU-PNZ", proj=32638)
russia_region(["volga_federal_district", "perm_krai"], 115135, "RU-PER", proj=32640)
russia_region(["volga_federal_district", "samara_oblast"], 72194, "RU-SAM", proj=32639)
russia_region(["volga_federal_district", "saratov_oblast"], 72193, "RU-SAR", proj=32638)
russia_region(["volga_federal_district", "tatarstan_republic"], 79374, "RU-TA", proj=32639)
russia_region(["volga_federal_district", "udmurt_republic"], 115134, "RU-UD", proj=32639)
russia_region(["volga_federal_district", "ulyanovsk_oblast"], 72192, "RU-ULY", proj=32639)
#########################################################################
japan_region = gen_country('asia', 'japan', download_repo=OSMFR, country_code='JP', language='ja', proj=32654, driving_side='left')
japan_region("hokkaido", 3795658, proj=32654)
japan_region("tohoku", 1835900, proj=32654)
japan_region("kanto", 1803923, proj=32654)
japan_region("chubu", 532759, proj=32654)
japan_region("kansai", 357113, proj=32653)
japan_region("chugoku", 1842114, proj=32653)
japan_region("shikoku", 1847663, proj=32653)
japan_region("kyushu", 1842245, proj=32652)
#########################################################################
china_province = gen_country('asia', 'china', download_repo=OSMFR, language='zh')
china_province("anhui", 913011, "CN-34", proj=32650)
china_province("fujian", 553303, "CN-35", proj=32650)
china_province("gansu", 153314, "CN-62", proj=32648)
china_province("guangdong", 911844, "CN-44", proj=32649)
china_province("guizhou", 286937, "CN-52", proj=32648)
china_province("hainan", 2128285, "CN-46", proj=32649)
china_province("hebei", 912998, "CN-13", proj=32650)
china_province("heilongjiang", 199073, "CN-23", proj=32652)
china_province("henan", 407492, "CN-41", proj=32650)
china_province("hubei", 913106, "CN-42", proj=32649)
china_province("hunan", 913073, "CN-43", proj=32649)
china_province("jiangsu", 913012, "CN-32", proj=32650)
china_province("jiangxi", 913109, "CN-36", proj=32650)
china_province("jilin", 198590, "CN-22", proj=32652)
china_province("liaoning", 912942, "CN-21", proj=32651)
china_province("qinghai", 153269, "CN-63", proj=32647)
china_province("shaanxi", 913100, "CN-61", proj=32649)
china_province("shandong", 913006, "CN-37", proj=32650)
china_province("shanxi", 913105, "CN-14", proj=32650)
china_province("sichuan", 913068, "CN-51", proj=32648)
china_province("yunnan", 913094, "CN-53", proj=32648)
china_province("zhejiang", 553302, "CN-33", proj=32651)
china_province("tibet", 153292, "CN-54", proj=32645)
china_province("xinjiang", 153310, "CN-65", proj=32645)
china_province("guangxi", 286342, "CN-45", proj=32649)
china_province("inner_mongolia", 161349, "CN-15", proj=32650)
china_province("ningxia", 913101, "CN-64", proj=32648)
china_province("beijing", 912940, "CN-11", proj=32650)
china_province("tianjin", 912999, "CN-12", proj=32650)
china_province("shanghai", 913067, "CN-31", proj=32651)
china_province("chongqing", 913069, "CN-50", proj=32649)
china_province("hong_kong", 913110, "CN-91", proj=32650, language=["zh", "en"], driving_side="left")
china_province("macau", 1867188, "CN-92", proj=32649, language=["zh", "pt"])
#########################################################################
ogf = default_simple("ogf", None, {"project": "opengeofiction"},
                     download_url=u"http://opengeofiction.net/backup/ogf_latest.osm.pbf")
del(ogf.analyser["osmosis_soundex"])
###########################################################################
# Merge analysers are uploaded to a different frontend server
for country in config.keys():
    config[country].analyser_updt_url = {}
    # NOTE: commented, as opendata.osmose causes timeout issues
    # for k in config[country].analyser.keys():
    #     if k.startswith("merge_"):
    #         config[country].analyser_updt_url[k] = [modules.config.url_frontend_update, modules.config.url_frontend_opendata_update]
#########################################################################
# Passwords are stored in separate file, not on git repository
import osmose_config_password
osmose_config_password.set_password(config)
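# For context, a minimal sketch of the interface the import above is assumed to
# provide -- purely an illustration, not the real osmose_config_password module
# (which is deliberately kept out of the repository):
#
#   def set_password(config):
#       for country, conf in config.items():
#           conf.db_password = "secret"  # hypothetical attribute and value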
###########################################################################
if __name__ == "__main__":
    import json
    j = []
    for (k, v) in config.items():
        j.append(dict(v.__dict__, **{"country": k}))
    print(json.dumps(j, indent=4))
|
josejamilena/virtualagc | refs/heads/trunk | Tools/pagecounter.py | 10 | #!/usr/bin/env python
# Copyright 2010 Jim lawton <jim dot lawton at gmail dot com>
#
# This file is part of yaAGC.
#
# yaAGC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# yaAGC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with yaAGC; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Python script to check the page meta-comments in AGC source modules.
# Looks for all .agc files in the current directory, and searches them for '## Page'
# directives. It checks the directives to verify that there are no incorrect page numbers
# (missing, extra, duplicated, out of sequence).
#
# While the page numbers do not form part of the original AGC source, they are very important
# in the code conversion process, and in the debugging of errors in the rope binary files.
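#
# Illustrative example (hypothetical, not taken from any real module): if a
# module's page directives carry the numbers 120, 121, 123 in that order, the
# jump from 121 to 123 is reported as "page number mismatch, expected 122,
# got 123"; a directive whose number is not a plain integer is reported as an
# invalid page number.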
import sys
import glob
def main():
    sfiles = glob.glob('*.agc')
    if len(sfiles) == 0:
        print >>sys.stderr, "Error, no AGC source files found!"
        sys.exit(1)
    errors = 0
    for sfile in sfiles:
        if sfile == "Template.agc":
            continue
        page = 0
        linenum = 0
        start = True
        for line in open(sfile):
            linenum += 1
            sline = line.strip()
            if not sline.startswith('#'):
                continue
            if "Page" not in sline or "scans" in sline or "Pages" in sline:
                continue
            fields = sline
            if sline.startswith('#Page'):
                print >>sys.stderr, "%s, line %d: invalid page number \"%s\"" % (sfile, linenum, sline)
                errors += 1
                fields = sline[1:]
            elif sline.startswith('# Page'):
                fields = sline[2:]
            else:
                continue
            try:
                if fields[4] == ' ':
                    pagenum = fields.split()[1]
                else:
                    pagenum = fields[4:]
                    print >>sys.stderr, "%s, line %d: invalid page number \"%s\"" % (sfile, linenum, sline)
                    errors += 1
            except:
                print "Error processing line: \"%s\"" % (sline)
                raise
            if pagenum.isdigit():
                pagenum = int(pagenum)
                if start:
                    page = pagenum
                    start = False
                else:
                    page += 1
                if page != pagenum:
                    print >>sys.stderr, "%s, line %d: page number mismatch, expected %d, got %d" % (sfile, linenum, page, pagenum)
                    errors += 1
            else:
                print >>sys.stderr, "%s, line %d: invalid page number \"%s\"" % (sfile, linenum, pagenum)
                errors += 1
    if errors != 0:
        print >>sys.stderr, "%d errors found" % (errors)
    else:
        print "No errors found"

if __name__ == "__main__":
    sys.exit(main())
|