repo_name (string, lengths 6 to 100)|path (string, lengths 4 to 294)|copies (string, lengths 1 to 5)|size (string, lengths 4 to 6)|content (string, lengths 606 to 896k)|license (string, 15 classes)|var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B)|doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B)|line_mean (float64, 3.5 to 99.8)|line_max (int64, 13 to 999)|alpha_frac (float64, 0.25 to 0.97)|autogenerated (bool, 1 class)|
---|---|---|---|---|---|---|---|---|---|---|---|
40423106/2016fallcadp_ag4
|
publishconf.py
|
251
|
1705
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Because publishconf.py is read after pelicanconf.py, whenever the same
# variable is set in both files, the value from the later-loaded
# publishconf.py takes precedence.
# Move all static html files into the blog subdirectory
SITEURL = 'blog'
# This setting is for pushing the output to gh-pages, so absolute URLs would
# normally be used (trying relative URLs here instead)
RELATIVE_URLS = True
# To make Tipue search work both locally and on gh-pages, a different theme
# may be needed
THEME = 'theme/pelican-bootstrap3'
#BOOTSTRAP_THEME = 'readable'
#BOOTSTRAP_THEME = 'readable-old'
BOOTSTRAP_THEME = 'united'
#PYGMENTS_STYLE = 'paraiso-drak'
#PYGMENTS_STYLE = 'fruity'
# To remain compatible with render_math, fruity has to be dropped
PYGMENTS_STYLE = 'monokai'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
# Each blog user must supply their own disqus discussion-board settings
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
# Date posts by the file system date of each md file, so no manual date
# setting is needed
DEFAULT_DATE = 'fs'
# Remote code highlighting
MD_EXTENSIONS = ['fenced_code', 'extra', 'codehilite(linenums=True)']
# To save and address articles by date, use:
#ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
#ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = 'pages/{slug}/'
PAGE_SAVE_AS = 'pages/{slug}/index.html'
SHOW_ARTICLE_AUTHOR = True
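
# --- Editor's illustrative sketch (not part of the original config) ---
# As noted above, pelicanconf.py is star-imported near the top of this file,
# so the plain assignments that follow (SITEURL, THEME, ...) shadow any value
# it defined. The guard keeps this check inert when Pelican imports the module.
if __name__ == '__main__':
    print('SITEURL resolved to %r' % SITEURL)  # -> 'blog', overriding pelicanconf.py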
|
agpl-3.0
| -4,302,581,132,641,894,000 | -5,525,850,524,078,890,000 | 27.26 | 86 | 0.717622 | false |
citrix-openstack-build/swift
|
swift/container/sync.py
|
2
|
19536
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from time import ctime, time
from random import random, shuffle
from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.container import server as container_server
from swiftclient import ClientException, delete_object, put_object, \
quote
from swift.container.backend import ContainerBroker
from swift.common.direct_client import direct_get_object
from swift.common.ring import Ring
from swift.common.utils import get_logger, config_true_value, \
validate_sync_to, whataremyips, FileLikeIter
from swift.common.ondisk import audit_location_generator, hash_path
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
class ContainerSync(Daemon):
"""
Daemon to sync syncable containers.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
.. note::
Container sync will sync object POSTs only if the proxy server is set
to use "object_post_as_copy = true" which is the default. So-called
fast object posts, "object_post_as_copy = false" do not update the
container listings and therefore can't be detected for synchronization.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to
be down.
Two sync points are kept per container database. All rows between the two
sync points trigger updates. Any rows newer than both sync points cause
updates depending on the node's position for the container (primary nodes
do one third, etc. depending on the replica count of course). After a sync
run, the first sync point is set to the newest ROWID known and the second
sync point is set to newest ROWID for which all updates have been sent.
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
First sync run, database has 6 rows:
* SyncPoint1 starts as -1.
* SyncPoint2 starts as -1.
* No rows between points, so no "all updates" rows.
* Six rows newer than SyncPoint1, so a third of the rows are sent
by node 1, another third by node 2, remaining third by node 3.
* SyncPoint1 is set as 6 (the newest ROWID known).
* SyncPoint2 is left as -1 since no "all updates" rows were synced.
Next sync run, database has 12 rows:
* SyncPoint1 starts as 6.
* SyncPoint2 starts as -1.
* The rows between -1 and 6 all trigger updates (most of which
should short-circuit on the remote end as having already been
done).
* Six more rows newer than SyncPoint1, so a third of the rows are
sent by node 1, another third by node 2, remaining third by node
3.
* SyncPoint1 is set as 12 (the newest ROWID known).
* SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
was missed.
:param conf: The dict of configuration values from the [container-sync]
section of the container-server.conf
:param container_ring: If None, the <swift_dir>/container.ring.gz will be
loaded. This is overridden by unit tests.
:param object_ring: If None, the <swift_dir>/object.ring.gz will be loaded.
This is overridden by unit tests.
"""
def __init__(self, conf, container_ring=None, object_ring=None):
#: The dict of configuration values from the [container-sync] section
#: of the container-server.conf.
self.conf = conf
#: Logger to use for container-sync log lines.
self.logger = get_logger(conf, log_route='container-sync')
#: Path to the local device mount points.
self.devices = conf.get('devices', '/srv/node')
#: Indicates whether mount points should be verified as actual mount
#: points (normally true, false for tests and SAIO).
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
#: Minimum time between full scans. This is to keep the daemon from
#: running wild on near empty systems.
self.interval = int(conf.get('interval', 300))
#: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
#: it'll just be resumed next scan.
self.container_time = int(conf.get('container_time', 60))
#: The list of hosts we're allowed to send syncs to.
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.proxy = conf.get('sync_proxy')
#: Number of containers with sync turned on that were successfully
#: synced.
self.container_syncs = 0
#: Number of successful DELETEs triggered.
self.container_deletes = 0
#: Number of successful PUTs triggered.
self.container_puts = 0
#: Number of containers that didn't have sync turned on.
self.container_skips = 0
#: Number of containers that had a failure of some type.
self.container_failures = 0
#: Time of last stats report.
self.reported = time()
swift_dir = conf.get('swift_dir', '/etc/swift')
#: swift.common.ring.Ring for locating containers.
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
#: swift.common.ring.Ring for locating objects.
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
self._myips = whataremyips()
self._myport = int(conf.get('bind_port', 6001))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
def run_forever(self):
"""
Runs container sync scans until stopped.
"""
sleep(random() * self.interval)
while True:
begin = time()
all_locs = audit_location_generator(self.devices,
container_server.DATADIR,
'.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self):
"""
Runs a single container sync scan.
"""
self.logger.info(_('Begin container sync "once" mode'))
begin = time()
all_locs = audit_location_generator(self.devices,
container_server.DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
self.report()
elapsed = time() - begin
self.logger.info(
_('Container sync "once" mode completed: %.02fs'), elapsed)
def report(self):
"""
Writes a report of the stats to the logger and resets the stats for the
next report.
"""
self.logger.info(
_('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
'puts], %(skip)s skipped, %(fail)s failed'),
{'time': ctime(self.reported),
'sync': self.container_syncs,
'delete': self.container_deletes,
'put': self.container_puts,
'skip': self.container_skips,
'fail': self.container_failures})
self.reported = time()
self.container_syncs = 0
self.container_deletes = 0
self.container_puts = 0
self.container_skips = 0
self.container_failures = 0
def container_sync(self, path):
"""
Checks the given path for a container database, determines if syncing
is turned on for that database and, if so, sends any updates to the
other container.
:param path: the path to a container db
"""
broker = None
try:
broker = ContainerBroker(path)
info = broker.get_info()
x, nodes = self.container_ring.get_nodes(info['account'],
info['container'])
for ordinal, node in enumerate(nodes):
if node['ip'] in self._myips and node['port'] == self._myport:
break
else:
return
if not broker.is_deleted():
sync_to = None
sync_key = None
sync_point1 = info['x_container_sync_point1']
sync_point2 = info['x_container_sync_point2']
for key, (value, timestamp) in broker.metadata.iteritems():
if key.lower() == 'x-container-sync-to':
sync_to = value
elif key.lower() == 'x-container-sync-key':
sync_key = value
if not sync_to or not sync_key:
self.container_skips += 1
self.logger.increment('skips')
return
sync_to = sync_to.rstrip('/')
err = validate_sync_to(sync_to, self.allowed_sync_hosts)
if err:
self.logger.info(
_('ERROR %(db_file)s: %(validate_sync_to_err)s'),
{'db_file': broker.db_file,
'validate_sync_to_err': err})
self.container_failures += 1
self.logger.increment('failures')
return
stop_at = time() + self.container_time
next_sync_point = None
while time() < stop_at and sync_point2 < sync_point1:
rows = broker.get_items_since(sync_point2, 1)
if not rows:
break
row = rows[0]
if row['ROWID'] > sync_point1:
break
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of the
# objects (if 3 replicas, 1/4 if 4, etc.) and will skip
# problematic rows as needed in case of faults.
# This section will attempt to sync previously skipped
# rows in case the previous attempts by any of the nodes
# didn't succeed.
if not self.container_sync_row(row, sync_to, sync_key,
broker, info):
if not next_sync_point:
next_sync_point = sync_point2
sync_point2 = row['ROWID']
broker.set_x_container_sync_points(None, sync_point2)
if next_sync_point:
broker.set_x_container_sync_points(None, next_sync_point)
while time() < stop_at:
rows = broker.get_items_since(sync_point1, 1)
if not rows:
break
row = rows[0]
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of the
# objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
# around to the section above and attempt to sync
# previously skipped rows in case the other nodes didn't
# succeed or in case it failed to do so the first time.
if unpack_from('>I', key)[0] % \
len(nodes) == ordinal:
self.container_sync_row(row, sync_to, sync_key,
broker, info)
sync_point1 = row['ROWID']
broker.set_x_container_sync_points(sync_point1, None)
self.container_syncs += 1
self.logger.increment('syncs')
except (Exception, Timeout) as err:
self.container_failures += 1
self.logger.increment('failures')
self.logger.exception(_('ERROR Syncing %s'),
broker.db_file if broker else path)
def container_sync_row(self, row, sync_to, sync_key, broker, info):
"""
Sends the update the row indicates to the sync_to container.
:param row: The updated row in the local database triggering the sync
update.
:param sync_to: The URL to the remote container.
:param sync_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param broker: The local container database broker.
:param info: The get_info result from the local container database
broker.
:returns: True on success
"""
try:
start_time = time()
if row['deleted']:
try:
delete_object(sync_to, name=row['name'],
headers={'x-timestamp': row['created_at'],
'x-container-sync-key': sync_key},
proxy=self.proxy)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
self.container_deletes += 1
self.logger.increment('deletes')
self.logger.timing_since('deletes.timing', start_time)
else:
part, nodes = self.object_ring.get_nodes(
info['account'], info['container'],
row['name'])
shuffle(nodes)
exc = None
looking_for_timestamp = float(row['created_at'])
timestamp = -1
headers = body = None
for node in nodes:
try:
these_headers, this_body = direct_get_object(
node, part, info['account'], info['container'],
row['name'], resp_chunk_size=65536)
this_timestamp = float(these_headers['x-timestamp'])
if this_timestamp > timestamp:
timestamp = this_timestamp
headers = these_headers
body = this_body
except ClientException as err:
# If any errors are not 404, make sure we report the
# non-404 one. We don't want to mistakenly assume the
# object no longer exists just because one says so and
# the others errored for some other reason.
if not exc or exc.http_status == HTTP_NOT_FOUND:
exc = err
except (Exception, Timeout) as err:
exc = err
if timestamp < looking_for_timestamp:
if exc:
raise exc
raise Exception(
_('Unknown exception trying to GET: %(node)r '
'%(account)r %(container)r %(object)r'),
{'node': node, 'part': part,
'account': info['account'],
'container': info['container'],
'object': row['name']})
for key in ('date', 'last-modified'):
if key in headers:
del headers[key]
if 'etag' in headers:
headers['etag'] = headers['etag'].strip('"')
headers['x-timestamp'] = row['created_at']
headers['x-container-sync-key'] = sync_key
put_object(sync_to, name=row['name'], headers=headers,
contents=FileLikeIter(body),
proxy=self.proxy)
self.container_puts += 1
self.logger.increment('puts')
self.logger.timing_since('puts.timing', start_time)
except ClientException as err:
if err.http_status == HTTP_UNAUTHORIZED:
self.logger.info(
_('Unauth %(sync_from)r => %(sync_to)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})
elif err.http_status == HTTP_NOT_FOUND:
self.logger.info(
_('Not found %(sync_from)r => %(sync_to)r \
- object %(obj_name)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to, 'obj_name': row['name']})
else:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': broker.db_file, 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
except (Exception, Timeout) as err:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': broker.db_file, 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
return True
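
def _sync_partition_demo():
    """
    Editor's illustrative sketch, not part of the original module: shows on
    made-up data the two per-row decisions made above. container_sync() lets
    the node at position `ordinal` among `node_count` replicas initially send
    only the rows whose hashed name, modulo the node count, equals that
    ordinal; container_sync_row() keeps whichever replica copy of an object
    reports the highest x-timestamp before re-PUTting it to the peer.
    """
    from hashlib import md5
    node_count = 3
    # md5(name).digest() stands in for hash_path(..., raw_digest=True)
    owners = dict(
        (name, unpack_from('>I', md5(name).digest())[0] % node_count)
        for name in ('obj-a', 'obj-b', 'obj-c', 'obj-d'))
    # Freshest replica wins: pick the copy with the highest x-timestamp.
    copies = [{'x-timestamp': '1.1'}, {'x-timestamp': '3.3'},
              {'x-timestamp': '2.2'}]
    newest = max(copies, key=lambda headers: float(headers['x-timestamp']))
    return owners, newest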
|
apache-2.0
| 856,583,146,513,120,400 | 3,739,829,675,298,686,500 | 46.417476 | 79 | 0.532658 | false |
scieloorg/scielo-manager
|
scielomanager/editorialmanager/notifications.py
|
3
|
3509
|
# coding: utf-8
import logging
from django.core.exceptions import ObjectDoesNotExist
from scielomanager.tools import get_users_by_group_by_collections, user_receive_emails
from scielomanager import notifications
logger = logging.getLogger(__name__)
class IssueBoardMessage(notifications.Message):
EMAIL_DATA_BY_ACTION = {
'issue_add_no_replicated_board': {
'subject_sufix': "Issue Board can't be replicated",
'template_path': 'email/issue_add_no_replicated_board.txt',
},
'issue_add_replicated_board': {
'subject_sufix': "Issue has a new replicated board",
'template_path': 'email/issue_add_replicated_board.txt',
},
}
def set_recipients(self, issue):
editor = getattr(issue.journal, 'editor', None)
if editor:
if user_receive_emails(editor):
self.recipients = [editor.email, ]
else:
logger.info("[IssueBoardMessage.set_recipients] editor (user.pk: %s) does not have a profile or decides to not receive emails." % editor.pk)
else:
logger.error("[IssueBoardMessage.set_recipients] Can't prepare a message, issue.journal.editor is None or empty. Issue pk == %s" % issue.pk)
class BoardMembersMessage(notifications.Message):
EMAIL_DATA_BY_ACTION = {
'board_add_member': {
'subject_sufix': "Member of the journal board, was added",
'template_path': 'email/board_add_member.txt',
},
'board_edit_member': {
'subject_sufix': "Member of the journal board, was edited",
'template_path': 'email/board_edit_member.txt',
},
'board_delete_member': {
'subject_sufix': "Member of the journal board, was deleted",
'template_path': 'email/board_delete_member.txt',
}
}
def set_recipients(self):
""" emails must be sent as BCC """
self.recipients = []
def set_bcc_recipients(self, member):
""" recipients must belong to the same collection as member """
collections_of_board_member = member.board.issue.journal.collections.all()
if collections_of_board_member:
librarians = get_users_by_group_by_collections('Librarian', collections_of_board_member)
else:
logger.error("[BoardMembersMessage.set_bcc_recipients] Can't define the collection of member (pk: %s), to filter bcc_recipients" % member.pk)
return
if librarians:
filtered_librarians = [librarian for librarian in librarians if user_receive_emails(librarian)]
self.bcc_recipients = map(lambda u: u.email, filtered_librarians)
else:
logger.error("[BoardMembersMessage.set_bcc_recipients] Can't prepare a message, Can't retrieve a list of Librarian Users.")
def issue_board_replica(issue, action):
message = IssueBoardMessage(action=action,)
message.set_recipients(issue)
extra_context = {'issue': issue,}
message.render_body(extra_context)
return message.send_mail()
def board_members_send_email_by_action(member, user, audit_log_msg, action):
message = BoardMembersMessage(action=action)
message.set_recipients()
message.set_bcc_recipients(member)
extra_context = {
'user': user,
'member': member,
'issue': member.board.issue,
'message': audit_log_msg,
}
message.render_body(extra_context)
return message.send_mail()
|
bsd-2-clause
| 4,689,060,043,395,182,000 | -5,309,747,829,527,172,000 | 36.731183 | 156 | 0.639783 | false |
online-behaviour/machine-learning
|
getTweetText.py
|
1
|
1768
|
#!/usr/bin/python3 -W all
"""
getTweetText.py: extract tweet text from json file
usage: getTweetText.py < file
20170418 erikt(at)xs4all.nl
"""
import csv
import json
import re
import sys
# command name for error messages
COMMAND = sys.argv[0]
patternNewline = re.compile("\n")
# open csv output
with sys.stdout as csvfile:
outFile = csv.writer(csvfile,delimiter=",",quotechar='"')
# repeat for each input line
for line in sys.stdin:
# convert the line to a json dictionary
jsonLine = json.loads(line)
# test for presence of required fields
if not "id_str" in jsonLine: sys.exit(COMMAND+" missing id_str field")
if not "text" in jsonLine: sys.exit(COMMAND+" missing text field")
if not "user" in jsonLine: sys.exit(COMMAND+" missing user field")
if not "screen_name" in jsonLine["user"]:
sys.exit(COMMAND+" missing screen_name field")
if not "created_at" in jsonLine["user"]:
sys.exit(COMMAND+" missing created_at field")
if not "in_reply_to_status_id_str" in jsonLine:
sys.exit(COMMAND+" missing in_reply_to_status_id_str field")
# print the text in csv format
thisId = jsonLine["id_str"]
replyId = jsonLine["in_reply_to_status_id_str"]
if replyId == None and "retweeted_status" in jsonLine and \
"in_reply_to_status_id_str" in jsonLine["retweeted_status"]:
replyId = jsonLine["retweeted_status"]["in_reply_to_status_id_str"]
screenName = jsonLine["user"]["screen_name"]
date = jsonLine["created_at"]
text = jsonLine["text"]
text = patternNewline.sub(" ",text)
outFile.writerow([thisId,replyId,date,screenName,text])
csvfile.close()
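
# Editor's illustrative sketch (sample tweet invented for the demonstration,
# not part of the original script): the same field extraction as the loop
# above, applied to one hand-written json line.
def demoExtract():
    jsonLine = json.loads('{"id_str": "42", "text": "hello\\nworld",'
                          ' "user": {"screen_name": "erikt",'
                          ' "created_at": "Tue Apr 18 12:00:00 2017"},'
                          ' "in_reply_to_status_id_str": null}')
    text = patternNewline.sub(" ", jsonLine["text"])
    return [jsonLine["id_str"], jsonLine["in_reply_to_status_id_str"],
            jsonLine["user"]["created_at"], jsonLine["user"]["screen_name"],
            text]  # -> ['42', None, 'Tue Apr 18 12:00:00 2017', 'erikt', 'hello world']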
|
apache-2.0
| 7,191,420,205,995,813,000 | 8,027,749,950,706,194,000 | 38.288889 | 79 | 0.639706 | false |
jlegendary/scikit-learn
|
sklearn/decomposition/tests/test_dict_learning.py
|
47
|
8095
|
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
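

def test_dict_learning_online_then_encode_shapes():
    # Editor's illustrative sketch, not one of the original tests: chains the
    # two public entry points exercised above; learn a dictionary online,
    # then sparse-code the same data against it and check the shapes agree
    # (mirrors test_dict_learning_online_shapes and test_sparse_encode_shapes).
    rng = np.random.RandomState(0)
    n_components = 6
    code, dictionary = dict_learning_online(X, n_components=n_components,
                                            alpha=1, random_state=rng)
    recoded = sparse_encode(X, dictionary, algorithm='omp')
    assert_equal(code.shape, (n_samples, n_components))
    assert_equal(recoded.shape, (n_samples, n_components))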
|
bsd-3-clause
| 7,584,912,503,316,812,000 | 7,304,979,907,979,227,000 | 36.304147 | 79 | 0.633354 | false |
kiritoe/pokeapi
|
data/v2/build.py
|
7
|
51228
|
# To build out the data you'll need to jump into the Django shell
#
# $ python manage.py shell
#
# and run the build script with
#
# $ execfile('data/v2/build.py')
#
# Each time the build script is run it will iterate over each table in the database,
# wipe it and rewrite each row using the data found in data/v2/csv.
# If you don't need all of the data just go into data/v2/build.py and
# comment out everything but what you need to build the tables you're looking for.
# This might be useful because some of the csv files are massive
# (pokemon_moves especially) and it can take about 30 minutes to build everything.
import csv
import os
from django.db import migrations, connection
from pokemon_v2.models import *
data_location = 'data/v2/csv/'
db_cursor = connection.cursor()
db_vendor = connection.vendor
def loadData(fileName):
return csv.reader(open(data_location + fileName, 'rb'), delimiter=',')
def clearTable(model):
table_name = model._meta.db_table
model.objects.all().delete()
print 'building ' + table_name
# Reset DB auto increments to start at 1
if db_vendor == 'sqlite':
db_cursor.execute("DELETE FROM sqlite_sequence WHERE name = " + "'" + table_name + "'" )
else:
db_cursor.execute("SELECT setval(pg_get_serial_sequence(" + "'" + table_name + "'" + ",'id'), 1, false);")
##############
# LANGUAGE #
##############
clearTable(Language)
data = loadData('languages.csv')
for index, info in enumerate(data):
if index > 0:
language = Language (
id = int(info[0]),
iso639 = info[1],
iso3166 = info[2],
name = info[3],
official = bool(info[4]),
order = info[5],
)
language.save()
clearTable(LanguageName)
data = loadData('language_names.csv')
for index, info in enumerate(data):
if index > 0:
languageName = LanguageName (
language = Language.objects.get(pk = int(info[0])),
local_language_id = int(info[1]),
name = info[2]
)
languageName.save()
############
# REGION #
############
clearTable(Region)
data = loadData('regions.csv')
for index, info in enumerate(data):
if index > 0:
model = Region (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(RegionName)
data = loadData('region_names.csv')
for index, info in enumerate(data):
if index > 0:
model = RegionName (
region = Region.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
################
# GENERATION #
################
clearTable(Generation)
data = loadData('generations.csv')
for index, info in enumerate(data):
if index > 0:
model = Generation (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(GenerationName)
data = loadData('generation_names.csv')
for index, info in enumerate(data):
if index > 0:
model = GenerationName (
generation = Generation.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# VERSION #
#############
clearTable(VersionGroup)
data = loadData('version_groups.csv')
for index, info in enumerate(data):
if index > 0:
versionGroup = VersionGroup (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
order = int(info[3])
)
versionGroup.save()
clearTable(VersionGroupRegion)
data = loadData('version_group_regions.csv')
for index, info in enumerate(data):
if index > 0:
versionGroupRegion = VersionGroupRegion (
version_group = VersionGroup.objects.get(pk = int(info[0])),
region = Region.objects.get(pk = int(info[1])),
)
versionGroupRegion.save()
clearTable(Version)
data = loadData('versions.csv')
for index, info in enumerate(data):
if index > 0:
version = Version (
id = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
name = info[2]
)
version.save()
clearTable(VersionName)
data = loadData('version_names.csv')
for index, info in enumerate(data):
if index > 0:
versionName = VersionName (
version = Version.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
versionName.save()
##################
# DAMAGE CLASS #
##################
clearTable(MoveDamageClass)
data = loadData('move_damage_classes.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveDamageClass (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(MoveDamageClassDescription)
data = loadData('move_damage_class_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveDamageClassDescription (
move_damage_class = MoveDamageClass.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
###########
# STATS #
###########
clearTable(Stat)
data = loadData('stats.csv')
for index, info in enumerate(data):
if index > 0:
stat = Stat (
id = int(info[0]),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2],
is_battle_only = bool(info[3]),
game_index = int(info[4]) if info[4] else 0,
)
stat.save()
clearTable(StatName)
data = loadData('stat_names.csv')
for index, info in enumerate(data):
if index > 0:
statName = StatName (
stat = Stat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
statName.save()
clearTable(PokeathlonStat)
data = loadData('pokeathlon_stats.csv')
for index, info in enumerate(data):
if index > 0:
stat = PokeathlonStat (
id = int(info[0]),
name = info[1],
)
stat.save()
clearTable(PokeathlonStatName)
data = loadData('pokeathlon_stat_names.csv')
for index, info in enumerate(data):
if index > 0:
statName = PokeathlonStatName (
pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
statName.save()
###############
# ABILITIES #
###############
clearTable(Ability)
data = loadData('abilities.csv')
for index, info in enumerate(data):
if index > 0:
ability = Ability (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
is_main_series = bool(info[3])
)
ability.save()
clearTable(AbilityName)
data = loadData('ability_names.csv')
for index, info in enumerate(data):
if index > 0:
abilityName = AbilityName (
ability = Ability.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
abilityName.save()
clearTable(AbilityDescription)
data = loadData('ability_prose.csv')
for index, info in enumerate(data):
if index > 0:
abilityDesc = AbilityDescription (
ability = Ability.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = info[2],
effect = info[3]
)
abilityDesc.save()
clearTable(AbilityFlavorText)
data = loadData('ability_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
abilityFlavorText = AbilityFlavorText (
ability = Ability.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
abilityFlavorText.save()
####################
# CHARACTERISTIC #
####################
clearTable(Characteristic)
data = loadData('characteristics.csv')
for index, info in enumerate(data):
if index > 0:
model = Characteristic (
id = int(info[0]),
stat = Stat.objects.get(pk = int(info[1])),
gene_mod_5 = int(info[2])
)
model.save()
clearTable(CharacteristicDescription)
data = loadData('characteristic_text.csv')
for index, info in enumerate(data):
if index > 0:
model = CharacteristicDescription (
characteristic = Characteristic.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
###############
# EGG GROUP #
###############
clearTable(EggGroup)
data = loadData('egg_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = EggGroup (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(EggGroupName)
data = loadData('egg_group_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EggGroupName (
egg_group = EggGroup.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#################
# GROWTH RATE #
#################
clearTable(GrowthRate)
data = loadData('growth_rates.csv')
for index, info in enumerate(data):
if index > 0:
model = GrowthRate (
id = int(info[0]),
name = info[1],
formula = info[2]
)
model.save()
clearTable(GrowthRateDescription)
data = loadData('growth_rate_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = GrowthRateDescription (
growth_rate = GrowthRate.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
clearTable(ItemPocket)
data = loadData('item_pockets.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemPocket (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(ItemPocketName)
data = loadData('item_pocket_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemPocketName (
item_pocket = ItemPocket.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(ItemFlingEffect)
data = loadData('item_fling_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlingEffect (
id = int(info[0])
)
model.save()
clearTable(ItemFlingEffectDescription)
data = loadData('item_fling_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlingEffectDescription (
item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = info[2]
)
model.save()
clearTable(ItemCategory)
data = loadData('item_categories.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemCategory (
id = int(info[0]),
item_pocket = ItemPocket.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(ItemCategoryName)
data = loadData('item_category_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemCategoryName (
item_category = ItemCategory.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(Item)
data = loadData('items.csv')
for index, info in enumerate(data):
if index > 0:
model = Item (
id = int(info[0]),
name = info[1],
item_category = ItemCategory.objects.get(pk = int(info[2])),
cost = int(info[3]),
fling_power = int(info[4]) if info[4] != '' else None,
item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[5])) if info[5] != '' else None
)
model.save()
clearTable(ItemName)
data = loadData('item_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemName (
item = Item.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(ItemDescription)
data = loadData('item_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemDescription (
item = Item.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = info[2],
effect = info[3]
)
model.save()
clearTable(ItemGameIndex)
data = loadData('item_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemGameIndex (
item = Item.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clearTable(ItemFlavorText)
data = loadData('item_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlavorText (
item = Item.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clearTable(ItemFlag)
data = loadData('item_flags.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlag (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(ItemFlagDescription)
data = loadData('item_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlagDescription (
item_flag = ItemFlag.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
description = info[3]
)
model.save()
clearTable(ItemFlagMap)
data = loadData('item_flag_map.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlagMap (
item = Item.objects.get(pk = int(info[0])),
item_flag = ItemFlag.objects.get(pk = int(info[1]))
)
model.save()
clearTable(ItemFlagDescription)
data = loadData('item_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ItemFlagDescription (
item_flag = ItemFlag.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
description = info[3]
)
model.save()
###########
# TYPES #
###########
clearTable(Type)
data = loadData('types.csv')
for index, info in enumerate(data):
if index > 0:
type = Type (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[3])) if info[3] != '' else None
)
type.save()
clearTable(TypeName)
data = loadData('type_names.csv')
for index, info in enumerate(data):
if index > 0:
typeName = TypeName (
type = Type.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
typeName.save()
clearTable(TypeGameIndex)
data = loadData('type_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
typeGameIndex = TypeGameIndex (
type = Type.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
typeGameIndex.save()
clearTable(TypeEfficacy)
data = loadData('type_efficacy.csv')
for index, info in enumerate(data):
if index > 0:
typeEfficacy = TypeEfficacy (
damage_type_id = int(info[0]),
target_type_id = int(info[1]),
damage_factor = int(info[2])
)
typeEfficacy.save()
###########
# MOVES #
###########
clearTable(MoveEffect)
data = loadData('move_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffect (
id = int(info[0])
)
model.save()
clearTable(MoveEffectDescription)
data = loadData('move_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectDescription (
            move_effect = MoveEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
short_effect = info[2],
effect = info[3]
)
model.save()
clearTable(MoveEffectChange)
data = loadData('move_effect_changelog.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectChange (
id = int(info[0]),
move_effect = MoveEffect.objects.get(pk = int(info[1])),
version_group = VersionGroup.objects.get(pk = int(info[2]))
)
model.save()
clearTable(MoveEffectChangeDescription)
data = loadData('move_effect_changelog_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveEffectChangeDescription (
move_effect_change = MoveEffectChange.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
effect = info[2]
)
model.save()
clearTable(MoveTarget)
data = loadData('move_targets.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveTarget (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(MoveTargetDescription)
data = loadData('move_target_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveTargetDescription (
move_target = MoveTarget.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
description = info[3]
)
model.save()
clearTable(Move)
data = loadData('moves.csv')
for index, info in enumerate(data):
if index > 0:
model = Move (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
type = Type.objects.get(pk = int(info[3])),
power = int(info[4]) if info[4] != '' else None,
pp = int(info[5]) if info[5] != '' else None,
accuracy = int(info[6]) if info[6] != '' else None,
priority = int(info[7]) if info[7] != '' else None,
move_target = MoveTarget.objects.get(pk = int(info[8])),
move_damage_class = MoveDamageClass.objects.get(pk = int(info[9])),
move_effect = MoveEffect.objects.get(pk = int(info[10])),
move_effect_chance = int(info[11]) if info[11] != '' else None,
contest_type_id = int(info[12]) if info[12] != '' else None,
contest_effect_id = int(info[13]) if info[13] != '' else None,
super_contest_effect_id = int(info[14]) if info[14] != '' else None
)
model.save()
clearTable(MoveName)
data = loadData('move_names.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveName (
move = Move.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(MoveFlavorText)
data = loadData('move_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveFlavorText (
move = Move.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clearTable(MoveChange)
data = loadData('move_changelog.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveChange (
move = Move.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
type = Type.objects.get(pk = int(info[2])) if info[2] != '' else None,
power = int(info[3]) if info[3] != '' else None,
pp = int(info[4]) if info[4] != '' else None,
accuracy = int(info[5]) if info[5] != '' else None,
move_effect = MoveEffect.objects.get(pk = int(info[6])) if info[6] != '' else None,
move_effect_chance = int(info[7]) if info[7] != '' else None
)
model.save()
clearTable(MoveBattleStyle)
data = loadData('move_battle_styles.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveBattleStyle (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(MoveBattleStyleName)
data = loadData('move_battle_style_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveBattleStyleName (
move_battle_style = MoveBattleStyle.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(MoveFlag)
data = loadData('move_flags.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveFlag (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(MoveFlagMap)
data = loadData('move_flag_map.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveFlagMap (
move = Move.objects.get(pk = int(info[0])),
move_flag = MoveFlag.objects.get(pk = int(info[1])),
)
model.save()
clearTable(MoveFlagDescription)
data = loadData('move_flag_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveFlagDescription (
move_flag = MoveFlag.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
description = info[3]
)
model.save()
clearTable(MoveMetaAilment)
data = loadData('move_meta_ailments.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaAilment (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(MoveMetaAilmentName)
data = loadData('move_meta_ailment_names.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaAilmentName (
move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(MoveMetaCategory)
data = loadData('move_meta_categories.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaCategory (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(MoveMetaCategoryDescription)
data = loadData('move_meta_category_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaCategoryDescription (
move_meta_category = MoveMetaCategory.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
clearTable(MoveMeta)
data = loadData('move_meta.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMeta (
move = Move.objects.get(pk = int(info[0])),
move_meta_category = MoveMetaCategory.objects.get(pk = int(info[1])),
move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[2])),
min_hits = int(info[3]) if info[3] != '' else None,
max_hits = int(info[4]) if info[4] != '' else None,
min_turns = int(info[5]) if info[5] != '' else None,
max_turns = int(info[6]) if info[6] != '' else None,
drain = int(info[7]) if info[7] != '' else None,
healing = int(info[8]) if info[8] != '' else None,
crit_rate = int(info[9]) if info[9] != '' else None,
ailment_chance = int(info[10]) if info[10] != '' else None,
flinch_chance = int(info[11]) if info[11] != '' else None,
stat_chance = int(info[12]) if info[12] != '' else None
)
model.save()
clearTable(MoveMetaStatChange)
data = loadData('move_meta_stat_changes.csv')
for index, info in enumerate(data):
if index > 0:
model = MoveMetaStatChange (
move = Move.objects.get(pk = int(info[0])),
stat = Stat.objects.get(pk = int(info[1])),
change = int(info[2])
)
model.save()
#############
# CONTEST #
#############
clearTable(ContestType)
data = loadData('contest_types.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestType (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(ContestTypeName)
data = loadData('contest_type_names.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestTypeName (
contest_type = ContestType.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
flavor = info[3],
color = info[4]
)
model.save()
clearTable(ContestEffect)
data = loadData('contest_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestEffect (
id = int(info[0]),
appeal = int(info[1]),
jam = int(info[2])
)
model.save()
clearTable(ContestEffectDescription)
data = loadData('contest_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestEffectDescription (
contest_effect = ContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
flavor_text = info[2],
effect = info[3]
)
model.save()
clearTable(ContestCombo)
data = loadData('contest_combos.csv')
for index, info in enumerate(data):
if index > 0:
model = ContestCombo (
first_move = Move.objects.get(pk = int(info[0])),
second_move = Move.objects.get(pk = int(info[1]))
)
model.save()
clearTable(SuperContestEffect)
data = loadData('super_contest_effects.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestEffect (
id = int(info[0]),
appeal = int(info[1])
)
model.save()
clearTable(SuperContestEffectDescription)
data = loadData('super_contest_effect_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestEffectDescription (
super_contest_effect = SuperContestEffect.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
flavor_text = info[2]
)
model.save()
clearTable(SuperContestCombo)
data = loadData('super_contest_combos.csv')
for index, info in enumerate(data):
if index > 0:
model = SuperContestCombo (
first_move = Move.objects.get(pk = int(info[0])),
second_move = Move.objects.get(pk = int(info[1]))
)
model.save()
#############
# BERRIES #
#############
clearTable(BerryFirmness)
data = loadData('berry_firmness.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFirmness (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(BerryFirmnessName)
data = loadData('berry_firmness_names.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFirmnessName (
berry_firmness = BerryFirmness.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(Berry)
data = loadData('berries.csv')
for index, info in enumerate(data):
if index > 0:
model = Berry (
id = int(info[0]),
item = Item.objects.get(pk = int(info[1])),
berry_firmness = BerryFirmness.objects.get(pk = int(info[2])),
natural_gift_power = int(info[3]),
nature = None,
size = int(info[5]),
max_harvest = int(info[6]),
growth_time = int(info[7]),
soil_dryness = int(info[8]),
smoothness = int(info[9])
)
model.save()
clearTable(BerryFlavor)
data = loadData('berry_flavors.csv')
for index, info in enumerate(data):
if index > 0:
model = BerryFlavor (
berry = Berry.objects.get(pk = int(info[0])),
contest_type = ContestType.objects.get(pk = int(info[1])),
flavor = int(info[2])
)
model.save()
############
# NATURE #
############
clearTable(Nature)
data = loadData('natures.csv')
for index, info in enumerate(data):
if index > 0:
nature = Nature (
id = int(info[0]),
name = info[1],
decreased_stat_id = Stat.objects.get(pk = int(info[2])),
increased_stat_id = Stat.objects.get(pk = int(info[3])),
hates_flavor_id = BerryFlavor.objects.get(pk = int(info[4])),
likes_flavor_id = BerryFlavor.objects.get(pk = int(info[5])),
game_index = info[6]
)
nature.save()
#Berry/Nature associations
data = loadData('berries.csv')
for index, info in enumerate(data):
if index > 0:
berry = Berry.objects.get(pk = int(info[0]))
berry.nature = Nature.objects.get(pk = int(info[4]))
berry.save()
clearTable(NatureName)
data = loadData('nature_names.csv')
for index, info in enumerate(data):
if index > 0:
natureName = NatureName (
nature = Nature.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
natureName.save()
clearTable(NaturePokeathlonStat)
data = loadData('nature_pokeathlon_stats.csv')
for index, info in enumerate(data):
if index > 0:
naturePokeathlonStat = NaturePokeathlonStat (
nature = Nature.objects.get(pk = int(info[0])),
pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[1])),
max_change = info[2]
)
naturePokeathlonStat.save()
clearTable(NatureBattleStylePreference)
data = loadData('nature_battle_style_preferences.csv')
for index, info in enumerate(data):
if index > 0:
model = NatureBattleStylePreference (
nature = Nature.objects.get(pk = int(info[0])),
move_battle_style_id = int(info[1]),
low_hp_preference = info[2],
high_hp_preference = info[3]
)
model.save()
############
# GENDER #
############
clearTable(Gender)
data = loadData('genders.csv')
for index, info in enumerate(data):
if index > 0:
model = Gender (
id = int(info[0]),
name = info[1]
)
model.save()
################
# EXPERIENCE #
################
clearTable(Experience)
data = loadData('experience.csv')
for index, info in enumerate(data):
if index > 0:
model = Experience (
growth_rate = GrowthRate.objects.get(pk = int(info[0])),
level = int(info[1]),
experience = int(info[2])
)
model.save()
##############
# MACHINES #
##############
clearTable(Machine)
data = loadData('machines.csv')
for index, info in enumerate(data):
if index > 0:
model = Machine (
machine_number = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
item = Item.objects.get(pk = int(info[2])),
move = Move.objects.get(pk = int(info[3])),
)
model.save()
###############
# EVOLUTION #
###############
clearTable(EvolutionChain)
data = loadData('evolution_chains.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionChain (
id = int(info[0]),
baby_evolution_item = Item.objects.get(pk = int(info[1])) if info[1] != '' else None,
)
model.save()
clearTable(EvolutionTrigger)
data = loadData('evolution_triggers.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionTrigger (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(EvolutionTriggerName)
data = loadData('evolution_trigger_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EvolutionTriggerName (
evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
#############
# POKEDEX #
#############
clearTable(Pokedex)
data = loadData('pokedexes.csv')
for index, info in enumerate(data):
if index > 0:
model = Pokedex (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2],
is_main_series = bool(info[3])
)
model.save()
clearTable(PokedexDescription)
data = loadData('pokedex_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokedexDescription (
pokedex = Pokedex.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
description = info[3]
)
model.save()
clearTable(PokedexVersionGroup)
data = loadData('pokedex_version_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = PokedexVersionGroup (
pokedex = Pokedex.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1]))
)
model.save()
#############
# POKEMON #
#############
clearTable(PokemonColor)
data = loadData('pokemon_colors.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonColor (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(PokemonColorName)
data = loadData('pokemon_color_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonColorName (
pokemon_color = PokemonColor.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(PokemonShape)
data = loadData('pokemon_shapes.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonShape (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(PokemonShapeName)
data = loadData('pokemon_shape_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonShapeName (
pokemon_shape = PokemonShape.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
awesome_name = info[3]
)
model.save()
clearTable(PokemonHabitat)
data = loadData('pokemon_habitats.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonHabitat (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(PokemonSpecies)
data = loadData('pokemon_species.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpecies (
id = int(info[0]),
name = info[1],
generation = Generation.objects.get(pk = int(info[2])),
evolves_from_species = None,
evolution_chain = EvolutionChain.objects.get(pk = int(info[4])),
pokemon_color = PokemonColor.objects.get(pk = int(info[5])),
pokemon_shape = PokemonShape.objects.get(pk = int(info[6])),
pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[7])) if info[7] != '' else None,
gender_rate = int(info[8]),
capture_rate = int(info[9]),
base_happiness = int(info[10]),
            is_baby = bool(int(info[11])),
            hatch_counter = int(info[12]),
            has_gender_differences = bool(int(info[13])),
            growth_rate = GrowthRate.objects.get(pk = int(info[14])),
            forms_switchable = bool(int(info[15])),
order = int(info[16])
)
model.save()
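# Second pass: evolves_from_species is a self-referential foreign key,
# so it can only be linked after every species row has been saved.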
data = loadData('pokemon_species.csv')
for index, info in enumerate(data):
if index > 0:
evolves = PokemonSpecies.objects.get(pk = int(info[3])) if info[3] != '' else None
if evolves:
species = PokemonSpecies.objects.get(pk = int(info[0]))
species.evolves_from_species = evolves
species.save()
clearTable(PokemonSpeciesName)
data = loadData('pokemon_species_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesName (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
genus = info[3]
)
model.save()
clearTable(PokemonSpeciesDescription)
data = loadData('pokemon_species_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesDescription (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
description = info[2]
)
model.save()
clearTable(PokemonSpeciesFlavorText)
data = loadData('pokemon_species_flavor_text.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonSpeciesFlavorText (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
language = Language.objects.get(pk = int(info[2])),
flavor_text = info[3]
)
model.save()
clearTable(Pokemon)
data = loadData('pokemon.csv')
for index, info in enumerate(data):
if index > 0:
model = Pokemon (
id = int(info[0]),
name = info[1],
pokemon_species = PokemonSpecies.objects.get(pk = int(info[2])),
height = int(info[3]),
weight = int(info[4]),
base_experience = int(info[5]),
order = int(info[6]),
            is_default = bool(int(info[7]))
)
model.save()
clearTable(PokemonAbility)
data = loadData('pokemon_abilities.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonAbility (
pokemon = Pokemon.objects.get(pk = int(info[0])),
ability = Ability.objects.get(pk = int(info[1])),
            is_hidden = bool(int(info[2])),
slot = int(info[3])
)
model.save()
clearTable(PokemonDexNumber)
data = loadData('pokemon_dex_numbers.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonDexNumber (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
pokedex = Pokedex.objects.get(pk = int(info[1])),
pokedex_number = int(info[2])
)
model.save()
clearTable(PokemonEggGroup)
data = loadData('pokemon_egg_groups.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonEggGroup (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
egg_group = EggGroup.objects.get(pk = int(info[1]))
)
model.save()
clearTable(PokemonEvolution)
data = loadData('pokemon_evolution.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonEvolution (
id = int(info[0]),
evolved_species = PokemonSpecies.objects.get(pk = int(info[1])),
evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[2])),
evolution_item = Item.objects.get(pk = int(info[3])) if info[3] != '' else None,
min_level = int(info[4]) if info[4] != '' else None,
gender = Gender.objects.get(pk = int(info[5])) if info[5] != '' else None,
location_id = int(info[6]) if info[6] != '' else None,
held_item = Item.objects.get(pk = int(info[7])) if info[7] != '' else None,
time_of_day = info[8],
known_move = Move.objects.get(pk = int(info[9])) if info[9] != '' else None,
known_move_type = Type.objects.get(pk = int(info[10])) if info[10] != '' else None,
min_happiness = int(info[11]) if info[11] != '' else None,
min_beauty = int(info[12]) if info[12] != '' else None,
min_affection = int(info[13]) if info[13] != '' else None,
relative_physical_stats = int(info[14]) if info[14] != '' else None,
party_species = PokemonSpecies.objects.get(pk = int(info[15])) if info[15] != '' else None,
party_type = Type.objects.get(pk = int(info[16])) if info[16] != '' else None,
trade_species = PokemonSpecies.objects.get(pk = int(info[17])) if info[17] != '' else None,
            needs_overworld_rain = bool(int(info[18])),
            turn_upside_down = bool(int(info[19]))
)
model.save()
clearTable(PokemonForm)
data = loadData('pokemon_forms.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonForm (
id = int(info[0]),
name = info[1],
form_identifier = info[2],
pokemon = Pokemon.objects.get(pk = int(info[3])),
introduced_in_version_group = VersionGroup.objects.get(pk = int(info[4])),
            is_default = bool(int(info[5])),
            is_battle_only = bool(int(info[6])),
            is_mega = bool(int(info[7])),
form_order = int(info[8]),
order = int(info[9])
)
model.save()
clearTable(PokemonFormName)
data = loadData('pokemon_form_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonFormName (
pokemon_form = PokemonForm.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
pokemon_name = info[3]
)
model.save()
clearTable(PokemonFormGeneration)
data = loadData('pokemon_form_generations.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonFormGeneration (
pokemon_form = PokemonForm.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clearTable(PokemonGameIndex)
data = loadData('pokemon_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonGameIndex (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clearTable(PokemonHabitatName)
data = loadData('pokemon_habitat_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonHabitatName (
pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(PokemonItem)
data = loadData('pokemon_items.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonItem (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version = Version.objects.get(pk = int(info[1])),
item = Item.objects.get(pk = int(info[2])),
rarity = int(info[3])
)
model.save()
clearTable(PokemonMoveMethod)
data = loadData('pokemon_move_methods.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonMoveMethod (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(PokemonMoveMethodName)
data = loadData('pokemon_move_method_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonMoveMethodName (
pokemon_move_method = PokemonMoveMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
description = info[3]
)
model.save()
clearTable(PokemonMove)
data = loadData('pokemon_moves.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonMove (
pokemon = Pokemon.objects.get(pk = int(info[0])),
version_group = VersionGroup.objects.get(pk = int(info[1])),
move = Move.objects.get(pk = int(info[2])),
pokemon_move_method = PokemonMoveMethod.objects.get(pk = int(info[3])),
level = int(info[4]),
order = int(info[5]) if info[5] != '' else None,
)
model.save()
clearTable(PokemonStat)
data = loadData('pokemon_stats.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonStat (
pokemon = Pokemon.objects.get(pk = int(info[0])),
stat = Stat.objects.get(pk = int(info[1])),
base_stat = int(info[2]),
effort = int(info[3])
)
model.save()
clearTable(PokemonType)
data = loadData('pokemon_types.csv')
for index, info in enumerate(data):
if index > 0:
model = PokemonType (
pokemon = Pokemon.objects.get(pk = int(info[0])),
type = Type.objects.get(pk = int(info[1])),
slot = int(info[2])
)
model.save()
##############
# LOCATIONS #
##############
clearTable(Location)
data = loadData('locations.csv')
for index, info in enumerate(data):
if index > 0:
model = Location (
id = int(info[0]),
region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None,
name = info[2]
)
model.save()
clearTable(LocationName)
data = loadData('location_names.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationName (
location = Location.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(LocationGameIndex)
data = loadData('location_game_indices.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationGameIndex (
location = Location.objects.get(pk = int(info[0])),
generation = Generation.objects.get(pk = int(info[1])),
game_index = int(info[2])
)
model.save()
clearTable(LocationArea)
data = loadData('location_areas.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationArea (
id = int(info[0]),
location = Location.objects.get(pk = int(info[1])),
game_index = int(info[2]),
name = info[3]
)
model.save()
clearTable(LocationAreaName)
data = loadData('location_area_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationAreaName (
location_area = LocationArea.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(LocationAreaEncounterRate)
data = loadData('location_area_encounter_rates.csv')
for index, info in enumerate(data):
if index > 0:
model = LocationAreaEncounterRate (
location_area = LocationArea.objects.get(pk = int(info[0])),
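            # EncounterMethod rows are loaded further down, so the FK is left
            # empty here and back-filled in the association pass below.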
encounter_method = None,
version = Version.objects.get(pk = int(info[2])),
rate = int(info[3])
)
model.save()
###############
# ENCOUNTER #
###############
clearTable(EncounterMethod)
data = loadData('encounter_methods.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterMethod (
id = int(info[0]),
name = info[1],
order = int(info[2])
)
model.save()
clearTable(EncounterMethodName)
data = loadData('encounter_method_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterMethodName (
encounter_method = EncounterMethod.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(EncounterSlot)
data = loadData('encounter_slots.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterSlot (
id = int(info[0]),
version_group = VersionGroup.objects.get(pk = int(info[1])),
encounter_method = EncounterMethod.objects.get(pk = int(info[2])),
slot = int(info[3]) if info[3] != '' else None,
rarity = int(info[4])
)
model.save()
clearTable(EncounterCondition)
data = loadData('encounter_conditions.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterCondition (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(EncounterConditionName)
data = loadData('encounter_condition_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionName (
encounter_condition = EncounterCondition.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(Encounter)
data = loadData('encounters.csv')
for index, info in enumerate(data):
if index > 0:
model = Encounter (
id = int(info[0]),
version = Version.objects.get(pk = int(info[1])),
location_area = LocationArea.objects.get(pk = int(info[2])),
encounter_slot = EncounterSlot.objects.get(pk = int(info[3])),
pokemon = Pokemon.objects.get(pk = int(info[4])),
min_level = int(info[5]),
max_level = int(info[6])
)
model.save()
clearTable(EncounterConditionValue)
data = loadData('encounter_condition_values.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValue (
id = int(info[0]),
encounter_condition = EncounterCondition.objects.get(pk = int(info[1])),
name = info[2],
            is_default = bool(int(info[3]))
)
model.save()
clearTable(EncounterConditionValueName)
data = loadData('encounter_condition_value_prose.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValueName (
encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2],
)
model.save()
clearTable(EncounterConditionValueMap)
data = loadData('encounter_condition_value_map.csv')
for index, info in enumerate(data):
if index > 0:
model = EncounterConditionValueMap (
encounter = Encounter.objects.get(pk = int(info[0])),
encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[1]))
)
model.save()
# Location/Encounter associations: back-fill encounter_method now that the
# EncounterMethod rows exist. The rate rows have auto-generated pks, so match
# them on their natural columns (info[0] is the location area id, not the pk).
# (.first() needs Django >= 1.6.)
data = loadData('location_area_encounter_rates.csv')
for index, info in enumerate(data):
    if index > 0:
        laer = LocationAreaEncounterRate.objects.filter(
            location_area_id = int(info[0]), version_id = int(info[2]),
            rate = int(info[3]), encounter_method = None).first()
        if laer is not None:
            laer.encounter_method = EncounterMethod.objects.get(pk = int(info[1]))
            laer.save()
##############
# PAL PARK #
##############
clearTable(PalParkArea)
data = loadData('pal_park_areas.csv')
for index, info in enumerate(data):
if index > 0:
model = PalParkArea (
id = int(info[0]),
name = info[1]
)
model.save()
clearTable(PalParkAreaName)
data = loadData('pal_park_area_names.csv')
for index, info in enumerate(data):
if index > 0:
model = PalParkAreaName (
pal_park_area = PalParkArea.objects.get(pk = int(info[0])),
language = Language.objects.get(pk = int(info[1])),
name = info[2]
)
model.save()
clearTable(PalPark)
data = loadData('pal_park.csv')
for index, info in enumerate(data):
if index > 0:
model = PalPark (
pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])),
pal_park_area = PalParkArea.objects.get(pk = int(info[1])),
rate = int(info[2])
)
model.save()
|
bsd-3-clause
| -1,691,108,735,484,536,800 | 6,557,875,180,506,514,000 | 22.306642 | 110 | 0.603557 | false |
valexandersaulys/prudential_insurance_kaggle
|
venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py
|
63
|
5962
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_array_equal, assert_allclose, \
run_module_suite, assert_raises
from scipy.interpolate import griddata
class TestGriddata(object):
def test_fill_value(self):
x = [(0,0), (0,1), (1,0)]
y = [1, 2, 3]
yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
assert_array_equal(yi, [-1., -1, 1])
yi = griddata(x, y, [(1,1), (1,2), (0,0)])
assert_array_equal(yi, [np.nan, np.nan, 1])
def test_alternative_call(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ np.array([0,1])[None,:])
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
rescale=rescale)
assert_allclose(y, yi, atol=1e-14, err_msg=msg)
def test_multivalue_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ np.array([0,1])[None,:])
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata(x, y, x, method=method, rescale=rescale)
assert_allclose(y, yi, atol=1e-14, err_msg=msg)
def test_multipoint_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata(x, y, xi, method=method, rescale=rescale)
assert_equal(yi.shape, (5, 3), err_msg=msg)
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=msg)
def test_complex_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 2j*y[::-1]
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata(x, y, xi, method=method, rescale=rescale)
assert_equal(yi.shape, (5, 3), err_msg=msg)
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=msg)
def test_1d(self):
x = np.array([1, 2.5, 3, 4.5, 5, 6])
y = np.array([1, 2, 0, 3.9, 2, 1])
for method in ('nearest', 'linear', 'cubic'):
assert_allclose(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-14)
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-14)
assert_allclose(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-14)
def test_1d_unsorted(self):
x = np.array([2.5, 1, 4.5, 5, 6, 3])
y = np.array([1, 2, 0, 3.9, 2, 1])
for method in ('nearest', 'linear', 'cubic'):
assert_allclose(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-10)
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-10)
assert_allclose(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-10)
def test_square_rescale_manual(self):
points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.double)
points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)], dtype=np.double)
values = np.array([1., 2., -3., 5., 9.], dtype=np.double)
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
np.linspace(0, 100, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
for method in ('nearest', 'linear', 'cubic'):
msg = method
zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
method=method)
zi_rescaled = griddata(points, values, xi, method=method,
rescale=True)
assert_allclose(zi, zi_rescaled, err_msg=msg,
atol=1e-12)
def test_xi_1d(self):
# Check that 1-D xi is interpreted as a coordinate
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 2j*y[::-1]
xi = np.array([0.5, 0.5])
for method in ('nearest', 'linear', 'cubic'):
p1 = griddata(x, y, xi, method=method)
p2 = griddata(x, y, xi[None,:], method=method)
assert_allclose(p1, p2, err_msg=method)
xi1 = np.array([0.5])
xi3 = np.array([0.5, 0.5, 0.5])
assert_raises(ValueError, griddata, x, y, xi1,
method=method)
assert_raises(ValueError, griddata, x, y, xi3,
method=method)
if __name__ == "__main__":
run_module_suite()
|
gpl-2.0
| -7,918,053,419,958,103,000 | 6,597,277,410,342,222,000 | 39.835616 | 94 | 0.472996 | false |
pyload/pyload
|
src/pyload/plugins/downloaders/SpeedyshareCom.py
|
2
|
1461
|
# -*- coding: utf-8 -*-
#
# Test links:
# http://speedy.sh/ep2qY/Zapp-Brannigan.jpg
import re
from ..base.simple_downloader import SimpleDownloader
class SpeedyshareCom(SimpleDownloader):
__name__ = "SpeedyshareCom"
__type__ = "downloader"
__version__ = "0.11"
__status__ = "testing"
__pattern__ = r"https?://(?:www\.)?(speedyshare\.com|speedy\.sh)/\w+"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Speedyshare.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "[email protected]")]
NAME_PATTERN = r"class=downloadfilename>(?P<N>.*)</span></td>"
SIZE_PATTERN = r"class=sizetagtext>(?P<S>.*) (?P<U>[kKmM]?[iI]?[bB]?)</div>"
OFFLINE_PATTERN = r"class=downloadfilenamenotfound>.*</span>"
LINK_FREE_PATTERN = r"<a href=\'(.*)\'><img src=/gf/slowdownload\.png alt=\'Slow Download\' border=0"
def setup(self):
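        # Free (slow) downloads run one at a time and in a single chunk.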
self.multi_dl = False
self.chunk_limit = 1
def handle_free(self, pyfile):
m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is not None:
self.link = m.group(1)
|
agpl-3.0
| -7,909,895,523,224,376,000 | -2,679,820,942,046,227,000 | 31.466667 | 105 | 0.590691 | false |
knowmetools/km-api
|
km_api/know_me/migrations/0006_subscription.py
|
1
|
2495
|
# Generated by Django 2.0.6 on 2018-10-20 23:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import permission_utils.model_mixins
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("know_me", "0005_kmuser_is_legacy_user"),
]
operations = [
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"is_active",
models.BooleanField(
help_text="A boolean indicating if the subscription is active.",
verbose_name="is active",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time that the subscription instance was created.",
verbose_name="creation time",
),
),
(
"time_updated",
models.DateTimeField(
auto_now=True,
help_text="The time of the subscription's last update.",
verbose_name="last update time",
),
),
(
"user",
models.OneToOneField(
help_text="The user who has a Know Me subscription",
on_delete=django.db.models.deletion.CASCADE,
related_name="know_me_subscription",
to=settings.AUTH_USER_MODEL,
verbose_name="user",
),
),
],
options={
"verbose_name": "Know Me subscription",
"verbose_name_plural": "Know Me subscriptions",
"ordering": ("time_created",),
},
bases=(
permission_utils.model_mixins.IsAuthenticatedMixin,
models.Model,
),
)
]
|
apache-2.0
| -8,573,148,578,647,658,000 | -703,974,708,912,788,100 | 33.178082 | 89 | 0.420441 | false |
yawnosnorous/python-for-android
|
python-modules/twisted/twisted/web/test/test_distrib.py
|
52
|
12485
|
# Copyright (c) 2008-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import log, filepath
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import http, distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class MySite(server.Site):
def stopFactory(self):
if hasattr(self, "logFile"):
if self.logFile != log.logfile:
self.logFile.close()
del self.logFile
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class DistribTest(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
        Clean up all the event sources left behind, either directly by
        test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
http._logDateTimeStop()
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild("there", static.Data("root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild("here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
d = client.getPage("http://127.0.0.1:%d/here/there" % \
self.port2.getHost().port)
d.addCallback(self.failUnlessEqual, 'root')
return d
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{getPage} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
distribRoot = resource.Resource()
distribRoot.putChild("child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return client.getPage("http://%s:%s/child" % (
mainAddr.host, mainAddr.port), **kwargs)
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
class ReportRequestHeaders(resource.Resource):
def render(self, request):
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return ""
request = self._requestTest(
ReportRequestHeaders(), headers={'foo': 'bar'})
def cbRequested(result):
self.assertEquals(requestHeaders['Foo'], ['bar'])
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write('x' * SIZE_LIMIT + 'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEquals, 'x' * SIZE_LIMIT + 'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return 'x' * SIZE_LIMIT + 'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEquals, 'x' * SIZE_LIMIT + 'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
self.assertEqual(len(self.flushLoggedErrors(pb.NoSuchMethod)), 1)
d.addCallback(cbRendered)
return d
class _PasswordDatabase:
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent("")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
|
apache-2.0
| -4,188,339,355,442,689,000 | 1,084,261,703,724,531,300 | 33.584488 | 83 | 0.620104 | false |
ZHAW-INES/rioxo-uClinux-dist
|
user/python/python-2.4.4/Lib/hotshot/stats.py
|
252
|
2582
|
"""Statistics analyzer for HotShot."""
import profile
import pstats
import hotshot.log
from hotshot.log import ENTER, EXIT
def load(filename):
return StatsLoader(filename).load()
class StatsLoader:
def __init__(self, logfn):
self._logfn = logfn
self._code = {}
self._stack = []
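        # Alias the stack's pop() so EXIT events can unwind simulated frames.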
self.pop_frame = self._stack.pop
def load(self):
# The timer selected by the profiler should never be used, so make
# sure it doesn't work:
p = Profile()
p.get_time = _brokentimer
log = hotshot.log.LogReader(self._logfn)
taccum = 0
for event in log:
what, (filename, lineno, funcname), tdelta = event
if tdelta > 0:
taccum += tdelta
# We multiply taccum to convert from the microseconds we
# have to the seconds that the profile/pstats module work
# with; this allows the numbers to have some basis in
# reality (ignoring calibration issues for now).
if what == ENTER:
frame = self.new_frame(filename, lineno, funcname)
p.trace_dispatch_call(frame, taccum * .000001)
taccum = 0
elif what == EXIT:
frame = self.pop_frame()
p.trace_dispatch_return(frame, taccum * .000001)
taccum = 0
# no further work for line events
assert not self._stack
return pstats.Stats(p)
def new_frame(self, *args):
# args must be filename, firstlineno, funcname
# our code objects are cached since we don't need to create
# new ones every time
try:
code = self._code[args]
except KeyError:
code = FakeCode(*args)
self._code[args] = code
        # frame objects are created fresh, since the back pointer will
# vary considerably
if self._stack:
back = self._stack[-1]
else:
back = None
frame = FakeFrame(code, back)
self._stack.append(frame)
return frame
class Profile(profile.Profile):
def simulate_cmd_complete(self):
pass
class FakeCode:
def __init__(self, filename, firstlineno, funcname):
self.co_filename = filename
self.co_firstlineno = firstlineno
self.co_name = self.__name__ = funcname
class FakeFrame:
def __init__(self, code, back):
self.f_back = back
self.f_code = code
def _brokentimer():
raise RuntimeError, "this timer should not be called"
|
gpl-2.0
| 816,207,042,162,001,000 | 7,519,047,264,598,932,000 | 26.763441 | 74 | 0.572812 | false |
mrksu/flac2m
|
src/paths.py
|
1
|
4790
|
#!/usr/bin/env python3
from typing import List, Tuple
import os
from common import error_exit
MusicDir = Tuple[str, List[str]] # A tuple of dir name and all of its files
MusicMap = List[MusicDir] # List of dirs containing music
def find_music(roots: List[str]) -> MusicMap:
music_dirs = []
for root in roots:
# Use absolute paths otherwise first letter can be lost somewhere
root_abs = os.path.abspath(root)
for directory in os.walk(root_abs):
dir_name, cont_dirs, cont_files = directory
for f in cont_files:
if f.endswith(".flac"):
# print("Music found: {} in {}".format(f, dir_name))
music_dirs.append((dir_name, cont_files))
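                    # One .flac file is enough to mark the directory; stop
                    # scanning the rest of its files.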
break
return music_dirs
# This function is similar to os.path.commonpath except for the 1-element case.
# I discovered os.path.commonpath only after writing this, now too proud
# to replace it. It was a good exercise.
def greatest_common_dir(directories: List[str]) -> str:
"""
Compares directory paths in list and returns the part that all of them
have in common; i.e. ["/usr/bin", "/usr/share"] -> "/usr"
If there is only one directory, returns all except the innermost element;
i.e. ["/usr/share/man"] -> "/usr/share"
"""
# The list of directories should never be empty
assert len(directories) != 0, "No music directories to analyze"
# If there is only one directory in the list, return the innermost
# directory immediately containing music files
if len(directories) == 1:
split_dir = directories[0].split("/")
all_except_containing_dir = split_dir[:-1]
return "/".join(all_except_containing_dir)
split_dirs = [d.split("/") for d in directories]
common_elements = []
common = True
index = 0
while common:
first_dir = split_dirs[0]
path_element = first_dir[index]
for d in split_dirs:
if d[index] != path_element:
common = False
break
if common:
common_elements.append(path_element)
index += 1
common_path = "/".join(common_elements)
return common_path
def get_flac_files(all_files: List[str]) -> List[str]:
flacs = [f for f in all_files if f.endswith("flac")]
return flacs
def get_files_to_copy(all_files: List[str], c_template: List[str]) -> List[str]:
# Not a list comprehension here because this can potentially be faster
# considering there should only be a few covers / copy file templates
# and many actual files
to_copy = []
for c in c_template:
for f in all_files:
if f == c:
to_copy.append(f)
return to_copy
def subtract_common_path(full_path: str, common_path: str) -> str:
assert full_path.startswith(common_path), "No common path to subtract"
common_length = len(common_path)
subtracted = full_path[common_length+1:]
return subtracted
SubsPair = Tuple[str, str] # A pair of strings to use in substitution
def evaluate_substitution(subs: str) -> SubsPair:
split_subs = subs.split("/")
if len(split_subs) != 2:
error_exit("‘{}’: invalid substitution format. "\
"Expected ‘old/new’.".format(subs))
return (split_subs[0], split_subs[1])
InOutPair = Tuple[str, str] # A pair of input path and output path
InOutList = List[InOutPair] # A list of said in/out pairs
def create_in_out_paths(music_map: MusicMap, out_root: str,
subsf: SubsPair, subsd: SubsPair,
copy=False, c_template=None) -> InOutList:
all_dirs = [t[0] for t in music_map]
common_path = greatest_common_dir(all_dirs)
in_out_list = []
for music_dir in music_map:
dir_path, files = music_dir
if copy:
sel_files = get_files_to_copy(files, c_template)
else:
sel_files = get_flac_files(files)
unique_path = subtract_common_path(dir_path, common_path)
# TODO: process substitutions in a separate function beforehand
if subsd:
old, new = subsd
unique_path = unique_path.replace(old, new)
for f in sel_files:
if subsf:
old, new = subsf
                f = f.replace(old, new)  # str.replace returns a new string
in_path = os.path.join(dir_path, f)
out_path = os.path.join(os.path.abspath(out_root), unique_path, f)
in_out_list.append((in_path, out_path))
return in_out_list
def check_access(path, write=False):
acc = os.access
if write:
return acc(path, os.W_OK) and acc(path, os.X_OK)
else:
return acc(path, os.R_OK) and acc(path, os.X_OK)
|
gpl-3.0
| 4,532,384,910,369,622,500 | -8,939,680,754,970,952,000 | 29.653846 | 80 | 0.604559 | false |
julienbou/heroku-buildpack-serpan
|
vendor/pip-1.3.1/pip/vcs/subversion.py
|
63
|
10620
|
import os
import re
from pip.backwardcompat import urlparse
from pip import InstallationError
from pip.index import Link
from pip.util import rmtree, display_path, call_subprocess
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
bundle_file = 'svn-checkout.txt'
guide = ('# This was an svn checkout; to make it a checkout again run:\n'
'svn checkout --force -r %(rev)s %(url)s .\n')
def get_info(self, location):
"""Returns (url, revision), where both are strings"""
assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
output = call_subprocess(
[self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
match = _svn_url_re.search(output)
if not match:
logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return None, None
url = match.group(1).strip()
match = _svn_revision_re.search(output)
if not match:
logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return url, None
return url, match.group(1)
def parse_vcs_bundle_file(self, content):
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^-r\s*([^ ])?', line)
if not match:
return None, None
rev = match.group(1)
rest = line[match.end():].strip().split(None, 1)[0]
return rest, rev
return None, None
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev = self.get_url_rev()
rev_options = get_rev_options(url, rev)
logger.notify('Exporting svn repository %s to %s' % (url, location))
logger.indent += 2
try:
if os.path.exists(location):
# Subversion doesn't like to check out over an existing directory
# --force fixes this, but was only added in svn 1.5
rmtree(location)
call_subprocess(
[self.cmd, 'export'] + rev_options + [url, location],
filter_stdout=self._filter, show_stdout=False)
finally:
logger.indent -= 2
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'switch'] + rev_options + [url, dest])
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'update'] + rev_options + [dest])
def obtain(self, dest):
url, rev = self.get_url_rev()
rev_options = get_rev_options(url, rev)
if rev:
rev_display = ' (to revision %s)' % rev
else:
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options + [url, dest])
def get_location(self, dist, dependency_links):
for url in dependency_links:
egg_fragment = Link(url).egg_fragment
if not egg_fragment:
continue
if '-' in egg_fragment:
## FIXME: will this work when a package has - in the name?
key = '-'.join(egg_fragment.split('-')[:-1]).lower()
else:
key = egg_fragment
if key == dist.key:
return url.split('#', 1)[0]
return None
def get_revision(self, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if self.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(self.dirname)
entries_fn = os.path.join(base, self.dirname, 'entries')
if not os.path.exists(entries_fn):
## FIXME: should we warn?
continue
dirurl, localrev = self._get_svn_url_rev(base)
if base == location:
base_url = dirurl + '/' # save the root url
elif not dirurl or not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_url_rev(self):
        # hotfix the URL scheme: after stripping svn+ from svn+ssh://, re-add it
url, rev = super(Subversion, self).get_url_rev()
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev
def get_url(self, location):
# In cases where the source is in a subdirectory, not alongside setup.py
# we have to look up in the location until we find a real setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without finding setup.py
logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
% orig_location)
return None
return self._get_svn_url_rev(location)[0]
def _get_svn_url_rev(self, location):
f = open(os.path.join(location, self.dirname, 'entries'))
data = f.read()
f.close()
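        # svn 1.4-1.6 working copies use a plain-text 'entries' file whose
        # first line is the format number (8, 9 or 10); older clients used
        # XML, and svn >= 1.7 is handled below via 'svn info --xml'.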
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
url = data[0][3]
revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError('Badly formatted data: %r' % data)
url = match.group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
else:
try:
# subversion >= 1.7
xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False)
url = _svn_info_xml_url_re.search(xml).group(1)
revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
except InstallationError:
url, revs = None, []
if revs:
rev = max(revs)
else:
rev = 0
return url, rev
def get_tag_revs(self, svn_tag_url):
stdout = call_subprocess(
[self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
results = []
for line in stdout.splitlines():
parts = line.split()
rev = int(parts[0])
tag = parts[-1].strip('/')
results.append((tag, rev))
return results
def find_tag_match(self, rev, tag_revs):
best_match_rev = None
best_tag = None
for tag, tag_rev in tag_revs:
if (tag_rev > rev and
(best_match_rev is None or best_match_rev > tag_rev)):
# FIXME: Is best_match > tag_rev really possible?
# or is it a sign something is wacky?
best_match_rev = tag_rev
best_tag = tag
return best_tag
def get_src_requirement(self, dist, location, find_tags=False):
repo = self.get_url(location)
if repo is None:
return None
parts = repo.split('/')
## FIXME: why not project name?
egg_project_name = dist.egg_name().split('-', 1)[0]
rev = self.get_revision(location)
if parts[-2] in ('tags', 'tag'):
# It's a tag, perfect!
full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
elif parts[-2] in ('branches', 'branch'):
# It's a branch :(
full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
elif parts[-1] == 'trunk':
# Trunk :-/
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
if find_tags:
tag_url = '/'.join(parts[:-1]) + '/tags'
tag_revs = self.get_tag_revs(tag_url)
match = self.find_tag_match(rev, tag_revs)
if match:
logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
repo = '%s/%s' % (tag_url, match)
full_egg_name = '%s-%s' % (egg_project_name, match)
else:
# Don't know what it is
logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
if rev:
rev_options = ['-r', rev]
else:
rev_options = []
r = urlparse.urlsplit(url)
if hasattr(r, 'username'):
# >= Python-2.5
username, password = r.username, r.password
else:
netloc = r[1]
if '@' in netloc:
auth = netloc.split('@')[0]
if ':' in auth:
username, password = auth.split(':', 1)
else:
username, password = auth, None
else:
username, password = None, None
if username:
rev_options += ['--username', username]
if password:
rev_options += ['--password', password]
return rev_options
vcs.register(Subversion)
|
mit
| -7,216,872,319,163,334,000 | 2,665,252,319,033,148,400 | 38.044118 | 101 | 0.533804 | false |
opensourcechipspark/platform_external_chromium_org
|
third_party/tlslite/tlslite/utils/PyCrypto_RSAKey.py
|
361
|
1814
|
"""PyCrypto RSA implementation."""
from cryptomath import *
from RSAKey import *
from Python_RSAKey import Python_RSAKey
if pycryptoLoaded:
from Crypto.PublicKey import RSA
class PyCrypto_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if not d:
self.rsa = RSA.construct( (n, e) )
else:
self.rsa = RSA.construct( (n, e, d, p, q) )
def __getattr__(self, name):
return getattr(self.rsa, name)
def hasPrivateKey(self):
return self.rsa.has_private()
def hash(self):
return Python_RSAKey(self.n, self.e).hash()
def _rawPrivateKeyOp(self, m):
s = numberToString(m)
byteLength = numBytes(self.n)
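            # The big-endian encoding may come out one byte short of the
            # modulus length; left-pad it with a zero byte in that case.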
if len(s)== byteLength:
pass
elif len(s) == byteLength-1:
s = '\0' + s
else:
raise AssertionError()
c = stringToNumber(self.rsa.decrypt((s,)))
return c
def _rawPublicKeyOp(self, c):
s = numberToString(c)
byteLength = numBytes(self.n)
if len(s)== byteLength:
pass
elif len(s) == byteLength-1:
s = '\0' + s
else:
raise AssertionError()
m = stringToNumber(self.rsa.encrypt(s, None)[0])
return m
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = PyCrypto_RSAKey()
def f(numBytes):
return bytesToString(getRandomBytes(numBytes))
key.rsa = RSA.generate(bits, f)
return key
generate = staticmethod(generate)
|
bsd-3-clause
| 147,062,048,152,731,780 | 6,680,632,450,047,965,000 | 28.737705 | 72 | 0.509372 | false |
freedesktop-unofficial-mirror/telepathy__telepathy-idle
|
tests/twisted/servicetest.py
|
2
|
21869
|
"""
Infrastructure code for testing connection managers.
"""
from twisted.internet import glib2reactor
from twisted.internet.protocol import Protocol, Factory, ClientFactory
glib2reactor.install()
import sys
import time
import os
import pprint
import unittest
import dbus
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
from twisted.internet import reactor
import constants as cs
tp_name_prefix = cs.PREFIX
tp_path_prefix = '/' + cs.PREFIX.replace('.', '/')
class DictionarySupersetOf (object):
"""Utility class for expecting "a dictionary with at least these keys"."""
def __init__(self, dictionary):
self._dictionary = dictionary
def __repr__(self):
return "DictionarySupersetOf(%s)" % self._dictionary
def __eq__(self, other):
"""would like to just do:
return set(other.items()).issuperset(self._dictionary.items())
but it turns out that this doesn't work if you have another dict
nested in the values of your dicts"""
try:
for k,v in self._dictionary.items():
if k not in other or other[k] != v:
return False
return True
except TypeError: # other is not iterable
return False
class Event(object):
def __init__(self, type, **kw):
self.__dict__.update(kw)
self.type = type
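        # An event type like 'dbus-signal' is routed to subqueue 'dbus'
        # with subtype 'signal'.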
(self.subqueue, self.subtype) = type.split ("-", 1)
def __str__(self):
return '\n'.join([ str(type(self)) ] + format_event(self))
def format_event(event):
ret = ['- type %s' % event.type]
for key in sorted(dir(event)):
if key != 'type' and not key.startswith('_'):
ret.append('- %s: %s' % (
key, pprint.pformat(getattr(event, key))))
if key == 'error':
ret.append('%s' % getattr(event, key))
return ret
class EventPattern:
def __init__(self, type, **properties):
self.type = type
self.predicate = None
if 'predicate' in properties:
self.predicate = properties['predicate']
del properties['predicate']
self.properties = properties
(self.subqueue, self.subtype) = type.split ("-", 1)
def __repr__(self):
properties = dict(self.properties)
if self.predicate is not None:
properties['predicate'] = self.predicate
return '%s(%r, **%r)' % (
self.__class__.__name__, self.type, properties)
def match(self, event):
if event.type != self.type:
return False
for key, value in self.properties.iteritems():
try:
if getattr(event, key) != value:
return False
except AttributeError:
return False
if self.predicate is None or self.predicate(event):
return True
return False
class TimeoutError(Exception):
pass
class ForbiddenEventOccurred(Exception):
def __init__(self, event):
Exception.__init__(self)
self.event = event
def __str__(self):
return '\n' + '\n'.join(format_event(self.event))
class BaseEventQueue:
"""Abstract event queue base class.
Implement the wait() method to have something that works.
"""
def __init__(self, timeout=None):
self.verbose = False
self.forbidden_events = set()
self.event_queues = {}
if timeout is None:
self.timeout = 5
else:
self.timeout = timeout
def log(self, s):
if self.verbose:
print s
def log_queues(self, queues):
self.log ("Waiting for event on: %s" % ", ".join(queues))
def log_event(self, event):
self.log('got event:')
if self.verbose:
map(self.log, format_event(event))
def forbid_events(self, patterns):
"""
Add patterns (an iterable of EventPattern) to the set of forbidden
events. If a forbidden event occurs during an expect or expect_many,
the test will fail.
"""
self.forbidden_events.update(set(patterns))
def unforbid_events(self, patterns):
"""
Remove 'patterns' (an iterable of EventPattern) from the set of
forbidden events. These must be the same EventPattern pointers that
were passed to forbid_events.
"""
self.forbidden_events.difference_update(set(patterns))
def unforbid_all(self):
"""
Remove all patterns from the set of forbidden events.
"""
self.forbidden_events.clear()
def _check_forbidden(self, event):
for e in self.forbidden_events:
if e.match(event):
raise ForbiddenEventOccurred(event)
def expect(self, type, **kw):
"""
Waits for an event matching the supplied pattern to occur, and returns
it. For example, to await a D-Bus signal with particular arguments:
e = q.expect('dbus-signal', signal='Badgers', args=["foo", 42])
"""
pattern = EventPattern(type, **kw)
t = time.time()
while True:
event = self.wait([pattern.subqueue])
self._check_forbidden(event)
if pattern.match(event):
self.log('handled, took %0.3f ms'
% ((time.time() - t) * 1000.0) )
self.log('')
return event
self.log('not handled')
self.log('')
def expect_many(self, *patterns):
"""
Waits for events matching all of the supplied EventPattern instances to
return, and returns a list of events in the same order as the patterns
they matched. After a pattern is successfully matched, it is not
considered for future events; if more than one unsatisfied pattern
matches an event, the first "wins".
Note that the expected events may occur in any order. If you're
expecting a series of events in a particular order, use repeated calls
to expect() instead.
This method is useful when you're awaiting a number of events which may
happen in any order. For instance, in telepathy-gabble, calling a D-Bus
method often causes a value to be returned immediately, as well as a
query to be sent to the server. Since these events may reach the test
in either order, the following is incorrect and will fail if the IQ
happens to reach the test first:
ret = q.expect('dbus-return', method='Foo')
query = q.expect('stream-iq', query_ns=ns.FOO)
The following would be correct:
ret, query = q.expect_many(
EventPattern('dbus-return', method='Foo'),
EventPattern('stream-iq', query_ns=ns.FOO),
)
"""
ret = [None] * len(patterns)
t = time.time()
while None in ret:
try:
queues = set()
for i, pattern in enumerate(patterns):
if ret[i] is None:
queues.add(pattern.subqueue)
event = self.wait(queues)
except TimeoutError:
self.log('timeout')
self.log('still expecting:')
for i, pattern in enumerate(patterns):
if ret[i] is None:
self.log(' - %r' % pattern)
raise
self._check_forbidden(event)
for i, pattern in enumerate(patterns):
if ret[i] is None and pattern.match(event):
self.log('handled, took %0.3f ms'
% ((time.time() - t) * 1000.0) )
self.log('')
ret[i] = event
break
else:
self.log('not handled')
self.log('')
return ret
def demand(self, type, **kw):
pattern = EventPattern(type, **kw)
event = self.wait([pattern.subqueue])
if pattern.match(event):
self.log('handled')
self.log('')
return event
self.log('not handled')
raise RuntimeError('expected %r, got %r' % (pattern, event))
def queues_available(self, queues):
if queues == None:
return self.event_queues.keys()
else:
available = self.event_queues.keys()
return filter(lambda x: x in available, queues)
def pop_next(self, queue):
events = self.event_queues[queue]
e = events.pop(0)
if not events:
self.event_queues.pop (queue)
return e
def append(self, event):
        self.log("Adding to queue")
        self.log_event(event)
self.event_queues[event.subqueue] = \
self.event_queues.get(event.subqueue, []) + [event]
class IteratingEventQueue(BaseEventQueue):
"""Event queue that works by iterating the Twisted reactor."""
def __init__(self, timeout=None):
BaseEventQueue.__init__(self, timeout)
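    # wait() spins the Twisted reactor in short slices until an event
    # arrives on one of the requested subqueues or the timeout delayed
    # call fires.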
def wait(self, queues=None):
stop = [False]
def later():
stop[0] = True
delayed_call = reactor.callLater(self.timeout, later)
self.log_queues(queues)
qa = self.queues_available(queues)
while not qa and (not stop[0]):
reactor.iterate(0.01)
qa = self.queues_available(queues)
if qa:
delayed_call.cancel()
            e = self.pop_next(qa[0])
            self.log_event(e)
return e
else:
raise TimeoutError
class TestEventQueue(BaseEventQueue):
def __init__(self, events):
BaseEventQueue.__init__(self)
for e in events:
            self.append(e)
    def wait(self, queues=None):
        qa = self.queues_available(queues)
        if qa:
            return self.pop_next(qa[0])
else:
raise TimeoutError
class EventQueueTest(unittest.TestCase):
def test_expect(self):
queue = TestEventQueue([Event('test-foo'), Event('test-bar')])
assert queue.expect('test-foo').type == 'test-foo'
assert queue.expect('test-bar').type == 'test-bar'
def test_expect_many(self):
queue = TestEventQueue([Event('test-foo'),
Event('test-bar')])
bar, foo = queue.expect_many(
EventPattern('test-bar'),
EventPattern('test-foo'))
assert bar.type == 'test-bar'
assert foo.type == 'test-foo'
def test_expect_many2(self):
# Test that events are only matched against patterns that haven't yet
# been matched. This tests a regression.
queue = TestEventQueue([Event('test-foo', x=1), Event('test-foo', x=2)])
foo1, foo2 = queue.expect_many(
EventPattern('test-foo'),
EventPattern('test-foo'))
assert foo1.type == 'test-foo' and foo1.x == 1
assert foo2.type == 'test-foo' and foo2.x == 2
def test_expect_queueing(self):
queue = TestEventQueue([Event('foo-test', x=1),
Event('foo-test', x=2)])
queue.append(Event('bar-test', x=1))
queue.append(Event('bar-test', x=2))
queue.append(Event('baz-test', x=1))
queue.append(Event('baz-test', x=2))
        for x in xrange(1, 3):
            e = queue.expect('baz-test')
            assertEquals(x, e.x)
            e = queue.expect('bar-test')
            assertEquals(x, e.x)
            e = queue.expect('foo-test')
            assertEquals(x, e.x)
def test_timeout(self):
queue = TestEventQueue([])
self.assertRaises(TimeoutError, queue.expect, 'test-foo')
def test_demand(self):
queue = TestEventQueue([Event('test-foo'), Event('test-bar')])
foo = queue.demand('test-foo')
assert foo.type == 'test-foo'
def test_demand_fail(self):
queue = TestEventQueue([Event('test-foo'), Event('test-bar')])
self.assertRaises(RuntimeError, queue.demand, 'test-bar')
def unwrap(x):
"""Hack to unwrap D-Bus values, so that they're easier to read when
printed."""
if isinstance(x, list):
return map(unwrap, x)
if isinstance(x, tuple):
return tuple(map(unwrap, x))
if isinstance(x, dict):
return dict([(unwrap(k), unwrap(v)) for k, v in x.iteritems()])
if isinstance(x, dbus.Boolean):
return bool(x)
for t in [unicode, str, long, int, float]:
if isinstance(x, t):
return t(x)
return x
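# Illustrative examples (dbus.Array subclasses list and dbus.Dictionary
# subclasses dict, so both are caught by the branches above):
#     unwrap(dbus.Array([dbus.Int32(1)])) -> [1]
#     unwrap(dbus.Dictionary({dbus.String(u'k'): dbus.Boolean(True)})) -> {u'k': True}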
def call_async(test, proxy, method, *args, **kw):
"""Call a D-Bus method asynchronously and generate an event for the
resulting method return/error."""
def reply_func(*ret):
test.append(Event('dbus-return', method=method,
value=unwrap(ret)))
def error_func(err):
test.append(Event('dbus-error', method=method, error=err,
name=err.get_dbus_name(), message=str(err)))
method_proxy = getattr(proxy, method)
kw.update({'reply_handler': reply_func, 'error_handler': error_func})
method_proxy(*args, **kw)
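# Typical usage sketch: the reply later surfaces as a 'dbus-return' event
# and a failure as 'dbus-error', so a test can do, e.g.:
#     call_async(q, conn, 'Connect')
#     q.expect('dbus-return', method='Connect')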
def sync_dbus(bus, q, conn):
# Dummy D-Bus method call. We can't use DBus.Peer.Ping() because libdbus
# replies to that message immediately, rather than handing it up to
# dbus-glib and thence Gabble, which means that Ping()ing Gabble doesn't
# ensure that it's processed all D-Bus messages prior to our ping.
#
# This won't do the right thing unless the proxy has a unique name.
assert conn.object.bus_name.startswith(':')
root_object = bus.get_object(conn.object.bus_name, '/', introspect=False)
call_async(q,
dbus.Interface(root_object, cs.PREFIX + '.Tests'),
'DummySyncDBus')
q.expect('dbus-error', method='DummySyncDBus')
class ProxyWrapper:
def __init__(self, object, default, others={}):
self.object = object
self.default_interface = dbus.Interface(object, default)
self.Properties = dbus.Interface(object, dbus.PROPERTIES_IFACE)
self.TpProperties = \
dbus.Interface(object, tp_name_prefix + '.Properties')
self.interfaces = dict([
(name, dbus.Interface(object, iface))
for name, iface in others.iteritems()])
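    # Attribute lookup falls back in order: the named extra interfaces,
    # the wrapped object's own attributes, then the default interface.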
def __getattr__(self, name):
if name in self.interfaces:
return self.interfaces[name]
if name in self.object.__dict__:
return getattr(self.object, name)
return getattr(self.default_interface, name)
class ConnWrapper(ProxyWrapper):
def inspect_contact_sync(self, handle):
return self.inspect_contacts_sync([handle])[0]
def inspect_contacts_sync(self, handles):
h2asv = self.Contacts.GetContactAttributes(handles, [], True)
ret = []
for h in handles:
ret.append(h2asv[h][cs.ATTR_CONTACT_ID])
return ret
def get_contact_handle_sync(self, identifier):
return self.Contacts.GetContactByID(identifier, [])[0]
def get_contact_handles_sync(self, ids):
return [self.get_contact_handle_sync(i) for i in ids]
def wrap_connection(conn):
return ConnWrapper(conn, tp_name_prefix + '.Connection',
dict([
(name, tp_name_prefix + '.Connection.Interface.' + name)
for name in ['Aliasing', 'Avatars', 'Capabilities', 'Contacts',
'SimplePresence', 'Requests']] +
[('Peer', 'org.freedesktop.DBus.Peer'),
('ContactCapabilities', cs.CONN_IFACE_CONTACT_CAPS),
('ContactInfo', cs.CONN_IFACE_CONTACT_INFO),
('Location', cs.CONN_IFACE_LOCATION),
('Future', tp_name_prefix + '.Connection.FUTURE'),
('MailNotification', cs.CONN_IFACE_MAIL_NOTIFICATION),
('ContactList', cs.CONN_IFACE_CONTACT_LIST),
('ContactGroups', cs.CONN_IFACE_CONTACT_GROUPS),
('PowerSaving', cs.CONN_IFACE_POWER_SAVING),
('Addressing', cs.CONN_IFACE_ADDRESSING),
]))
def wrap_channel(chan, type_, extra=None):
interfaces = {
type_: tp_name_prefix + '.Channel.Type.' + type_,
'Group': tp_name_prefix + '.Channel.Interface.Group',
}
if extra:
interfaces.update(dict([
(name, tp_name_prefix + '.Channel.Interface.' + name)
for name in extra]))
return ProxyWrapper(chan, tp_name_prefix + '.Channel', interfaces)
def wrap_content(chan, extra=None):
interfaces = { }
if extra:
interfaces.update(dict([
(name, tp_name_prefix + '.Call1.Content.Interface.' + name)
for name in extra]))
return ProxyWrapper(chan, tp_name_prefix + '.Call1.Content', interfaces)
def make_connection(bus, event_func, name, proto, params):
cm = bus.get_object(
tp_name_prefix + '.ConnectionManager.%s' % name,
tp_path_prefix + '/ConnectionManager/%s' % name,
introspect=False)
cm_iface = dbus.Interface(cm, tp_name_prefix + '.ConnectionManager')
connection_name, connection_path = cm_iface.RequestConnection(
proto, dbus.Dictionary(params, signature='sv'))
conn = wrap_connection(bus.get_object(connection_name, connection_path))
return conn
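# Illustrative call (the CM name, protocol and parameters below are
# hypothetical and depend on the connection manager under test):
#     conn = make_connection(bus, queue.append, 'gabble', 'jabber',
#                            {'account': 'test@localhost', 'password': 'pass'})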
def make_channel_proxy(conn, path, iface):
bus = dbus.SessionBus()
chan = bus.get_object(conn.object.bus_name, path)
chan = dbus.Interface(chan, tp_name_prefix + '.' + iface)
return chan
# block_reading can be used if the test wants to choose when we start to
# read data from the socket.
class EventProtocol(Protocol):
def __init__(self, queue=None, block_reading=False):
self.queue = queue
self.block_reading = block_reading
def dataReceived(self, data):
if self.queue is not None:
self.queue.append(Event('socket-data', protocol=self,
data=data))
def sendData(self, data):
self.transport.write(data)
def connectionMade(self):
if self.block_reading:
self.transport.stopReading()
def connectionLost(self, reason=None):
if self.queue is not None:
self.queue.append(Event('socket-disconnected', protocol=self))
class EventProtocolFactory(Factory):
def __init__(self, queue, block_reading=False):
self.queue = queue
self.block_reading = block_reading
def _create_protocol(self):
return EventProtocol(self.queue, self.block_reading)
def buildProtocol(self, addr):
proto = self._create_protocol()
self.queue.append(Event('socket-connected', protocol=proto))
return proto
class EventProtocolClientFactory(EventProtocolFactory, ClientFactory):
pass
def watch_tube_signals(q, tube):
def got_signal_cb(*args, **kwargs):
q.append(Event('tube-signal',
path=kwargs['path'],
signal=kwargs['member'],
args=map(unwrap, args),
tube=tube))
tube.add_signal_receiver(got_signal_cb,
path_keyword='path', member_keyword='member',
byte_arrays=True)
def pretty(x):
return pprint.pformat(unwrap(x))
def assertEquals(expected, value):
if expected != value:
raise AssertionError(
"expected:\n%s\ngot:\n%s" % (pretty(expected), pretty(value)))
def assertSameSets(expected, value):
exp_set = set(expected)
val_set = set(value)
if exp_set != val_set:
raise AssertionError(
"expected contents:\n%s\ngot:\n%s" % (
pretty(exp_set), pretty(val_set)))
def assertNotEquals(expected, value):
if expected == value:
raise AssertionError(
"expected something other than:\n%s" % pretty(value))
def assertContains(element, value):
if element not in value:
raise AssertionError(
"expected:\n%s\nin:\n%s" % (pretty(element), pretty(value)))
def assertDoesNotContain(element, value):
if element in value:
raise AssertionError(
"expected:\n%s\nnot in:\n%s" % (pretty(element), pretty(value)))
def assertLength(length, value):
if len(value) != length:
raise AssertionError("expected: length %d, got length %d:\n%s" % (
length, len(value), pretty(value)))
def assertFlagsSet(flags, value):
masked = value & flags
if masked != flags:
raise AssertionError(
"expected flags %u, of which only %u are set in %u" % (
flags, masked, value))
def assertFlagsUnset(flags, value):
masked = value & flags
if masked != 0:
raise AssertionError(
"expected none of flags %u, but %u are set in %u" % (
flags, masked, value))
def assertDBusError(name, error):
if error.get_dbus_name() != name:
raise AssertionError(
"expected DBus error named:\n %s\ngot:\n %s\n(with message: %s)"
% (name, error.get_dbus_name(), error.message))
def install_colourer():
def red(s):
return '\x1b[31m%s\x1b[0m' % s
def green(s):
return '\x1b[32m%s\x1b[0m' % s
patterns = {
'handled': green,
'not handled': red,
}
class Colourer:
def __init__(self, fh, patterns):
self.fh = fh
self.patterns = patterns
def write(self, s):
for p, f in self.patterns.items():
if s.startswith(p):
self.fh.write(f(p) + s[len(p):])
return
self.fh.write(s)
sys.stdout = Colourer(sys.stdout, patterns)
return sys.stdout
# this is just to shut up unittest.
class DummyStream(object):
def write(self, s):
if 'CHECK_TWISTED_VERBOSE' in os.environ:
print s,
def flush(self):
pass
if __name__ == '__main__':
stream = DummyStream()
runner = unittest.TextTestRunner(stream=stream)
unittest.main(testRunner=runner)
|
lgpl-2.1
| -6,420,750,494,713,039,000 | 7,003,898,149,926,571,000 | 30.786337 | 80 | 0.585806 | false |
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/requests/packages/chardet/mbcharsetprober.py
|
2924
|
3268
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
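        # Walk the buffer one byte at a time through the coding state
        # machine; each time a complete character starts (eStart), the last
        # two bytes plus the character length are fed to the distribution
        # analyzer so it can accumulate frequency statistics.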
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
mit
| 7,333,750,411,144,890,000 | 6,153,446,421,854,612,000 | 37 | 78 | 0.594859 | false |
Ryex/airtime
|
python_apps/media-monitor/mm2/tests/test_owners.py
|
12
|
1265
|
# -*- coding: utf-8 -*-
import unittest
from media.monitor import owners
class TestMMP(unittest.TestCase):
def setUp(self):
self.f = "test.mp3"
def test_has_owner(self):
owners.reset_owners()
o = 12345
        self.assertTrue( owners.add_file_owner(self.f, o) )
self.assertTrue( owners.has_owner(self.f) )
def test_add_file_owner(self):
owners.reset_owners()
self.assertFalse( owners.add_file_owner('testing', -1) )
self.assertTrue( owners.add_file_owner(self.f, 123) )
self.assertTrue( owners.add_file_owner(self.f, 123) )
self.assertTrue( owners.add_file_owner(self.f, 456) )
def test_remove_file_owner(self):
owners.reset_owners()
self.assertTrue( owners.add_file_owner(self.f, 123) )
self.assertTrue( owners.remove_file_owner(self.f) )
self.assertFalse( owners.remove_file_owner(self.f) )
def test_get_owner(self):
owners.reset_owners()
self.assertTrue( owners.add_file_owner(self.f, 123) )
self.assertEqual( owners.get_owner(self.f), 123, "file is owned" )
self.assertEqual( owners.get_owner("random_stuff.txt"), -1,
"file is not owned" )
if __name__ == '__main__': unittest.main()
|
agpl-3.0
| 7,151,501,940,183,657,000 | -1,363,843,069,317,283,300 | 34.138889 | 74 | 0.621344 | false |
fnp/wolnelektury
|
src/social/models.py
|
1
|
6767
|
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from random import randint
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _, get_language
from catalogue.models import Book
from wolnelektury.utils import cached_render, clear_cached_renders
class BannerGroup(models.Model):
name = models.CharField(_('name'), max_length=255, unique=True)
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
class Meta:
ordering = ('name',)
verbose_name = _('banner group')
verbose_name_plural = _('banner groups')
def __str__(self):
return self.name
def get_absolute_url(self):
"""This is used for testing."""
return "%s?banner_group=%d" % (reverse('main_page'), self.id)
def get_banner(self):
banners = self.cite_set.all()
count = banners.count()
if not count:
return None
return banners[randint(0, count-1)]
class Cite(models.Model):
book = models.ForeignKey(Book, models.CASCADE, verbose_name=_('book'), null=True, blank=True)
text = models.TextField(_('text'), blank=True)
small = models.BooleanField(_('small'), default=False, help_text=_('Make this cite display smaller.'))
vip = models.CharField(_('VIP'), max_length=128, null=True, blank=True)
link = models.URLField(_('link'))
video = models.URLField(_('video'), blank=True)
picture = models.ImageField(_('picture'), blank=True,
                                help_text='Best dimensions: 975 x 315 with text, 487 x 315 without text.')
picture_alt = models.CharField(_('picture alternative text'), max_length=255, blank=True)
picture_title = models.CharField(_('picture title'), max_length=255, null=True, blank=True)
picture_author = models.CharField(_('picture author'), max_length=255, blank=True, null=True)
picture_link = models.URLField(_('picture link'), blank=True, null=True)
picture_license = models.CharField(_('picture license name'), max_length=255, blank=True, null=True)
picture_license_link = models.URLField(_('picture license link'), blank=True, null=True)
sticky = models.BooleanField(_('sticky'), default=False, db_index=True,
help_text=_('Sticky cites will take precedense.'))
background_plain = models.BooleanField(_('plain background'), default=False)
background_color = models.CharField(_('background color'), max_length=32, blank=True)
image = models.ImageField(
_('background image'), upload_to='social/cite', null=True, blank=True,
help_text=_('Best background is 975 x 315 px and under 100kB.'))
image_title = models.CharField(_('background title'), max_length=255, null=True, blank=True)
image_author = models.CharField(_('background author'), max_length=255, blank=True, null=True)
image_link = models.URLField(_('background link'), blank=True, null=True)
image_license = models.CharField(_('background license name'), max_length=255, blank=True, null=True)
image_license_link = models.URLField(_('background license link'), blank=True, null=True)
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
group = models.ForeignKey(BannerGroup, verbose_name=_('group'), null=True, blank=True, on_delete=models.SET_NULL)
class Meta:
ordering = ('vip', 'text')
verbose_name = _('banner')
verbose_name_plural = _('banners')
def __str__(self):
t = []
if self.text:
t.append(self.text[:60])
if self.book_id:
t.append('[ks.]'[:60])
t.append(self.link[:60])
if self.vip:
t.append('vip: ' + self.vip)
if self.picture:
t.append('[obr.]')
if self.video:
t.append('[vid.]')
return ', '.join(t)
def get_absolute_url(self):
"""This is used for testing."""
return "%s?banner=%d" % (reverse('main_page'), self.id)
def has_box(self):
return self.video or self.picture
def has_body(self):
return self.vip or self.text or self.book
def layout(self):
pieces = []
if self.has_box():
pieces.append('box')
if self.has_body():
pieces.append('text')
return '-'.join(pieces)
def save(self, *args, **kwargs):
ret = super(Cite, self).save(*args, **kwargs)
self.clear_cache()
return ret
@cached_render('social/cite_promo.html')
def main_box(self):
return {
'cite': self,
'main': True,
}
def clear_cache(self):
clear_cached_renders(self.main_box)
class Carousel(models.Model):
placement = models.SlugField(_('placement'), choices=[
('main', 'main'),
])
priority = models.SmallIntegerField(_('priority'), default=0)
language = models.CharField(_('language'), max_length=2, blank=True, default='', choices=settings.LANGUAGES)
class Meta:
# ordering = ('placement', '-priority')
verbose_name = _('carousel')
verbose_name_plural = _('carousels')
def __str__(self):
return self.placement
@classmethod
def get(cls, placement):
carousel = cls.objects.filter(models.Q(language='') | models.Q(language=get_language()), placement=placement).order_by('-priority', '?').first()
if carousel is None:
carousel = cls.objects.create(placement=placement)
return carousel
class CarouselItem(models.Model):
order = models.PositiveSmallIntegerField(_('order'), unique=True)
carousel = models.ForeignKey(Carousel, models.CASCADE, verbose_name=_('carousel'))
banner = models.ForeignKey(Cite, models.CASCADE, null=True, blank=True, verbose_name=_('banner'))
banner_group = models.ForeignKey(BannerGroup, models.CASCADE, null=True, blank=True, verbose_name=_('banner group'))
class Meta:
ordering = ('order',)
unique_together = [('carousel', 'order')]
verbose_name = _('carousel item')
verbose_name_plural = _('carousel items')
def __str__(self):
return str(self.banner or self.banner_group)
def clean(self):
if not self.banner and not self.banner_group:
raise ValidationError(_('Either banner or banner group is required.'))
elif self.banner and self.banner_group:
raise ValidationError(_('Either banner or banner group is required.'))
def get_banner(self):
return self.banner or self.banner_group.get_banner()
|
agpl-3.0
| 7,675,297,706,836,847,000 | 1,995,903,927,762,785,500 | 38.8 | 152 | 0.635087 | false |
glouppe/scikit-learn
|
examples/model_selection/plot_roc.py
|
49
|
5041
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
| 1,782,310,081,594,902,500 | -5,830,752,488,772,330,000 | 33.060811 | 79 | 0.654235 | false |
MenZil/kuma
|
vendor/packages/nose/commands.py
|
68
|
6310
|
"""
nosetests setuptools command
----------------------------
The easiest way to run tests with nose is to use the `nosetests` setuptools
command::
python setup.py nosetests
This command has one *major* benefit over the standard `test` command: *all
nose plugins are supported*.
To configure the `nosetests` command, add a [nosetests] section to your
setup.cfg. The [nosetests] section can contain any command line arguments that
nosetests supports. The differences between issuing an option on the command
line and adding it to setup.cfg are:
* In setup.cfg, the -- prefix must be excluded
* In setup.cfg, command line flags that take no arguments must be given an
argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
Here's an example [nosetests] setup.cfg section::
[nosetests]
verbosity=1
detailed-errors=1
with-coverage=1
cover-package=nose
debug=nose.loader
pdb=1
pdb-failures=1
If you commonly run nosetests with a large number of options, using
the nosetests setuptools command and configuring with setup.cfg can
make running your tests much less tedious. (Note that the same options
and format supported in setup.cfg are supported in all other config
files, and the nosetests script will also load config files.)
Another reason to run tests with the command is that the command will
install packages listed in your `tests_require`, as well as doing a
complete build of your package before running tests. For packages with
dependencies or that build C extensions, using the setuptools command
can be more convenient than building by hand and running the nosetests
script.
Bootstrapping
-------------
If you are distributing your project and want users to be able to run tests
without having to install nose themselves, add nose to the setup_requires
section of your setup()::
setup(
# ...
setup_requires=['nose>=1.0']
)
This will direct setuptools to download and activate nose during the setup
process, making the ``nosetests`` command available.
"""
try:
from setuptools import Command
except ImportError:
Command = nosetests = None
else:
from nose.config import Config, option_blacklist, user_config_files, \
flag, _bool
from nose.core import TestProgram
from nose.plugins import DefaultPluginManager
def get_user_options(parser):
"""convert a optparse option list into a distutils option tuple list"""
opt_list = []
for opt in parser.option_list:
if opt._long_opts[0][2:] in option_blacklist:
continue
long_name = opt._long_opts[0][2:]
if opt.action not in ('store_true', 'store_false'):
long_name = long_name + "="
short_name = None
if opt._short_opts:
short_name = opt._short_opts[0][1:]
opt_list.append((long_name, short_name, opt.help or ""))
return opt_list
class nosetests(Command):
description = "Run unit tests using nosetests"
__config = Config(files=user_config_files(),
plugins=DefaultPluginManager())
__parser = __config.getParser()
user_options = get_user_options(__parser)
def initialize_options(self):
"""create the member variables, but change hyphens to
underscores
"""
self.option_to_cmds = {}
for opt in self.__parser.option_list:
cmd_name = opt._long_opts[0][2:]
option_name = cmd_name.replace('-', '_')
self.option_to_cmds[option_name] = cmd_name
setattr(self, option_name, None)
self.attr = None
def finalize_options(self):
"""nothing to do here"""
pass
def run(self):
"""ensure tests are capable of being run, then
run nose.main with a reconstructed argument list"""
if getattr(self.distribution, 'use_2to3', False):
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
build_py = self.get_finalized_command('build_py')
build_py.inplace = 0
build_py.run()
bpy_cmd = self.get_finalized_command("build_py")
build_path = bpy_cmd.build_lib
# Build extensions
egg_info = self.get_finalized_command('egg_info')
egg_info.egg_base = build_path
egg_info.run()
build_ext = self.get_finalized_command('build_ext')
build_ext.inplace = 0
build_ext.run()
else:
self.run_command('egg_info')
# Build extensions in-place
build_ext = self.get_finalized_command('build_ext')
build_ext.inplace = 1
build_ext.run()
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(
self.distribution.tests_require)
ei_cmd = self.get_finalized_command("egg_info")
argv = ['nosetests', '--where', ei_cmd.egg_base]
for (option_name, cmd_name) in self.option_to_cmds.items():
if option_name in option_blacklist:
continue
value = getattr(self, option_name)
if value is not None:
argv.extend(
self.cfgToArg(option_name.replace('_', '-'), value))
TestProgram(argv=argv, config=self.__config)
def cfgToArg(self, optname, value):
argv = []
long_optname = '--' + optname
opt = self.__parser.get_option(long_optname)
if opt.action in ('store_true', 'store_false'):
if not flag(value):
raise ValueError("Invalid value '%s' for '%s'" % (
value, optname))
if _bool(value):
argv.append(long_optname)
else:
argv.extend([long_optname, value])
return argv
|
mpl-2.0
| -2,178,768,374,436,785,700 | 8,546,394,911,192,313,000 | 35.686047 | 79 | 0.595721 | false |
blackzw/openwrt_sdk_dev1
|
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/unittest/loader.py
|
152
|
13420
|
"""Loading unittests."""
import os
import re
import sys
import traceback
import types
from functools import cmp_to_key as _CmpToKey
from fnmatch import fnmatch
from . import case, suite
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(object):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, case.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception, e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, case.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, suite.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, suite.TestSuite):
return test
elif isinstance(test, case.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = self._get_directory_containing_module(top_part)
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_directory_containing_module(self, module_name):
module = sys.modules[module_name]
full_path = os.path.abspath(module.__file__)
if os.path.basename(full_path).lower().startswith('__init__.py'):
return os.path.dirname(os.path.dirname(full_path))
else:
# here we have been given a module rather than a package - so
# all we can do is search the *same* directory the module is in
# should an exception be raised instead
return os.path.dirname(full_path)
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = os.path.relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception, e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
|
gpl-2.0
| -559,175,469,073,711,800 | 359,088,748,669,523,650 | 41.738854 | 94 | 0.594039 | false |
square/pants
|
src/python/pants/backend/python/tasks/python_binary_create.py
|
2
|
2193
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import time
from pants.backend.python.python_chroot import PythonChroot
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.tasks.python_task import PythonTask
from pants.base.exceptions import TaskError
class PythonBinaryCreate(PythonTask):
@staticmethod
def is_binary(target):
return isinstance(target, PythonBinary)
def __init__(self, *args, **kwargs):
super(PythonBinaryCreate, self).__init__(*args, **kwargs)
self._distdir = self.context.config.getdefault('pants_distdir')
def execute(self):
binaries = self.context.targets(self.is_binary)
# Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
names = {}
for binary in binaries:
name = binary.name
if name in names:
raise TaskError('Cannot build two binaries with the same name in a single invocation. '
'%s and %s both have the name %s.' % (binary, names[name], name))
names[name] = binary
for binary in binaries:
self.create_binary(binary)
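  # Builds <dist>/<name>.pex for a single python_binary target: pick an
  # interpreter for the target closure, stamp SCM/build properties into
  # the PexInfo, then dump the target's chroot and write out the pex.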
def create_binary(self, binary):
interpreter = self.select_interpreter_for_targets(binary.closure())
run_info = self.context.run_tracker.run_info
build_properties = {}
build_properties.update(run_info.add_basic_info(run_id=None, timestamp=time.time()))
build_properties.update(run_info.add_scm_info())
pexinfo = binary.pexinfo.copy()
pexinfo.build_properties = build_properties
with self.temporary_pex_builder(pex_info=pexinfo, interpreter=interpreter) as builder:
chroot = PythonChroot(
targets=[binary],
builder=builder,
platforms=binary.platforms,
interpreter=interpreter,
conn_timeout=self.conn_timeout)
pex_path = os.path.join(self._distdir, '%s.pex' % binary.name)
chroot.dump()
builder.build(pex_path)
|
apache-2.0
| -9,066,204,315,941,738,000 | 1,687,666,522,290,083,300 | 34.370968 | 95 | 0.69585 | false |
da1z/intellij-community
|
python/testData/inspections/PyStringFormatInspection/UnionCallType.py
|
8
|
1566
|
from collections import namedtuple
def simple_func(cond):
if cond:
return 1
else:
return 1, 2
Point = namedtuple('Point', ['x', 'y'])
def named_tuple_func(cond):
if cond:
return 1
else:
return Point(1, 1)
def primitive_types_func(cond):
if cond:
return 1
else:
return 2
def collection_func(cond):
if cond:
return [1, 2]
else:
return {1, 2}
def list_tuple(cond):
if cond:
return [1, 2]
else:
return 1, 2
"%s %s" % simple_func(True)
"%s %s" % simple_func(False)
"%s %s %s" % <warning descr="Too few arguments for format string">simple_func(False)</warning>
"%s %s" % named_tuple_func(True)
"%s %s" % named_tuple_func(False)
"%s %s %s" % named_tuple_func(False)
"%s" % primitive_types_func(True)
"%s %s" % <warning descr="Too few arguments for format string">primitive_types_func(True)</warning>
"%s %s" % <warning descr="Too few arguments for format string">primitive_types_func(False)</warning>
"%s %s %s" % <warning descr="Too few arguments for format string">primitive_types_func(False)</warning>
"%s %s" % <warning descr="Too few arguments for format string">collection_func(True)</warning>
"%s %s" % <warning descr="Too few arguments for format string">collection_func(False)</warning>
"%s %s %s" % <warning descr="Too few arguments for format string">collection_func(False)</warning>
"%s %s" % list_tuple(True)
"%s %s" % list_tuple(True)
"%s %s %s" % <warning descr="Too few arguments for format string">list_tuple(True)</warning>
|
apache-2.0
| 4,017,369,371,497,230,300 | -9,154,520,972,500,484,000 | 28 | 103 | 0.636654 | false |
SlimSaber/kernel_oneplus_msm8974
|
tools/perf/scripts/python/syscall-counts-by-pid.py
|
11180
|
1927
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
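# autodict (from Core) autovivifies nested keys, so
# syscalls[common_comm][common_pid][id] can be built up incrementally;
# the TypeError fallback below handles the very first increment.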
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
| -2,091,135,553,220,703,700 | 6,117,823,080,890,084,000 | 26.927536 | 77 | 0.615464 | false |
brentdax/swift
|
utils/gyb_syntax_support/CommonNodes.py
|
11
|
1985
|
from Child import Child
from Node import Node # noqa: I201
COMMON_NODES = [
Node('Decl', kind='Syntax'),
Node('Expr', kind='Syntax'),
Node('Stmt', kind='Syntax'),
Node('Type', kind='Syntax'),
Node('Pattern', kind='Syntax'),
Node('UnknownDecl', kind='Decl'),
Node('UnknownExpr', kind='Expr'),
Node('UnknownStmt', kind='Stmt'),
Node('UnknownType', kind='Type'),
Node('UnknownPattern', kind='Pattern'),
# code-block-item = (decl | stmt | expr) ';'?
Node('CodeBlockItem', kind='Syntax', omit_when_empty=True,
description="""
A CodeBlockItem is any Syntax node that appears on its own line inside
a CodeBlock.
""",
children=[
Child('Item', kind='Syntax',
description="The underlying node inside the code block.",
node_choices=[
Child('Decl', kind='Decl'),
Child('Stmt', kind='Stmt'),
Child('Expr', kind='Expr'),
Child('TokenList', kind='TokenList'),
Child('NonEmptyTokenList', kind='NonEmptyTokenList'),
]),
Child('Semicolon', kind='SemicolonToken',
description="""
If present, the trailing semicolon at the end of the item.
""",
is_optional=True),
Child('ErrorTokens', kind='Syntax', is_optional=True),
]),
# code-block-item-list -> code-block-item code-block-item-list?
Node('CodeBlockItemList', kind='SyntaxCollection',
element='CodeBlockItem'),
# code-block -> '{' stmt-list '}'
Node('CodeBlock', kind='Syntax',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Statements', kind='CodeBlockItemList'),
Child('RightBrace', kind='RightBraceToken'),
]),
]
|
apache-2.0
| 648,847,075,674,780,300 | 5,538,611,250,629,724,000 | 37.173077 | 79 | 0.529471 | false |
abrt/faf
|
src/pyfaf/storage/migrations/versions/1c7edfbf8941_drop_reportunknownpackage_running_fields.py
|
1
|
3689
|
# Copyright (C) 2015 ABRT Team
# Copyright (C) 2015 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
Drop reportunknownpackage.running fields, remove installed prefix
Revision ID: 1c7edfbf8941
Revises: 43bd2d59838e
Create Date: 2015-03-18 15:19:28.412310
"""
from alembic.op import (create_foreign_key, create_unique_constraint, execute,
drop_constraint, drop_column, alter_column, add_column)
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1c7edfbf8941'
down_revision = '43bd2d59838e'
t = "reportunknownpackages"
def upgrade() -> None:
# constraint name is autogenerated differently between pg versions
try:
drop_constraint("reportunknownpackages_report_id_key", t)
except: # pylint: disable=bare-except
execute('ROLLBACK')
drop_constraint(
"reportunknownpackages_report_id_type_name_installed_epoch_i_key",
t)
drop_constraint("reportunknownpackages_installed_arch_id_fkey", t)
drop_column(t, "running_epoch")
drop_column(t, "running_version")
drop_column(t, "running_release")
drop_column(t, "running_arch_id")
alter_column(t, "installed_epoch", new_column_name="epoch")
alter_column(t, "installed_version", new_column_name="version")
alter_column(t, "installed_release", new_column_name="release")
alter_column(t, "installed_arch_id", new_column_name="arch_id")
create_foreign_key("reportunknownpackages_arch_id_fkey", t,
"archs", ["arch_id"], ["id"])
fields = ["report_id", "type", "name", "epoch",
"version", "release", "arch_id"]
create_unique_constraint("reportunknownpackages_report_id_key", t, fields)
def downgrade() -> None:
drop_constraint("reportunknownpackages_report_id_key", t)
drop_constraint("reportunknownpackages_arch_id_fkey", t)
add_column(t, sa.Column('running_epoch', sa.Integer(),
nullable=True))
add_column(t, sa.Column('running_version', sa.String(64),
nullable=True))
add_column(t, sa.Column('running_release', sa.String(64),
nullable=True))
add_column(t, sa.Column('running_arch_id', sa.Integer(),
sa.ForeignKey('archs.id'),
nullable=True))
alter_column(t, "epoch", new_column_name="installed_epoch")
alter_column(t, "version", new_column_name="installed_version")
alter_column(t, "release", new_column_name="installed_release")
alter_column(t, "arch_id", new_column_name="installed_arch_id")
fields = ["report_id", "type", "name", "installed_epoch",
"installed_version", "installed_release", "installed_arch_id",
"running_epoch", "running_version", "running_release",
"running_arch_id"]
create_unique_constraint("reportunknownpackages_report_id_key", t, fields)
create_foreign_key("reportunknownpackages_installed_arch_id_fkey", t,
"archs", ["installed_arch_id"], ["id"])
|
gpl-3.0
| -4,955,256,395,186,115,000 | -480,541,826,595,064,640 | 36.642857 | 79 | 0.657089 | false |
izonder/intellij-community
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_unicode.py
|
177
|
1269
|
r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
val = ur'\\'.join([
v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
for v in val.split(ur'\\')
])
if val[0] in u'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
|
apache-2.0
| 2,878,798,668,025,132,000 | -8,332,077,264,444,450,000 | 29.214286 | 79 | 0.51379 | false |
pombredanne/SourceForge-Allura
|
ForgeTracker/forgetracker/widgets/ticket_search.py
|
2
|
5248
|
import tg
from pylons import c
import ew as ew_core
import ew.jinja2_ew as ew
from allura.lib.widgets import form_fields as ffw
class TicketSearchResults(ew_core.SimpleForm):
template='jinja:forgetracker:templates/tracker_widgets/ticket_search_results.html'
defaults=dict(
ew_core.SimpleForm.defaults,
solr_error=None,
count=None,
limit=None,
query=None,
tickets=None,
sortable_custom_fields=None,
page=1,
sort=None,
columns=None)
class fields(ew_core.NameList):
page_list=ffw.PageList()
page_size=ffw.PageSize()
lightbox=ffw.Lightbox(name='col_list',trigger='#col_menu')
def resources(self):
yield ew.JSLink('tracker_js/ticket-list.js')
yield ew.CSSLink('tracker_css/ticket-list.css')
for r in super(TicketSearchResults, self).resources():
yield r
class MassEdit(ew_core.SimpleForm):
template='jinja:forgetracker:templates/tracker_widgets/mass_edit.html'
defaults=dict(
ew_core.SimpleForm.defaults,
count=None,
limit=None,
query=None,
tickets=None,
page=1,
sort=None)
class fields(ew_core.NameList):
page_list=ffw.PageList()
page_size=ffw.PageSize()
lightbox=ffw.Lightbox(name='col_list',trigger='#col_menu')
def resources(self):
yield ew.JSLink('tracker_js/ticket-list.js')
yield ew.CSSLink('tracker_css/ticket-list.css')
for r in super(MassEdit, self).resources():
yield r
class MassEditForm(ew_core.Widget):
template='jinja:forgetracker:templates/tracker_widgets/mass_edit_form.html'
defaults=dict(
ew_core.Widget.defaults,
globals=None,
query=None,
cancel_href=None,
limit=None,
sort=None)
def resources(self):
yield ew.JSLink('tracker_js/mass-edit.js')
class SearchHelp(ffw.Lightbox):
defaults=dict(
ffw.Lightbox.defaults,
name='search_help_modal',
trigger='a.search_help_modal',
content="""<div style="height:400px; overflow:auto;"><h1>Searching for tickets</h1>
<p>Searches use <a href="http://www.solrtutorial.com/solr-query-syntax.html" target="_blank">solr lucene query syntax</a>. Use the following fields in tracker ticket searches:</p>
<ul>
<li>User who owns the ticket - assigned_to_s</li>
<li>Labels assigned to the ticket - labels</li>
<li>Milestone the ticket is assigned to - _milestone</li>
<li>Last modified date - mod_date_dt</li>
<li>Body of the ticket - text</li>
<li>Number of ticket - ticket_num</li>
<li>User who created the ticket - reported_by_s</li>
<li>Status of the ticket - status</li>
<li>Title of the ticket - summary</li>
<li>Votes up/down of the ticket - votes_up_i/votes_down_i (if enabled in tool options)</li>
<li>Votes total of the ticket - votes_total_i</li>
<li>Custom field - the field name with an underscore in front, like _custom</li>
</ul>
<h2>Example searches</h2>
<p>Any ticket that is not closed in the 1.0 milestone with "foo" in the title</p>
<div class="codehilite"><pre>!status:closed AND summary:foo* AND _milestone:1.0</pre></div>
<p>Tickets with the label "foo" but not the label "bar":</p>
<div class="codehilite"><pre>labels:foo AND -labels:bar</pre></div>
<p>Tickets assigned to or added by a user with the username "admin1" and the custom field "size" set to 2</p>
<div class="codehilite"><pre>(assigned_to_s:admin1 or reported_by_s:admin1) AND _size:2</pre></div>
<p>Tickets that have "foo" in the title or body, with a ticket number of 50 or lower</p>
<div class="codehilite"><pre>(summary:foo or text:foo) AND ticket_num:[* TO 50]</pre></div>
<p>Tickets last modified in April 2012</p>
<div class="codehilite"><pre>mod_date_dt:[2012-04-01T00:00:00Z TO 2012-04-30T23:59:59Z]</pre></div>
<h2>Saving searches</h2>
<p>Ticket searches may be saved for later use by project administrators. To save a search, click "Edit Searches" in the tracker sidebar. Click "Add Bin" then enter a summary and search terms for the saved search. Your search will now show up in the sidebar under "Searches" with a count of how many tickets match the query.</p>
<h2>Sorting search results</h2>
<p>Ticket search results can be sorted by clicking the header of the column you want to sort by. The first click will sort the results in ascending order. Clicking the header again will sort the column in descending order. In addition to sorting by the column headers, you can manually sort on these properties:</p>
<ul>
<li>Labels assigned to the ticket - labels_s</li>
<li>Milestone the ticket is assigned to - _milestone_s</li>
<li>Last modified date - mod_date_dt</li>
<li>Body of the ticket - text_s</li>
<li>Number of ticket - ticket_num_i</li>
<li>User who created the ticket - reported_by_s</li>
<li>Status of the ticket - status_s</li>
<li>Title of the ticket - snippet_s</li>
<li>Custom field - the field name with an _ in front and _s at the end like _custom_s</li>
</ul>
<p>You can use these properties by appending them to the url (only one sort allowed at a time) like this:</p>
<div class="codehilite"><pre>/p/yourproject/tickets/search/?q=_milestone:1.0&sort=snippet_s+asc</pre></div></div>
""")
|
apache-2.0
| 1,969,437,678,831,374,600 | -5,921,157,911,175,089,000 | 43.10084 | 327 | 0.688643 | false |
rpadilha/rvpsite
|
rvpsite/blog/migrations/0001_initial.py
|
1
|
2442
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-15 16:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import s3direct.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blogs',
fields=[
('title', models.CharField(max_length=50, verbose_name='TÍTULO')),
('slug', models.SlugField(primary_key=True, serialize=False, verbose_name='SLUG')),
('category', models.CharField(choices=[('catalogos', 'CATÁLOGOS'), ('eventos', 'EVENTOS'), ('novidades', 'NOVIDADES'), ('promocoes', 'PROMOÇÕES'), ('outros', 'OUTROS')], max_length=15, verbose_name='CATEGORIA')),
('publish', models.BooleanField(default=False, verbose_name='PUBLICAR?')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='CRIADO EM')),
],
options={
'verbose_name': 'CADASTRO DE NOTÍCIA',
'verbose_name_plural': 'CADASTRO DE NOTÍCIAS',
'ordering': ('-created_at',),
},
),
migrations.CreateModel(
name='Contents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_out', models.IntegerField(verbose_name='ORDEM DE POSTAGEM')),
('text', models.TextField(blank=True, max_length=700, verbose_name='TEXTO')),
('picture', s3direct.fields.S3DirectField(blank=True, verbose_name='IMAGEM')),
('inverse', models.BooleanField(default=False, verbose_name='INVERTER ORDEM?')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Blogs', verbose_name='NOME')),
],
options={
'verbose_name': 'CONTEÚDO DE NOTÍCIA',
'verbose_name_plural': 'CONTEÚDOS DE NOTÍCIA',
'ordering': ('order_out',),
},
),
migrations.AlterUniqueTogether(
name='blogs',
unique_together=set([('slug', 'created_at')]),
),
migrations.AlterUniqueTogether(
name='contents',
unique_together=set([('title', 'order_out')]),
),
]
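# --- Hedged usage note (not part of the generated migration) ---
# A minimal sketch of applying this migration programmatically; assumes a
# configured project whose settings module is set in DJANGO_SETTINGS_MODULE
# (the equivalent of running "python manage.py migrate blog"):
#
#     import django
#     from django.core.management import call_command
#
#     django.setup()
#     call_command('migrate', 'blog')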
|
agpl-3.0
| -3,741,713,042,324,957,000 | 812,455,268,721,934,100 | 41.666667 | 228 | 0.561266 | false |
pranalik/frappe-bb
|
frappe/model/db_schema.py
|
15
|
12290
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Syncs a database table to the `DocType` (metadata)
.. note:: This module is only used internally
"""
import os
import frappe
from frappe import _
from frappe.utils import cstr, cint
class InvalidColumnName(frappe.ValidationError): pass
type_map = {
'Currency': ('decimal', '18,6')
,'Int': ('int', '11')
,'Float': ('decimal', '18,6')
,'Percent': ('decimal', '18,6')
,'Check': ('int', '1')
,'Small Text': ('text', '')
,'Long Text': ('longtext', '')
,'Code': ('text', '')
,'Text Editor': ('text', '')
,'Date': ('date', '')
,'Datetime': ('datetime', '6')
,'Time': ('time', '6')
,'Text': ('text', '')
,'Data': ('varchar', '255')
,'Link': ('varchar', '255')
,'Dynamic Link':('varchar', '255')
,'Password': ('varchar', '255')
,'Select': ('varchar', '255')
,'Read Only': ('varchar', '255')
,'Attach': ('varchar', '255')
}
default_columns = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',\
'parentfield', 'parenttype', 'idx']
default_shortcuts = ['_Login', '__user', '_Full Name', 'Today', '__today']
# -------------------------------------------------
# Class database table
# -------------------------------------------------
class DbTable:
def __init__(self, doctype, prefix = 'tab'):
self.doctype = doctype
self.name = prefix + doctype
self.columns = {}
self.current_columns = {}
# lists for change
self.add_column = []
self.change_type = []
self.add_index = []
self.drop_index = []
		self.set_default = []
		self.drop_foreign_key = []  # referenced by drop_foreign_keys() below
# load
self.get_columns_from_docfields()
def create(self):
add_text = ''
# columns
column_defs = self.get_column_definitions()
if column_defs: add_text += ',\n'.join(column_defs) + ',\n'
# index
index_defs = self.get_index_definitions()
if index_defs: add_text += ',\n'.join(index_defs) + ',\n'
# create table
frappe.db.sql("""create table `%s` (
name varchar(255) not null primary key,
creation datetime(6),
modified datetime(6),
modified_by varchar(255),
owner varchar(255),
docstatus int(1) default '0',
parent varchar(255),
parentfield varchar(255),
parenttype varchar(255),
idx int(8),
%sindex parent(parent))
ENGINE=InnoDB
CHARACTER SET=utf8""" % (self.name, add_text))
def get_columns_from_docfields(self):
"""
get columns from docfields and custom fields
"""
fl = frappe.db.sql("SELECT * FROM tabDocField WHERE parent = %s", self.doctype, as_dict = 1)
precisions = {}
if not frappe.flags.in_install_app:
custom_fl = frappe.db.sql("""\
SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.doctype,), as_dict=1)
if custom_fl: fl += custom_fl
# get precision from property setters
for ps in frappe.get_all("Property Setter", fields=["field_name", "value"],
filters={"doc_type": self.doctype, "doctype_or_field": "DocField", "property": "precision"}):
precisions[ps.field_name] = ps.value
for f in fl:
self.columns[f['fieldname']] = DbColumn(self, f['fieldname'],
f['fieldtype'], f.get('length'), f.get('default'), f.get('search_index'),
f.get('options'), precisions.get(f['fieldname']) or f.get('precision'))
def get_columns_from_db(self):
self.show_columns = frappe.db.sql("desc `%s`" % self.name)
for c in self.show_columns:
self.current_columns[c[0]] = {'name': c[0], 'type':c[1], 'index':c[3], 'default':c[4]}
def get_column_definitions(self):
column_list = [] + default_columns
ret = []
for k in self.columns.keys():
if k not in column_list:
d = self.columns[k].get_definition()
if d:
ret.append('`'+ k+ '` ' + d)
column_list.append(k)
return ret
def get_index_definitions(self):
ret = []
for key, col in self.columns.items():
if col.set_index and col.fieldtype in type_map and \
type_map.get(col.fieldtype)[0] not in ('text', 'longtext'):
ret.append('index `' + key + '`(`' + key + '`)')
return ret
# GET foreign keys
def get_foreign_keys(self):
fk_list = []
txt = frappe.db.sql("show create table `%s`" % self.name)[0][1]
for line in txt.split('\n'):
if line.strip().startswith('CONSTRAINT') and line.find('FOREIGN')!=-1:
try:
fk_list.append((line.split('`')[3], line.split('`')[1]))
except IndexError:
pass
return fk_list
# Drop foreign keys
def drop_foreign_keys(self):
if not self.drop_foreign_key:
return
fk_list = self.get_foreign_keys()
# make dictionary of constraint names
fk_dict = {}
for f in fk_list:
fk_dict[f[0]] = f[1]
# drop
for col in self.drop_foreign_key:
frappe.db.sql("set foreign_key_checks=0")
frappe.db.sql("alter table `%s` drop foreign key `%s`" % (self.name, fk_dict[col.fieldname]))
frappe.db.sql("set foreign_key_checks=1")
def sync(self):
if not self.name in DbManager(frappe.db).get_tables_list(frappe.db.cur_db_name):
self.create()
else:
self.alter()
def alter(self):
self.get_columns_from_db()
for col in self.columns.values():
col.check(self.current_columns.get(col.fieldname, None))
query = []
for col in self.add_column:
query.append("add column `{}` {}".format(col.fieldname, col.get_definition()))
for col in self.change_type:
query.append("change `{}` `{}` {}".format(col.fieldname, col.fieldname, col.get_definition()))
for col in self.add_index:
# if index key not exists
if not frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("add index `{}`(`{}`)".format(col.fieldname, col.fieldname))
for col in self.drop_index:
if col.fieldname != 'name': # primary key
# if index key exists
if frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("drop index `{}`".format(col.fieldname))
for col in list(set(self.set_default).difference(set(self.change_type))):
if col.fieldname=="name":
continue
if not col.default:
col_default = "null"
else:
col_default = '"{}"'.format(col.default.replace('"', '\\"'))
query.append('alter column `{}` set default {}'.format(col.fieldname, col_default))
if query:
frappe.db.sql("alter table `{}` {}".format(self.name, ", ".join(query)))
class DbColumn:
def __init__(self, table, fieldname, fieldtype, length, default, set_index, options, precision):
self.table = table
self.fieldname = fieldname
self.fieldtype = fieldtype
self.length = length
self.set_index = set_index
self.default = default
self.options = options
self.precision = precision
def get_definition(self, with_default=1):
ret = get_definition(self.fieldtype, self.precision)
if with_default and self.default and (self.default not in default_shortcuts) \
and not self.default.startswith(":") and ret not in ['text', 'longtext']:
ret += ' default "' + self.default.replace('"', '\"') + '"'
return ret
def check(self, current_def):
column_def = self.get_definition(0)
# no columns
if not column_def:
return
# to add?
if not current_def:
self.fieldname = validate_column_name(self.fieldname)
self.table.add_column.append(self)
return
# type
if current_def['type'] != column_def:
self.table.change_type.append(self)
# index
else:
if (current_def['index'] and not self.set_index):
self.table.drop_index.append(self)
if (not current_def['index'] and self.set_index and not (column_def in ['text', 'longtext'])):
self.table.add_index.append(self)
# default
if (self.default_changed(current_def) and (self.default not in default_shortcuts) and not cstr(self.default).startswith(":") and not (column_def in ['text','longtext'])):
self.table.set_default.append(self)
def default_changed(self, current_def):
if "decimal" in current_def['type']:
try:
return float(current_def['default'])!=float(self.default)
except TypeError:
return True
else:
return current_def['default'] != self.default
class DbManager:
"""
	Basically, a wrapper for oft-used MySQL commands, like show tables, databases, variables, etc.
#TODO:
0. Simplify / create settings for the restore database source folder
0a. Merge restore database and extract_sql(from frappe_server_tools).
1. Setter and getter for different mysql variables.
2. Setter and getter for mysql variables at global level??
"""
def __init__(self,db):
"""
Pass root_conn here for access to all databases.
"""
if db:
self.db = db
def get_variables(self,regex):
"""
Get variables that match the passed pattern regex
"""
return list(self.db.sql("SHOW VARIABLES LIKE '%s'"%regex))
def get_table_schema(self,table):
"""
Just returns the output of Desc tables.
"""
return list(self.db.sql("DESC `%s`"%table))
def get_tables_list(self,target=None):
"""get list of tables"""
if target:
self.db.use(target)
return [t[0] for t in self.db.sql("SHOW TABLES")]
def create_user(self, user, password, host):
#Create user if it doesn't exist.
try:
if password:
self.db.sql("CREATE USER '%s'@'%s' IDENTIFIED BY '%s';" % (user[:16], host, password))
else:
self.db.sql("CREATE USER '%s'@'%s';" % (user[:16], host))
except Exception:
raise
def delete_user(self, target, host):
# delete user if exists
try:
self.db.sql("DROP USER '%s'@'%s';" % (target, host))
except Exception, e:
if e.args[0]==1396:
pass
else:
raise
def create_database(self,target):
if target in self.get_database_list():
self.drop_database(target)
self.db.sql("CREATE DATABASE IF NOT EXISTS `%s` ;" % target)
def drop_database(self,target):
self.db.sql("DROP DATABASE IF EXISTS `%s`;"%target)
def grant_all_privileges(self, target, user, host):
self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'%s';" % (target, user, host))
def grant_select_privilges(self, db, table, user, host):
if table:
self.db.sql("GRANT SELECT ON %s.%s to '%s'@'%s';" % (db, table, user, host))
else:
self.db.sql("GRANT SELECT ON %s.* to '%s'@'%s';" % (db, user, host))
def flush_privileges(self):
self.db.sql("FLUSH PRIVILEGES")
def get_database_list(self):
"""get list of databases"""
return [d[0] for d in self.db.sql("SHOW DATABASES")]
def restore_database(self,target,source,user,password):
from frappe.utils import make_esc
esc = make_esc('$ ')
os.system("mysql -u %s -p%s -h%s %s < %s" % \
(esc(user), esc(password), esc(frappe.db.host), esc(target), source))
def drop_table(self,table_name):
"""drop table if exists"""
if not table_name in self.get_tables_list():
return
self.db.sql("DROP TABLE IF EXISTS %s "%(table_name))
def validate_column_name(n):
n = n.replace(' ','_').strip().lower()
import re
if re.search("[\W]", n):
frappe.throw(_("Fieldname {0} cannot contain letters, numbers or spaces").format(n), InvalidColumnName)
return n
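# --- Hedged illustration (not part of the original module) ---
# Example behaviour of validate_column_name (inputs chosen here):
#
#     validate_column_name("Due Date")  -> "due_date"
#     validate_column_name("rate (%)")  -> raises InvalidColumnName,
#                                          since "(" matches [\W]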
def updatedb(dt):
"""
Syncs a `DocType` to the table
* creates if required
* updates columns
* updates indices
"""
res = frappe.db.sql("select ifnull(issingle, 0) from tabDocType where name=%s", (dt,))
if not res:
raise Exception, 'Wrong doctype "%s" in updatedb' % dt
if not res[0][0]:
frappe.db.commit()
tab = DbTable(dt, 'tab')
tab.sync()
frappe.db.begin()
def remove_all_foreign_keys():
frappe.db.sql("set foreign_key_checks = 0")
frappe.db.commit()
for t in frappe.db.sql("select name from tabDocType where ifnull(issingle,0)=0"):
dbtab = DbTable(t[0])
try:
fklist = dbtab.get_foreign_keys()
except Exception, e:
if e.args[0]==1146:
fklist = []
else:
raise
for f in fklist:
frappe.db.sql("alter table `tab%s` drop foreign key `%s`" % (t[0], f[1]))
def get_definition(fieldtype, precision=None):
d = type_map.get(fieldtype)
if not d:
return
ret = d[0]
if d[1]:
length = d[1]
if fieldtype in ["Float", "Currency", "Percent"] and cint(precision) > 6:
length = '18,9'
ret += '(' + length + ')'
return ret
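# --- Hedged illustration (not part of the original module) ---
# Worked examples of get_definition, based only on type_map above:
#
#     get_definition("Data")                  -> 'varchar(255)'
#     get_definition("Currency")              -> 'decimal(18,6)'
#     get_definition("Currency", precision=9) -> 'decimal(18,9)'
#     get_definition("Unknown")               -> None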
def add_column(doctype, column_name, fieldtype, precision=None):
frappe.db.commit()
frappe.db.sql("alter table `tab%s` add column %s %s" % (doctype,
column_name, get_definition(fieldtype, precision)))
|
mit
| 4,095,577,606,443,012,000 | -8,118,481,781,990,692,000 | 27.252874 | 172 | 0.634825 | false |
JonasSC/SuMPF
|
tests/tests/_internal/test_interpolation.py
|
1
|
11458
|
# This file is a part of the "SuMPF" package
# Copyright (C) 2018-2021 Jonas Schulte-Coerne
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests the interpolation functions"""
import hypothesis.extra.numpy
import numpy
import pytest
import sumpf._internal as sumpf_internal
def xs_ys(data, interpolation):
"""A helper function, that creates arrays of x and y values from the data pairs,
that have been created by hypothesis.
"""
if data:
xs, ys = map(numpy.array, zip(*sorted(data)))
else:
xs = numpy.empty(0)
ys = numpy.empty(0)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG):
if (xs <= 0).any():
xs -= xs.min()
xs += 1e-15
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys) + 1e-15
return xs, ys
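# --- Hedged illustration (not part of the original tests) ---
# xs_ys sorts the pairs by x and splits them into parallel arrays, e.g.:
#
#     xs, ys = xs_ys([(2.0, 1.0 + 1.0j), (1.0, 2.0 + 0.0j)],
#                    sumpf_internal.Interpolations.LINEAR)
#     # xs == array([1.0, 2.0]), ys == array([2.0+0.0j, 1.0+1.0j])
#
# For the logarithmic variants it additionally shifts xs to be strictly
# positive and/or replaces ys with abs(ys) + 1e-15, as the code above shows.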
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
data=hypothesis.strategies.lists(elements=hypothesis.strategies.tuples(hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), # pylint: disable=line-too-long
hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
min_size=0, max_size=2 ** 12,
unique_by=lambda t: t[0]))
def test_supporting_points(interpolation, data):
"""Tests if the interpolation at a supporting point is exactly the given y value"""
func = sumpf_internal.interpolation.get(interpolation)
xs, ys = xs_ys(data, interpolation)
assert (func(xs, xs, ys) == ys).all()
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
data=hypothesis.strategies.lists(elements=hypothesis.strategies.tuples(hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), # pylint: disable=line-too-long
hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
min_size=1, max_size=2 ** 12,
unique_by=lambda t: t[0]),
x=hypothesis.strategies.lists(elements=hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), min_size=0, max_size=2 ** 12)) # pylint: disable=line-too-long
def test_x_as_scalar_and_vector(interpolation, data, x):
"""Tests if running a vectorized interpolation returns the same result as the scalar version."""
func = sumpf_internal.interpolation.get(interpolation)
xs, ys = xs_ys(data, interpolation)
x = numpy.array(x)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG):
if (x <= 0).any():
x -= x.min()
x += 1e-15
scalar = [func(s, xs, ys) for s in x]
vector = list(func(x, xs, ys))
assert scalar == pytest.approx(vector, nan_ok=True)
@pytest.mark.filterwarnings("ignore:divide by zero")
@hypothesis.given(interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
xs=hypothesis.extra.numpy.arrays(dtype=numpy.float64, shape=2, elements=hypothesis.strategies.floats(min_value=-1e15, max_value=1e15), unique=True), # pylint: disable=line-too-long
ys=hypothesis.extra.numpy.arrays(dtype=numpy.complex128, shape=2, elements=hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
k=hypothesis.strategies.floats(min_value=1e-15, max_value=1.0 - 1e-15))
def test_interpolation(interpolation, xs, ys, k): # noqa: C901; the function is not complex, it's just a long switch case
# pylint: disable=too-many-branches
"""Tests the computation of an interpolated value."""
func = sumpf_internal.interpolation.get(interpolation)
xs = numpy.array(sorted(xs))
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG) and \
min(xs) < 0.0:
xs -= min(xs)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC, sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys)
x = xs[0] + k * (xs[1] - xs[0])
hypothesis.assume(x not in xs) # due to the limited precision of floating point numbers, this can still happen
if interpolation is sumpf_internal.Interpolations.ZERO:
assert func(x, xs, ys) == 0.0
elif interpolation is sumpf_internal.Interpolations.ONE:
assert func(x, xs, ys) == 1.0
elif interpolation is sumpf_internal.Interpolations.LINEAR:
assert func(x, xs, ys) == pytest.approx(numpy.interp(x, xs, ys))
elif interpolation is sumpf_internal.Interpolations.LOGARITHMIC:
log_xs = numpy.log2(xs)
log_ys = numpy.log(numpy.abs(ys))
assert func(x, xs, ys) == pytest.approx(numpy.exp(numpy.interp(numpy.log2(x), log_xs, log_ys)), nan_ok=True)
elif interpolation is sumpf_internal.Interpolations.LOG_X:
log_xs = numpy.log2(xs)
assert func(x, xs, ys) == pytest.approx(numpy.interp(numpy.log2(x), log_xs, ys))
elif interpolation is sumpf_internal.Interpolations.LOG_Y:
log_ys = numpy.log(numpy.abs(ys))
assert func(x, xs, ys) == pytest.approx(numpy.exp(numpy.interp(x, xs, log_ys)), nan_ok=True)
elif interpolation is sumpf_internal.Interpolations.STAIRS_LIN:
if k < 0.5:
assert func(x, xs, ys) == ys[0]
else:
assert func(x, xs, ys) == ys[1]
elif interpolation is sumpf_internal.Interpolations.STAIRS_LOG:
if numpy.log(x) - numpy.log(xs[0]) < numpy.log(xs[1]) - numpy.log(x):
assert func(x, xs, ys) == ys[0]
else:
assert func(x, xs, ys) == ys[1]
else:
raise ValueError(f"Unknown interpolation: {interpolation}.")
@pytest.mark.filterwarnings("ignore:divide by zero encountered in log", "ignore:invalid value encountered", "ignore:overflow encountered in exp") # pylint: disable=line-too-long
@hypothesis.given(xs=hypothesis.extra.numpy.arrays(dtype=numpy.float64, shape=2, elements=hypothesis.strategies.floats(min_value=0.0, max_value=1e12), unique=True), # pylint: disable=line-too-long
ys=hypothesis.extra.numpy.arrays(dtype=numpy.complex128, shape=2, elements=hypothesis.strategies.complex_numbers(min_magnitude=0.0, max_magnitude=1e15)), # pylint: disable=line-too-long
interpolation=hypothesis.strategies.sampled_from(sumpf_internal.Interpolations),
delta_x=hypothesis.strategies.floats(min_value=1e-15, max_value=1e15))
def test_extrapolation(xs, ys, interpolation, delta_x): # noqa: C901; the function is not complex, it's just a long switch case
# pylint: disable=too-many-branches,too-many-statements
"""Tests the computation of an extrapolated value."""
func = sumpf_internal.interpolation.get(interpolation)
xs = numpy.array(sorted(xs))
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC,
sumpf_internal.Interpolations.LOG_X,
sumpf_internal.Interpolations.STAIRS_LOG) and \
min(xs) < 0.0:
xs -= min(xs)
if interpolation in (sumpf_internal.Interpolations.LOGARITHMIC, sumpf_internal.Interpolations.LOG_Y):
ys = numpy.abs(ys)
x0 = xs[0] * (1.0 - delta_x) - delta_x
x1 = xs[1] * (1.0 + delta_x) + delta_x
if interpolation is sumpf_internal.Interpolations.ZERO:
assert func(x0, xs, ys) == 0.0
assert func(x1, xs, ys) == 0.0
elif interpolation is sumpf_internal.Interpolations.ONE:
assert func(x0, xs, ys) == 1.0
assert func(x1, xs, ys) == 1.0
elif interpolation is sumpf_internal.Interpolations.LINEAR:
m = (ys[1] - ys[0]) / (xs[1] - xs[0])
n0 = ys[0] - m * xs[0]
n1 = ys[1] - m * xs[1]
assert func(x0, xs, ys) == pytest.approx(m * x0 + n0)
assert func(x1, xs, ys) == pytest.approx(m * x1 + n1)
elif interpolation is sumpf_internal.Interpolations.LOGARITHMIC:
if 0.0 in ys:
assert numpy.isnan(func(x0, xs, ys))
assert numpy.isnan(func(x1, xs, ys))
else:
log_xs = numpy.log2(xs)
log_ys = numpy.log2(ys)
m = (log_ys[1] - log_ys[0]) / (log_xs[1] - log_xs[0])
r0 = numpy.exp2(m * numpy.log2(x0) + log_ys[0] - m * log_xs[0])
r1 = numpy.exp2(m * numpy.log2(x1) + log_ys[1] - m * log_xs[1])
assert (numpy.isnan(func(x0, xs, ys)) and numpy.isnan(r0)) or (func(x0, xs, ys) == pytest.approx(r0))
assert (numpy.isnan(func(x1, xs, ys)) and numpy.isnan(r1)) or (func(x1, xs, ys) == pytest.approx(r1))
elif interpolation is sumpf_internal.Interpolations.LOG_X:
log_xs = numpy.log2(xs)
m = (ys[1] - ys[0]) / (log_xs[1] - log_xs[0])
r0 = m * numpy.log2(x0) + ys[0] - m * log_xs[0]
r1 = m * numpy.log2(x1) + ys[1] - m * log_xs[1]
assert (numpy.isnan(func(x0, xs, ys)) and numpy.isnan(r0)) or (func(x0, xs, ys) == pytest.approx(r0))
assert (numpy.isnan(func(x1, xs, ys)) and numpy.isnan(r1)) or (func(x1, xs, ys) == pytest.approx(r1))
elif interpolation is sumpf_internal.Interpolations.LOG_Y:
if 0.0 in ys:
assert numpy.isnan(func(x0, xs, ys))
assert numpy.isnan(func(x1, xs, ys))
else:
log_ys = numpy.log2(ys)
m = (log_ys[1] - log_ys[0]) / (xs[1] - xs[0])
n0 = log_ys[0] - m * xs[0]
n1 = log_ys[1] - m * xs[1]
assert func(x0, xs, ys) == pytest.approx(numpy.exp2(m * x0 + n0))
assert func(x1, xs, ys) == pytest.approx(numpy.exp2(m * x1 + n1))
elif interpolation is sumpf_internal.Interpolations.STAIRS_LIN:
assert func(x0, xs, ys) == ys[0]
assert func(x1, xs, ys) == ys[1]
elif interpolation is sumpf_internal.Interpolations.STAIRS_LOG:
assert func(x0, xs, ys) == ys[0]
assert func(x1, xs, ys) == ys[1]
else:
raise ValueError(f"Unknown interpolation: {interpolation}.")
|
lgpl-3.0
| -6,398,326,933,496,079,000 | -2,568,057,842,465,986,000 | 57.162437 | 207 | 0.619393 | false |
chudaol/edx-platform
|
common/lib/capa/capa/tests/response_xml_factory.py
|
30
|
34410
|
from lxml import etree
from abc import ABCMeta, abstractmethod
class ResponseXMLFactory(object):
""" Abstract base class for capa response XML factories.
Subclasses override create_response_element and
create_input_element to produce XML of particular response types"""
__metaclass__ = ABCMeta
@abstractmethod
def create_response_element(self, **kwargs):
""" Subclasses override to return an etree element
representing the capa response XML
(e.g. <numericalresponse>).
The tree should NOT contain any input elements
(such as <textline />) as these will be added later."""
return None
@abstractmethod
def create_input_element(self, **kwargs):
""" Subclasses override this to return an etree element
representing the capa input XML (such as <textline />)"""
return None
def build_xml(self, **kwargs):
""" Construct an XML string for a capa response
based on **kwargs.
**kwargs is a dictionary that will be passed
to create_response_element() and create_input_element().
See the subclasses below for other keyword arguments
you can specify.
For all response types, **kwargs can contain:
*question_text*: The text of the question to display,
wrapped in <p> tags.
*explanation_text*: The detailed explanation that will
be shown if the user answers incorrectly.
*script*: The embedded Python script (a string)
*num_responses*: The number of responses to create [DEFAULT: 1]
*num_inputs*: The number of input elements
to create [DEFAULT: 1]
Returns a string representation of the XML tree.
"""
        # Retrieve keyword arguments
question_text = kwargs.get('question_text', '')
explanation_text = kwargs.get('explanation_text', '')
script = kwargs.get('script', None)
num_responses = kwargs.get('num_responses', 1)
num_inputs = kwargs.get('num_inputs', 1)
# The root is <problem>
root = etree.Element("problem")
# Add a script if there is one
if script:
script_element = etree.SubElement(root, "script")
script_element.set("type", "loncapa/python")
script_element.text = str(script)
# The problem has a child <p> with question text
question = etree.SubElement(root, "p")
question.text = question_text
# Add the response(s)
for __ in range(int(num_responses)):
response_element = self.create_response_element(**kwargs)
root.append(response_element)
# Add input elements
for __ in range(int(num_inputs)):
input_element = self.create_input_element(**kwargs)
            if input_element is not None:
response_element.append(input_element)
# The problem has an explanation of the solution
if explanation_text:
explanation = etree.SubElement(root, "solution")
explanation_div = etree.SubElement(explanation, "div")
explanation_div.set("class", "detailed-solution")
explanation_div.text = explanation_text
return etree.tostring(root)
@staticmethod
def textline_input_xml(**kwargs):
""" Create a <textline/> XML element
Uses **kwargs:
*math_display*: If True, then includes a MathJax display of user input
*size*: An integer representing the width of the text line
"""
math_display = kwargs.get('math_display', False)
size = kwargs.get('size', None)
input_element = etree.Element('textline')
if math_display:
input_element.set('math', '1')
if size:
input_element.set('size', str(size))
return input_element
@staticmethod
def choicegroup_input_xml(**kwargs):
""" Create a <choicegroup> XML element
Uses **kwargs:
*choice_type*: Can be "checkbox", "radio", or "multiple"
*choices*: List of True/False values indicating whether
a particular choice is correct or not.
Users must choose *all* correct options in order
to be marked correct.
DEFAULT: [True]
*choice_names": List of strings identifying the choices.
If specified, you must ensure that
len(choice_names) == len(choices)
"""
# Names of group elements
group_element_names = {
'checkbox': 'checkboxgroup',
'radio': 'radiogroup',
'multiple': 'choicegroup'
}
# Retrieve **kwargs
choices = kwargs.get('choices', [True])
choice_type = kwargs.get('choice_type', 'multiple')
choice_names = kwargs.get('choice_names', [None] * len(choices))
# Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
assert choice_type in group_element_names
group_element = etree.Element(group_element_names[choice_type])
# Create the <choice> elements
for (correct_val, name) in zip(choices, choice_names):
choice_element = etree.SubElement(group_element, "choice")
choice_element.set("correct", "true" if correct_val else "false")
# Add a name identifying the choice, if one exists
# For simplicity, we use the same string as both the
# name attribute and the text of the element
if name:
choice_element.text = str(name)
choice_element.set("name", str(name))
return group_element
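# --- Hedged illustration (not part of the original factory) ---
# Example call (values invented here): a radio group with one correct choice.
#
#     ResponseXMLFactory.choicegroup_input_xml(
#         choice_type='radio', choices=[False, True],
#         choice_names=['wrong', 'right'])
#
# yields, roughly:
#
#     <radiogroup>
#       <choice correct="false" name="wrong">wrong</choice>
#       <choice correct="true" name="right">right</choice>
#     </radiogroup>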
class NumericalResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <numericalresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <numericalresponse> XML element.
Uses **kwarg keys:
*answer*: The correct answer (e.g. "5")
*tolerance*: The tolerance within which a response
is considered correct. Can be a decimal (e.g. "0.01")
or percentage (e.g. "2%")
"""
answer = kwargs.get('answer', None)
tolerance = kwargs.get('tolerance', None)
response_element = etree.Element('numericalresponse')
if answer:
if isinstance(answer, float):
response_element.set('answer', repr(answer))
else:
response_element.set('answer', str(answer))
if tolerance:
responseparam_element = etree.SubElement(response_element, 'responseparam')
responseparam_element.set('type', 'tolerance')
responseparam_element.set('default', str(tolerance))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
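# --- Hedged usage sketch (not part of the original factory) ---
# Building a complete numerical-response problem (values invented here):
#
#     factory = NumericalResponseXMLFactory()
#     xml = factory.build_xml(question_text="What is 2 + 3?",
#                             answer=5, tolerance="5%")
#     # xml is a <problem> containing one <numericalresponse answer="5">
#     # with a tolerance <responseparam> and a <textline/> input.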
class CustomResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <customresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <customresponse> XML element.
Uses **kwargs:
*cfn*: the Python code to run. Can be inline code,
or the name of a function defined in earlier <script> tags.
Should have the form: cfn(expect, answer_given, student_answers)
where expect is a value (see below),
answer_given is a single value (for 1 input)
or a list of values (for multiple inputs),
and student_answers is a dict of answers by input ID.
*expect*: The value passed to the function cfn
*answer*: Inline script that calculates the answer
"""
# Retrieve **kwargs
cfn = kwargs.get('cfn', None)
expect = kwargs.get('expect', None)
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
cfn_extra_args = kwargs.get('cfn_extra_args', None)
# Create the response element
response_element = etree.Element("customresponse")
if cfn:
response_element.set('cfn', str(cfn))
if expect:
response_element.set('expect', str(expect))
if answer:
answer_element = etree.SubElement(response_element, "answer")
answer_element.text = str(answer)
if options:
response_element.set('options', str(options))
if cfn_extra_args:
response_element.set('cfn_extra_args', str(cfn_extra_args))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class SchematicResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <schematicresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create the <schematicresponse> XML element.
Uses *kwargs*:
*answer*: The Python script used to evaluate the answer.
"""
answer_script = kwargs.get('answer', None)
# Create the <schematicresponse> element
response_element = etree.Element("schematicresponse")
# Insert the <answer> script if one is provided
if answer_script:
answer_element = etree.SubElement(response_element, "answer")
answer_element.set("type", "loncapa/python")
answer_element.text = str(answer_script)
return response_element
def create_input_element(self, **kwargs):
""" Create the <schematic> XML element.
Although <schematic> can have several attributes,
(*height*, *width*, *parts*, *analyses*, *submit_analysis*, and *initial_value*),
none of them are used in the capa module.
For testing, we create a bare-bones version of <schematic>."""
return etree.Element("schematic")
class CodeResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <coderesponse> XML trees """
def build_xml(self, **kwargs):
# Since we are providing an <answer> tag,
# we should override the default behavior
# of including a <solution> tag as well
kwargs['explanation_text'] = None
return super(CodeResponseXMLFactory, self).build_xml(**kwargs)
def create_response_element(self, **kwargs):
"""
Create a <coderesponse> XML element.
Uses **kwargs:
*initial_display*: The code that initially appears in the textbox
[DEFAULT: "Enter code here"]
*answer_display*: The answer to display to the student
[DEFAULT: "This is the correct answer!"]
*grader_payload*: A JSON-encoded string sent to the grader
[DEFAULT: empty dict string]
*allowed_files*: A space-separated string of file names.
[DEFAULT: None]
*required_files*: A space-separated string of file names.
[DEFAULT: None]
"""
# Get **kwargs
initial_display = kwargs.get("initial_display", "Enter code here")
answer_display = kwargs.get("answer_display", "This is the correct answer!")
grader_payload = kwargs.get("grader_payload", '{}')
allowed_files = kwargs.get("allowed_files", None)
required_files = kwargs.get("required_files", None)
# Create the <coderesponse> element
response_element = etree.Element("coderesponse")
# If files are involved, create the <filesubmission> element.
has_files = allowed_files or required_files
if has_files:
filesubmission_element = etree.SubElement(response_element, "filesubmission")
if allowed_files:
filesubmission_element.set("allowed_files", allowed_files)
if required_files:
filesubmission_element.set("required_files", required_files)
# Create the <codeparam> element.
codeparam_element = etree.SubElement(response_element, "codeparam")
# Set the initial display text
initial_element = etree.SubElement(codeparam_element, "initial_display")
initial_element.text = str(initial_display)
# Set the answer display text
answer_element = etree.SubElement(codeparam_element, "answer_display")
answer_element.text = str(answer_display)
# Set the grader payload string
grader_element = etree.SubElement(codeparam_element, "grader_payload")
grader_element.text = str(grader_payload)
# Create the input within the response
if not has_files:
input_element = etree.SubElement(response_element, "textbox")
input_element.set("mode", "python")
return response_element
def create_input_element(self, **kwargs):
# Since we create this in create_response_element(),
# return None here
return None
class ChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <choiceresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <choiceresponse> element """
return etree.Element("choiceresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element."""
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class FormulaResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <formularesponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <formularesponse> element.
*sample_dict*: A dictionary of the form:
{ VARIABLE_NAME: (MIN, MAX), ....}
This specifies the range within which
to numerically sample each variable to check
student answers.
[REQUIRED]
*num_samples*: The number of times to sample the student's answer
to numerically compare it to the correct answer.
*tolerance*: The tolerance within which answers will be accepted
[DEFAULT: 0.01]
*answer*: The answer to the problem. Can be a formula string
or a Python variable defined in a script
(e.g. "$calculated_answer" for a Python variable
called calculated_answer)
[REQUIRED]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the formula for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
"""
# Retrieve kwargs
sample_dict = kwargs.get("sample_dict", None)
num_samples = kwargs.get("num_samples", None)
tolerance = kwargs.get("tolerance", 0.01)
answer = kwargs.get("answer", None)
hint_list = kwargs.get("hints", None)
assert answer
assert sample_dict and num_samples
# Create the <formularesponse> element
response_element = etree.Element("formularesponse")
# Set the sample information
sample_str = self._sample_str(sample_dict, num_samples, tolerance)
response_element.set("samples", sample_str)
# Set the tolerance
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("type", "tolerance")
responseparam_element.set("default", str(tolerance))
# Set the answer
response_element.set("answer", str(answer))
# Include hints, if specified
if hint_list:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
for (hint_prompt, hint_name, hint_text) in hint_list:
# For each hint, create a <formulahint> element
formulahint_element = etree.SubElement(hintgroup_element, "formulahint")
# We could sample a different range, but for simplicity,
# we use the same sample string for the hints
# that we used previously.
formulahint_element.set("samples", sample_str)
formulahint_element.set("answer", str(hint_prompt))
formulahint_element.set("name", str(hint_name))
# For each hint, create a <hintpart> element
# corresponding to the <formulahint>
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
text_element = etree.SubElement(hintpart_element, "text")
text_element.text = str(hint_text)
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
def _sample_str(self, sample_dict, num_samples, tolerance):
# Loncapa uses a special format for sample strings:
# "x,y,z@4,5,3:10,12,8#4" means plug in values for (x,y,z)
# from within the box defined by points (4,5,3) and (10,12,8)
# The "#4" means to repeat 4 times.
variables = [str(v) for v in sample_dict.keys()]
low_range_vals = [str(f[0]) for f in sample_dict.values()]
high_range_vals = [str(f[1]) for f in sample_dict.values()]
sample_str = (
",".join(sample_dict.keys()) + "@" +
",".join(low_range_vals) + ":" +
",".join(high_range_vals) +
"#" + str(num_samples)
)
return sample_str
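# --- Hedged illustration (not part of the original factory) ---
# A worked example of the sample-string encoding described above:
# with sample_dict = {'x': (1, 10), 'y': (2, 20)} and num_samples = 5,
# _sample_str returns "x,y@1,2:10,20#5", i.e. sample (x, y) five times
# from the box with corners (1, 2) and (10, 20). (Variable order follows
# the dict's iteration order.)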
class ImageResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <imageresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <imageresponse> element."""
return etree.Element("imageresponse")
def create_input_element(self, **kwargs):
""" Create the <imageinput> element.
Uses **kwargs:
*src*: URL for the image file [DEFAULT: "/static/image.jpg"]
*width*: Width of the image [DEFAULT: 100]
*height*: Height of the image [DEFAULT: 100]
*rectangle*: String representing the rectangles the user should select.
Take the form "(x1,y1)-(x2,y2)", where the two (x,y)
tuples define the corners of the rectangle.
Can include multiple rectangles separated by a semicolon, e.g.
"(490,11)-(556,98);(242,202)-(296,276)"
*regions*: String representing the regions a user can select
Take the form "[ [[x1,y1], [x2,y2], [x3,y3]],
[[x1,y1], [x2,y2], [x3,y3]] ]"
(Defines two regions, each with 3 points)
REQUIRED: Either *rectangle* or *region* (or both)
"""
# Get the **kwargs
src = kwargs.get("src", "/static/image.jpg")
width = kwargs.get("width", 100)
height = kwargs.get("height", 100)
rectangle = kwargs.get('rectangle', None)
regions = kwargs.get('regions', None)
assert rectangle or regions
# Create the <imageinput> element
input_element = etree.Element("imageinput")
input_element.set("src", str(src))
input_element.set("width", str(width))
input_element.set("height", str(height))
if rectangle:
input_element.set("rectangle", rectangle)
if regions:
input_element.set("regions", regions)
return input_element
class JavascriptResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <javascriptresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <javascriptresponse> element.
Uses **kwargs:
*generator_src*: Name of the JS file to generate the problem.
*grader_src*: Name of the JS file to grade the problem.
*display_class*: Name of the class used to display the problem
*display_src*: Name of the JS file used to display the problem
*param_dict*: Dictionary of parameters to pass to the JS
"""
# Get **kwargs
generator_src = kwargs.get("generator_src", None)
grader_src = kwargs.get("grader_src", None)
display_class = kwargs.get("display_class", None)
display_src = kwargs.get("display_src", None)
param_dict = kwargs.get("param_dict", {})
# Both display_src and display_class given,
# or neither given
assert((display_src and display_class) or
(not display_src and not display_class))
# Create the <javascriptresponse> element
response_element = etree.Element("javascriptresponse")
if generator_src:
generator_element = etree.SubElement(response_element, "generator")
generator_element.set("src", str(generator_src))
if grader_src:
grader_element = etree.SubElement(response_element, "grader")
grader_element.set("src", str(grader_src))
if display_class and display_src:
display_element = etree.SubElement(response_element, "display")
display_element.set("class", str(display_class))
display_element.set("src", str(display_src))
for (param_name, param_val) in param_dict.items():
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("name", str(param_name))
responseparam_element.set("value", str(param_val))
return response_element
def create_input_element(self, **kwargs):
""" Create the <javascriptinput> element """
return etree.Element("javascriptinput")
class MultipleChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <multiplechoiceresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <multiplechoiceresponse> element"""
return etree.Element('multiplechoiceresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class TrueFalseResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <truefalseresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <truefalseresponse> element"""
return etree.Element('truefalseresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class OptionResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <optionresponse> XML"""
def create_response_element(self, **kwargs):
""" Create the <optionresponse> element"""
return etree.Element("optionresponse")
def create_input_element(self, **kwargs):
""" Create the <optioninput> element.
Uses **kwargs:
*options*: a list of possible options the user can choose from [REQUIRED]
You must specify at least 2 options.
*correct_option*: the correct choice from the list of options [REQUIRED]
"""
options_list = kwargs.get('options', None)
correct_option = kwargs.get('correct_option', None)
assert options_list and correct_option
assert len(options_list) > 1
assert correct_option in options_list
# Create the <optioninput> element
optioninput_element = etree.Element("optioninput")
# Set the "options" attribute
# Format: "('first', 'second', 'third')"
options_attr_string = u",".join([u"'{}'".format(o) for o in options_list])
options_attr_string = u"({})".format(options_attr_string)
optioninput_element.set('options', options_attr_string)
# Set the "correct" attribute
optioninput_element.set('correct', str(correct_option))
return optioninput_element
class StringResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <stringresponse> XML """
def create_response_element(self, **kwargs):
""" Create a <stringresponse> XML element.
Uses **kwargs:
*answer*: The correct answer (a string) [REQUIRED]
*case_sensitive*: Whether the response is case-sensitive (True/False)
[DEFAULT: True]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the string for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
*hintfn*: The name of a function in the script to use for hints.
*regexp*: Whether the response is regexp
        *additional_answers*: list of additional answers.
*non_attribute_answers*: list of additional answers to be coded in the
non-attribute format
"""
# Retrieve the **kwargs
answer = kwargs.get("answer", None)
case_sensitive = kwargs.get("case_sensitive", None)
hint_list = kwargs.get('hints', None)
hint_fn = kwargs.get('hintfn', None)
regexp = kwargs.get('regexp', None)
additional_answers = kwargs.get('additional_answers', [])
non_attribute_answers = kwargs.get('non_attribute_answers', [])
assert answer
# Create the <stringresponse> element
response_element = etree.Element("stringresponse")
# Set the answer attribute
response_element.set("answer", unicode(answer))
# Set the case sensitivity and regexp:
type_value = ''
if case_sensitive is not None:
type_value += "cs" if case_sensitive else "ci"
type_value += ' regexp' if regexp else ''
if type_value:
response_element.set("type", type_value.strip())
# Add the hints if specified
if hint_list or hint_fn:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
if hint_list:
assert not hint_fn
for (hint_prompt, hint_name, hint_text) in hint_list:
stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
stringhint_element.set("answer", str(hint_prompt))
stringhint_element.set("name", str(hint_name))
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
hint_text_element = etree.SubElement(hintpart_element, "text")
hint_text_element.text = str(hint_text)
if hint_fn:
assert not hint_list
hintgroup_element.set("hintfn", hint_fn)
for additional_answer in additional_answers:
additional_node = etree.SubElement(response_element, "additional_answer") # pylint: disable=no-member
additional_node.set("answer", additional_answer)
for answer in non_attribute_answers:
additional_node = etree.SubElement(response_element, "additional_answer") # pylint: disable=no-member
additional_node.text = answer
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class AnnotationResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <annotationresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <annotationresponse> element """
return etree.Element("annotationresponse")
def create_input_element(self, **kwargs):
""" Create a <annotationinput> element."""
input_element = etree.Element("annotationinput")
text_children = [
{'tag': 'title', 'text': kwargs.get('title', 'super cool annotation')},
{'tag': 'text', 'text': kwargs.get('text', 'texty text')},
{'tag': 'comment', 'text': kwargs.get('comment', 'blah blah erudite comment blah blah')},
{'tag': 'comment_prompt', 'text': kwargs.get('comment_prompt', 'type a commentary below')},
{'tag': 'tag_prompt', 'text': kwargs.get('tag_prompt', 'select one tag')}
]
for child in text_children:
etree.SubElement(input_element, child['tag']).text = child['text']
default_options = [('green', 'correct'), ('eggs', 'incorrect'), ('ham', 'partially-correct')]
options = kwargs.get('options', default_options)
options_element = etree.SubElement(input_element, 'options')
for (description, correctness) in options:
option_element = etree.SubElement(options_element, 'option', {'choice': correctness})
option_element.text = description
return input_element
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <symbolicresponse> xml """
def create_response_element(self, **kwargs):
""" Build the <symbolicresponse> XML element.
Uses **kwargs:
*expect*: The correct answer (a sympy string)
*options*: list of option strings to pass to symmath_check
(e.g. 'matrix', 'qbit', 'imaginary', 'numerical')"""
# Retrieve **kwargs
expect = kwargs.get('expect', '')
options = kwargs.get('options', [])
# Symmath check expects a string of options
options_str = ",".join(options)
# Construct the <symbolicresponse> element
response_element = etree.Element('symbolicresponse')
if expect:
response_element.set('expect', str(expect))
if options_str:
response_element.set('options', str(options_str))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <choicetextresponse> xml """
def create_response_element(self, **kwargs):
""" Create a <choicetextresponse> element """
return etree.Element("choicetextresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element.
choices can be specified in the following format:
[("true", [{"answer": "5", "tolerance": 0}]),
("false", [{"answer": "5", "tolerance": 0}])
]
        This indicates that the first checkbox/radio is correct and
        contains a numtolerance_input with an answer of 5 and a tolerance of 0.
        It also indicates an incorrect second radio button or checkbox,
        also with its own numtolerance_input.
"""
choices = kwargs.get('choices', [("true", {})])
choice_inputs = []
# Ensure that the first element of choices is an ordered
# collection. It will start as a list, a tuple, or not a Container.
if not isinstance(choices[0], (list, tuple)):
choices = [choices]
for choice in choices:
correctness, answers = choice
numtolerance_inputs = []
# If the current `choice` contains any("answer": number)
# elements, turn those into numtolerance_inputs
if answers:
# `answers` will be a list or tuple of answers or a single
# answer, representing the answers for numtolerance_inputs
# inside of this specific choice.
# Make sure that `answers` is an ordered collection for
# convenience.
if not isinstance(answers, (list, tuple)):
answers = [answers]
numtolerance_inputs = [
self._create_numtolerance_input_element(answer)
for answer in answers
]
choice_inputs.append(
self._create_choice_element(
correctness=correctness,
inputs=numtolerance_inputs
)
)
# Default type is 'radiotextgroup'
input_type = kwargs.get('type', 'radiotextgroup')
input_element = etree.Element(input_type)
for ind, choice in enumerate(choice_inputs):
            # Give each choice text equal to its position (0, 1, 2, ...)
choice.text = "choice_{0}".format(ind)
input_element.append(choice)
return input_element
def _create_choice_element(self, **kwargs):
"""
Creates a choice element for a choictextproblem.
Defaults to a correct choice with no numtolerance_input
"""
text = kwargs.get('text', '')
correct = kwargs.get('correctness', "true")
inputs = kwargs.get('inputs', [])
choice_element = etree.Element("choice")
choice_element.set("correct", correct)
choice_element.text = text
for inp in inputs:
# Add all of the inputs as children of this choice
choice_element.append(inp)
return choice_element
def _create_numtolerance_input_element(self, params):
"""
Creates a <numtolerance_input/> or <decoy_input/> element with
optionally specified tolerance and answer.
"""
answer = params['answer'] if 'answer' in params else None
# If there is not an answer specified, Then create a <decoy_input/>
# otherwise create a <numtolerance_input/> and set its tolerance
# and answer attributes.
if answer:
text_input = etree.Element("numtolerance_input")
text_input.set('answer', answer)
            # If tolerance was specified, use it; otherwise
            # set the tolerance to "0"
text_input.set(
'tolerance',
params['tolerance'] if 'tolerance' in params else "0"
)
else:
text_input = etree.Element("decoy_input")
return text_input
|
agpl-3.0
| -3,607,793,989,432,723,500 | -4,036,246,515,851,744,000 | 36.647702 | 114 | 0.604882 | false |
bosstb/HaberPush
|
youtube_dl/extractor/eitb.py
|
71
|
3278
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
parse_iso8601,
sanitized_Request,
)
class EitbIE(InfoExtractor):
IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
'id': '4090227752001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
'description': 'Programa de reportajes de actualidad.',
'duration': 3996.76,
'timestamp': 1381789200,
'upload_date': '20131014',
'tags': list,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id,
video_id, 'Downloading video JSON')
media = video['web_media'][0]
formats = []
for rendition in media['RENDITIONS']:
video_url = rendition.get('PMD_URL')
if not video_url:
continue
tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000)
format_id = 'http'
if tbr:
format_id += '-%d' % int(tbr)
formats.append({
'url': rendition['PMD_URL'],
'format_id': format_id,
'width': int_or_none(rendition.get('FRAME_WIDTH')),
'height': int_or_none(rendition.get('FRAME_HEIGHT')),
'tbr': tbr,
})
hls_url = media.get('HLS_SURL')
if hls_url:
request = sanitized_Request(
'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/',
headers={'Referer': url})
token_data = self._download_json(
request, video_id, 'Downloading auth token', fatal=False)
if token_data:
token = token_data.get('token')
if token:
formats.extend(self._extract_m3u8_formats(
'%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False))
hds_url = media.get('HDS_SURL')
if hds_url:
formats.extend(self._extract_f4m_formats(
'%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'),
video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'],
'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'),
'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'),
'duration': float_or_none(media.get('LENGTH'), 1000),
'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '),
'tags': media.get('TAGS'),
'formats': formats,
}
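# --- Hedged usage note (not part of the extractor) ---
# An extractor like this is normally driven through YoutubeDL; a minimal
# sketch with a made-up URL of the matching shape:
#
#     import youtube_dl
#
#     url = 'http://www.eitb.tv/es/video/show/4104995148001/4090227752001/clip/'
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info(url, download=False)
#         print(info['title'], info['duration'])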
|
mit
| -1,558,962,271,945,751,800 | -4,512,121,483,993,808,000 | 36.238636 | 128 | 0.525175 | false |
jessrosenfield/pants
|
src/python/pants/backend/codegen/targets/jaxb_library.py
|
15
|
1613
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
class JaxbLibrary(JvmTarget):
"""Generates a stub Java library from jaxb xsd files."""
def __init__(self, payload=None, package=None, language='java', **kwargs):
"""
:param package: java package (com.company.package) in which to generate the output java files.
If unspecified, Pants guesses it from the file path leading to the schema
(xsd) file. This guess is accurate only if the .xsd file is in a path like
``.../com/company/package/schema.xsd``. Pants looks for packages that start with 'com', 'org',
or 'net'.
:param string language: only 'java' is supported. Default: 'java'
"""
payload = payload or Payload()
payload.add_fields({
'package': PrimitiveField(package),
'jaxb_language': PrimitiveField(language),
})
super(JaxbLibrary, self).__init__(payload=payload, **kwargs)
self.add_labels('codegen')
self.add_labels('jaxb')
if language != 'java':
raise ValueError('Language "{lang}" not supported for {class_type}'
.format(lang=language, class_type=type(self).__name__))
@property
def package(self):
return self.payload.package
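# --- Editor's addition: hedged usage sketch, not part of upstream Pants. ---
# A BUILD file would typically declare this target as below; the target name
# and schema path are hypothetical:
#   jaxb_library(
#     name='shipping-schemas',
#     sources=['com/company/package/shipping.xsd'],
#     package='com.company.package',  # optional: guessed from the path if omitted
#   )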
|
apache-2.0
| 8,263,685,731,244,156,000 | -1,451,297,292,291,337,500 | 37.404762 | 100 | 0.681959 | false |
juvoinc/airflow
|
airflow/ti_deps/dep_context.py
|
12
|
4987
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
from airflow.ti_deps.deps.dag_unpaused_dep import DagUnpausedDep
from airflow.ti_deps.deps.dagrun_exists_dep import DagrunRunningDep
from airflow.ti_deps.deps.exec_date_after_start_date_dep import ExecDateAfterStartDateDep
from airflow.ti_deps.deps.not_running_dep import NotRunningDep
from airflow.ti_deps.deps.not_skipped_dep import NotSkippedDep
from airflow.ti_deps.deps.pool_has_space_dep import PoolHasSpaceDep
from airflow.ti_deps.deps.runnable_exec_date_dep import RunnableExecDateDep
from airflow.ti_deps.deps.valid_state_dep import ValidStateDep
from airflow.utils.state import State
class DepContext(object):
"""
A base class for contexts that specifies which dependencies should be evaluated in
the context for a task instance to satisfy the requirements of the context. Also
    stores state related to the context that can be used by dependency classes.
For example there could be a SomeRunContext that subclasses this class which has
dependencies for:
- Making sure there are slots available on the infrastructure to run the task instance
- A task-instance's task-specific dependencies are met (e.g. the previous task
instance completed successfully)
- ...
:param deps: The context-specific dependencies that need to be evaluated for a
task instance to run in this execution context.
:type deps: set(BaseTIDep)
:param flag_upstream_failed: This is a hack to generate the upstream_failed state
creation while checking to see whether the task instance is runnable. It was the
shortest path to add the feature. This is bad since this class should be pure (no
side effects).
:type flag_upstream_failed: boolean
    :param ignore_all_deps: Whether or not the context should ignore all ignorable
dependencies. Overrides the other ignore_* parameters
:type ignore_all_deps: boolean
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for
Backfills)
:type ignore_depends_on_past: boolean
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and
trigger rule
:type ignore_task_deps: boolean
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: boolean
"""
def __init__(
self,
deps=None,
flag_upstream_failed=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False):
self.deps = deps or set()
self.flag_upstream_failed = flag_upstream_failed
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
# In order to be able to get queued a task must have one of these states
QUEUEABLE_STATES = {
State.FAILED,
State.NONE,
State.QUEUED,
State.SCHEDULED,
State.SKIPPED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
}
# The minimum execution context for task instances to be executed.
MIN_EXEC_DEPS = {
NotRunningDep(),
NotSkippedDep(),
RunnableExecDateDep(),
}
# Context to get the dependencies that need to be met in order for a task instance to
# be backfilled.
QUEUE_DEPS = MIN_EXEC_DEPS | {
ValidStateDep(QUEUEABLE_STATES)
}
# Dependencies that need to be met for a given task instance to be able to get run by an
# executor. This class just extends QueueContext by adding dependencies for resources.
RUN_DEPS = QUEUE_DEPS | {
DagTISlotsAvailableDep(),
PoolHasSpaceDep(),
}
# TODO(aoen): SCHEDULER_DEPS is not coupled to actual execution in any way and
# could easily be modified or removed from the scheduler causing this dependency to become
# outdated and incorrect. This coupling should be created (e.g. via a dag_deps analog of
# ti_deps that will be used in the scheduler code) to ensure that the logic here is
# equivalent to the logic in the scheduler.
# Dependencies that need to be met for a given task instance to get scheduled by the
# scheduler, then queued by the scheduler, then run by an executor.
SCHEDULER_DEPS = RUN_DEPS | {
DagrunRunningDep(),
DagUnpausedDep(),
ExecDateAfterStartDateDep(),
}
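# --- Editor's addition: a hedged usage sketch, not part of upstream Airflow
# (like the module itself, it needs airflow importable to run). ---
if __name__ == '__main__':
    # A scheduler-style context that evaluates the full dependency set while
    # skipping depends_on_past, e.g. for a backfill-like run:
    backfill_context = DepContext(
        deps=SCHEDULER_DEPS,
        ignore_depends_on_past=True)
    print(sorted(type(dep).__name__ for dep in backfill_context.deps))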
|
apache-2.0
| -7,605,932,226,328,579,000 | 1,231,398,732,015,259,400 | 41.623932 | 90 | 0.732505 | false |
40223231/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/SDL.py
|
603
|
1813
|
from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
    return _attrs.get(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
    # reassigning the width forces the browser to repaint the <canvas>
    screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
    for _rect in rects:
        SDL_UpdateRect(screen, _rect.x, _rect.y, _rect.w, _rect.h)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
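# --- Editor's addition: hedged usage sketch (only meaningful under Brython,
# since `from browser import document` requires a browser page):
#   SDL_WM_SetCaption('demo', 'demo')
#   SDL_WM_GetCaption()                                   # -> ('demo', 'demo')
#   SDL_UpdateRects(_Screen, [SDL_Rect(0, 0, 320, 240)])  # repaint the canvas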
|
gpl-3.0
| 5,172,145,620,441,544,000 | -5,056,792,204,299,189,000 | 16.266667 | 57 | 0.694429 | false |
detiber/lib_openshift
|
lib_openshift/models/v1_git_build_source.py
|
2
|
5508
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1GitBuildSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'uri': 'str',
'ref': 'str',
'http_proxy': 'str',
'https_proxy': 'str'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'uri': 'uri',
'ref': 'ref',
'http_proxy': 'httpProxy',
'https_proxy': 'httpsProxy'
}
def __init__(self, uri=None, ref=None, http_proxy=None, https_proxy=None):
"""
V1GitBuildSource - a model defined in Swagger
"""
self._uri = uri
self._ref = ref
self._http_proxy = http_proxy
self._https_proxy = https_proxy
@property
def uri(self):
"""
Gets the uri of this V1GitBuildSource.
URI points to the source that will be built. The structure of the source will depend on the type of build to run
:return: The uri of this V1GitBuildSource.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this V1GitBuildSource.
URI points to the source that will be built. The structure of the source will depend on the type of build to run
:param uri: The uri of this V1GitBuildSource.
:type: str
"""
self._uri = uri
@property
def ref(self):
"""
Gets the ref of this V1GitBuildSource.
Ref is the branch/tag/ref to build.
:return: The ref of this V1GitBuildSource.
:rtype: str
"""
return self._ref
@ref.setter
def ref(self, ref):
"""
Sets the ref of this V1GitBuildSource.
Ref is the branch/tag/ref to build.
:param ref: The ref of this V1GitBuildSource.
:type: str
"""
self._ref = ref
@property
def http_proxy(self):
"""
Gets the http_proxy of this V1GitBuildSource.
HTTPProxy is a proxy used to reach the git repository over http
:return: The http_proxy of this V1GitBuildSource.
:rtype: str
"""
return self._http_proxy
@http_proxy.setter
def http_proxy(self, http_proxy):
"""
Sets the http_proxy of this V1GitBuildSource.
HTTPProxy is a proxy used to reach the git repository over http
:param http_proxy: The http_proxy of this V1GitBuildSource.
:type: str
"""
self._http_proxy = http_proxy
@property
def https_proxy(self):
"""
Gets the https_proxy of this V1GitBuildSource.
HTTPSProxy is a proxy used to reach the git repository over https
:return: The https_proxy of this V1GitBuildSource.
:rtype: str
"""
return self._https_proxy
@https_proxy.setter
def https_proxy(self, https_proxy):
"""
Sets the https_proxy of this V1GitBuildSource.
HTTPSProxy is a proxy used to reach the git repository over https
:param https_proxy: The https_proxy of this V1GitBuildSource.
:type: str
"""
self._https_proxy = https_proxy
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1GitBuildSource.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
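if __name__ == '__main__':
    # --- Editor's addition: hedged usage sketch; the repository URL below is
    # illustrative only and not part of the generated client. ---
    source = V1GitBuildSource(
        uri='https://github.com/openshift/ruby-hello-world.git',
        ref='master')
    print(source.to_dict())   # {'uri': '...', 'ref': 'master', 'http_proxy': None, 'https_proxy': None}
    print(source)             # pretty-printed via to_str()/__repr__()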
|
apache-2.0
| -2,947,266,719,463,969,000 | 6,351,466,017,653,835,000 | 25.480769 | 120 | 0.565178 | false |
dahlstrom-g/intellij-community
|
python/helpers/pydev/_pydevd_bundle/pydevd_vars.py
|
7
|
26282
|
""" pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import math
import pickle
from _pydev_bundle.pydev_imports import quote
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, NUMPY_NUMERIC_TYPES, NUMPY_FLOATING_POINT_TYPES
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
try:
from collections import OrderedDict
except:
OrderedDict = dict
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import VariableWithOffset
SENTINEL_VALUE = []
DEFAULT_DF_FORMAT = "s"
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_current_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much more than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
# Note: commented this error message out (it may commonly happen
# if a message asking for a frame is issued while a thread is paused
# but the thread starts running before the message is actually
# handled).
# Leaving code to uncomment during tests.
# err_msg = '''find_frame: frame not found.
# Looking for thread_id:%s, frame_id:%s
# Current thread_id:%s, available frames:
# %s\n
# ''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
#
# sys.stderr.write(err_msg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2).
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
            # If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for attr in attrList:
attr.replace("@_@TAB_CHAR@_@", '\t')
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def get_offset(attrs):
"""
Extract offset from the given attributes.
:param attrs: The string of a compound variable fields split by tabs.
If an offset is given, it must go the first element.
:return: The value of offset if given or 0.
"""
offset = 0
if attrs is not None:
try:
offset = int(attrs.split('\t')[0])
except ValueError:
pass
return offset
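# Editor's note (hedged illustration): with the progressive-loading hack
# described above, the offset rides in front of the attribute path, e.g.
#   get_offset('300\tobj\tattr1\tattr2')  -> 300
#   get_offset('obj\tattr1\tattr2')       -> 0   (no leading integer)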
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields
:note: PyCharm supports progressive loading of large collections and uses the `attrs`
parameter to pass the offset, e.g. 300\t\\obj\tattr1\tattr2 should return
the value of attr2 starting from the 300th element. This hack makes it possible
to add the support of progressive loading without extending of the protocol.
"""
offset = get_offset(attrs)
orig_attrs, attrs = attrs, attrs.split('\t', 1)[1] if offset else attrs
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, orig_attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of variables's fields
"""
offset = get_offset(attrs)
attrs = attrs.split('\t', 1)[1] if offset else attrs
attr_list = attrs.split('\t')
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
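# Editor's note (hedged illustration): the '__' fallback above applies Python's
# private-name mangling, so when `eval` fails on e.g. `self.__secret`, the
# lookup is retried attribute-by-attribute as `self._MyClass__secret`.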
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if value is SENTINEL_VALUE:
# It is possible to have variables with names like '.0', ',,,foo', etc in scope by setting them with
# `sys._getframe().f_locals`. In particular, the '.0' variable name is used to denote the list iterator when we stop in
# list comprehension expressions. This variable evaluates to 0. by `eval`, which is not what we want and this is the main
# reason we have to check if the expression exists in the global and local scopes before trying to evaluate it.
value = frame.f_locals.get(expression) or frame.f_globals.get(expression) or eval(expression, frame.f_globals, frame.f_locals)
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = float('inf')
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
return xml
class ExceedingArrayDimensionsException(Exception):
pass
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise ExceedingArrayDimensionsException()
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in NUMPY_NUMERIC_TYPES and array.size != 0:
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def get_column_formatter_by_type(initial_format, column_type):
if column_type in NUMPY_NUMERIC_TYPES and initial_format:
if column_type in NUMPY_FLOATING_POINT_TYPES and initial_format.strip() == DEFAULT_DF_FORMAT:
# use custom formatting for floats when default formatting is set
return array_default_format(column_type)
return initial_format
else:
return array_default_format(column_type)
def get_formatted_row_elements(row, iat, dim, cols, format, dtypes):
for c in range(cols):
val = iat[row, c] if dim > 1 else iat[row]
col_formatter = get_column_formatter_by_type(format, dtypes[c])
try:
yield ("%" + col_formatter) % (val,)
except TypeError:
yield ("%" + DEFAULT_DF_FORMAT) % (val,)
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
DATAFRAME_HEADER_LOAD_MAX_SIZE = 100
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
original_df = df
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
if not format:
if num_rows > 0 and num_cols == 1: # series or data frame with one column
try:
kind = df.dtype.kind
except AttributeError:
try:
kind = df.dtypes[0].kind
except (IndexError, KeyError):
kind = 'O'
format = array_default_format(kind)
else:
format = array_default_format(DEFAULT_DF_FORMAT)
xml = slice_to_xml(name, num_rows, num_cols, format, "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
elif (rows, cols) == (0, 0):
# return header only
r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)
c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)
xml += header_data_to_xml(r, c, [""] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)
return xml
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
def col_to_format(column_type):
return get_column_formatter_by_type(format, column_type)
iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
def formatted_row_elements(row):
return get_formatted_row_elements(row, iat, dim, cols, format, dtypes)
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, formatted_row_elements, format)
return xml
def array_data_to_xml(rows, cols, get_row, format):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % row
for value in get_row(row):
xml += var_to_xml(value, '', format=format)
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, quote(format), type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = quote(get_label(df.axes[1].values[col]) if dim > 1 else str(col))
bounds = col_bounds[col]
col_format = "%" + col_to_format(dtypes[col])
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(dtypes[col]), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
def is_able_to_format_number(format):
try:
format % math.pi
except Exception:
return False
return True
TYPE_TO_XML_CONVERTERS = {
"ndarray": array_to_xml,
"DataFrame": dataframe_to_xml,
"Series": dataframe_to_xml,
"GeoDataFrame": dataframe_to_xml,
"GeoSeries": dataframe_to_xml
}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
format = format if is_able_to_format_number(format) else '%'
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
|
apache-2.0
| -3,719,770,672,759,844,400 | 8,885,872,311,570,299,000 | 33.76455 | 138 | 0.589871 | false |
syscoin/syscoin
|
test/functional/mining_getblocktemplate_longpoll.py
|
1
|
3612
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
import random
import threading
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import get_rpc_proxy
from test_framework.wallet import MiniWallet
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
template = node.getblocktemplate({'rules': ['segwit']})
self.longpollid = template['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']})
class GetBlockTemplateLPTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.log.info("Test that longpollid doesn't change between successive getblocktemplate() invocations if nothing else happens")
self.nodes[0].generate(10)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
longpollid = template['longpollid']
template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template2['longpollid'] == longpollid
self.log.info("Test that longpoll waits if we do nothing")
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert thr.is_alive()
miniwallets = [ MiniWallet(node) for node in self.nodes ]
self.log.info("Test that longpoll will terminate if another node generates a block")
miniwallets[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
self.log.info("Test that longpoll will terminate if we generate a block ourselves")
thr = LongpollThread(self.nodes[0])
thr.start()
miniwallets[0].generate(1) # generate a block on own node
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
# Add enough mature utxos to the wallets, so that all txs spend confirmed coins
self.nodes[0].generate(100)
self.sync_blocks()
self.log.info("Test that introducing a new transaction into the mempool will terminate the longpoll")
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
fee_rate = min_relay_fee + Decimal('0.00000010') * random.randint(0,20)
miniwallets[0].send_self_transfer(from_node=random.choice(self.nodes),
fee_rate=fee_rate)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert not thr.is_alive()
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
|
mit
| -5,725,999,291,743,215,000 | 1,737,112,516,973,186,600 | 43.592593 | 134 | 0.668051 | false |
tchellomello/home-assistant
|
homeassistant/auth/permissions/merge.py
|
19
|
1786
|
"""Merging of policies."""
from typing import Dict, List, Set, cast
from .types import CategoryType, PolicyType
def merge_policies(policies: List[PolicyType]) -> PolicyType:
"""Merge policies."""
new_policy: Dict[str, CategoryType] = {}
seen: Set[str] = set()
for policy in policies:
for category in policy:
if category in seen:
continue
seen.add(category)
new_policy[category] = _merge_policies(
[policy.get(category) for policy in policies]
)
    return cast(PolicyType, new_policy)
def _merge_policies(sources: List[CategoryType]) -> CategoryType:
"""Merge a policy."""
# When merging policies, the most permissive wins.
# This means we order it like this:
# True > Dict > None
#
# True: allow everything
# Dict: specify more granular permissions
# None: no opinion
#
# If there are multiple sources with a dict as policy, we recursively
# merge each key in the source.
policy: CategoryType = None
seen: Set[str] = set()
for source in sources:
if source is None:
continue
# A source that's True will always win. Shortcut return.
if source is True:
return True
assert isinstance(source, dict)
if policy is None:
policy = cast(CategoryType, {})
assert isinstance(policy, dict)
for key in source:
if key in seen:
continue
seen.add(key)
key_sources = []
for src in sources:
if isinstance(src, dict):
key_sources.append(src.get(key))
policy[key] = _merge_policies(key_sources)
return policy
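# Editor's note (hedged illustration, not part of Home Assistant): the most
# permissive source wins (True > dict > None), so, for example,
#   merge_policies([{"entities": True},
#                   {"entities": {"entity_ids": {"light.kitchen": True}}}])
# collapses to {"entities": True}, while two dict-valued sources would be
# merged key by key via _merge_policies.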
|
apache-2.0
| 8,006,422,198,578,084,000 | 3,121,701,770,610,241,500 | 26.476923 | 73 | 0.581747 | false |
robinro/ansible
|
test/units/plugins/lookup/test_lastpass.py
|
153
|
6918
|
# (c)2016 Andrew Zenk <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from argparse import ArgumentParser
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.errors import AnsibleError
from ansible.module_utils import six
from ansible.plugins.lookup.lastpass import LookupModule, LPass, LPassException
MOCK_ENTRIES = [{'username': 'user',
'name': 'Mock Entry',
'password': 't0pS3cret passphrase entry!',
'url': 'https://localhost/login',
'notes': 'Test\nnote with multiple lines.\n',
'id': '0123456789'}]
class MockLPass(LPass):
_mock_logged_out = False
_mock_disconnected = False
def _lookup_mock_entry(self, key):
for entry in MOCK_ENTRIES:
if key == entry['id'] or key == entry['name']:
return entry
def _run(self, args, stdin=None, expected_rc=0):
# Mock behavior of lpass executable
base_options = ArgumentParser(add_help=False)
base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never'])
p = ArgumentParser()
sp = p.add_subparsers(help='command', dest='subparser_name')
logout_p = sp.add_parser('logout', parents=[base_options], help='logout')
show_p = sp.add_parser('show', parents=[base_options], help='show entry details')
field_group = show_p.add_mutually_exclusive_group(required=True)
for field in MOCK_ENTRIES[0].keys():
field_group.add_argument("--{0}".format(field), default=False, action='store_true')
field_group.add_argument('--field', default=None)
show_p.add_argument('selector', help='Unique Name or ID')
args = p.parse_args(args)
def mock_exit(output='', error='', rc=0):
if rc != expected_rc:
raise LPassException(error)
return output, error
if args.color != 'never':
return mock_exit(error='Error: Mock only supports --color=never', rc=1)
if args.subparser_name == 'logout':
if self._mock_logged_out:
return mock_exit(error='Error: Not currently logged in', rc=1)
logged_in_error = 'Are you sure you would like to log out? [Y/n]'
if stdin and stdin.lower() == 'n\n':
return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1)
elif stdin and stdin.lower() == 'y\n':
return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0)
else:
return mock_exit(error='Error: aborted response', rc=1)
if args.subparser_name == 'show':
if self._mock_logged_out:
return mock_exit(error='Error: Could not find decryption key.' +
' Perhaps you need to login with `lpass login`.', rc=1)
if self._mock_disconnected:
return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1)
mock_entry = self._lookup_mock_entry(args.selector)
if args.field:
return mock_exit(output=mock_entry.get(args.field, ''))
elif args.password:
return mock_exit(output=mock_entry.get('password', ''))
elif args.username:
return mock_exit(output=mock_entry.get('username', ''))
elif args.url:
return mock_exit(output=mock_entry.get('url', ''))
elif args.name:
return mock_exit(output=mock_entry.get('name', ''))
elif args.id:
return mock_exit(output=mock_entry.get('id', ''))
elif args.notes:
return mock_exit(output=mock_entry.get('notes', ''))
raise LPassException('We should never get here')
class DisconnectedMockLPass(MockLPass):
_mock_disconnected = True
class LoggedOutMockLPass(MockLPass):
_mock_logged_out = True
class TestLPass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_lastpass_cli_path(self):
lp = MockLPass(path='/dev/null')
self.assertEqual('/dev/null', lp.cli_path)
def test_lastpass_build_args_logout(self):
lp = MockLPass()
self.assertEqual(['logout', '--color=never'], lp._build_args("logout"))
def test_lastpass_logged_in_true(self):
lp = MockLPass()
self.assertTrue(lp.logged_in)
def test_lastpass_logged_in_false(self):
lp = LoggedOutMockLPass()
self.assertFalse(lp.logged_in)
def test_lastpass_show_disconnected(self):
lp = DisconnectedMockLPass()
with self.assertRaises(LPassException):
lp.get_field('0123456789', 'username')
def test_lastpass_show(self):
lp = MockLPass()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(), lp.get_field(entry_id, k))
class TestLastpassPlugin(unittest.TestCase):
@patch('ansible.plugins.lookup.lastpass.LPass', new=MockLPass)
def test_lastpass_plugin_normal(self):
lookup_plugin = LookupModule()
for entry in MOCK_ENTRIES:
entry_id = entry.get('id')
for k, v in six.iteritems(entry):
self.assertEqual(v.strip(),
lookup_plugin.run([entry_id], field=k)[0])
@patch('ansible.plugins.lookup.lastpass.LPass', LoggedOutMockLPass)
def test_lastpass_plugin_logged_out(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
@patch('ansible.plugins.lookup.lastpass.LPass', DisconnectedMockLPass)
def test_lastpass_plugin_disconnected(self):
lookup_plugin = LookupModule()
entry = MOCK_ENTRIES[0]
entry_id = entry.get('id')
with self.assertRaises(AnsibleError):
lookup_plugin.run([entry_id], field='password')
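# Editor's note (hedged illustration of the plugin under test): in a playbook,
#   debug: msg="{{ lookup('lastpass', 'Mock Entry', field='username') }}"
# drives the same `lpass show --username 'Mock Entry'` invocation that
# MockLPass emulates above.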
|
gpl-3.0
| -8,258,574,258,502,531,000 | -4,761,401,365,062,072,000 | 34.84456 | 97 | 0.612605 | false |
molebot/brython
|
www/src/Lib/test/test_importlib/builtin/test_loader.py
|
26
|
3341
|
import importlib
from importlib import machinery
from .. import abc
from .. import util
from . import util as builtin_util
import sys
import types
import unittest
class LoaderTests(abc.LoaderTests):
"""Test load_module() for built-in modules."""
verification = {'__name__': 'errno', '__package__': '',
'__loader__': machinery.BuiltinImporter}
def verify(self, module):
"""Verify that the module matches against what it should have."""
self.assertIsInstance(module, types.ModuleType)
for attr, value in self.verification.items():
self.assertEqual(getattr(module, attr), value)
self.assertIn(module.__name__, sys.modules)
load_module = staticmethod(lambda name:
machinery.BuiltinImporter.load_module(name))
def test_module(self):
# Common case.
with util.uncache(builtin_util.NAME):
module = self.load_module(builtin_util.NAME)
self.verify(module)
def test_package(self):
# Built-in modules cannot be a package.
pass
def test_lacking_parent(self):
# Built-in modules cannot be a package.
pass
def test_state_after_failure(self):
        # No way to force an import failure.
pass
def test_module_reuse(self):
# Test that the same module is used in a reload.
with util.uncache(builtin_util.NAME):
module1 = self.load_module(builtin_util.NAME)
module2 = self.load_module(builtin_util.NAME)
self.assertIs(module1, module2)
def test_unloadable(self):
name = 'dssdsdfff'
assert name not in sys.builtin_module_names
with self.assertRaises(ImportError) as cm:
self.load_module(name)
self.assertEqual(cm.exception.name, name)
def test_already_imported(self):
# Using the name of a module already imported but not a built-in should
# still fail.
assert hasattr(importlib, '__file__') # Not a built-in.
with self.assertRaises(ImportError) as cm:
self.load_module('importlib')
self.assertEqual(cm.exception.name, 'importlib')
class InspectLoaderTests(unittest.TestCase):
"""Tests for InspectLoader methods for BuiltinImporter."""
def test_get_code(self):
# There is no code object.
result = machinery.BuiltinImporter.get_code(builtin_util.NAME)
self.assertIsNone(result)
def test_get_source(self):
# There is no source.
result = machinery.BuiltinImporter.get_source(builtin_util.NAME)
self.assertIsNone(result)
def test_is_package(self):
# Cannot be a package.
result = machinery.BuiltinImporter.is_package(builtin_util.NAME)
self.assertTrue(not result)
def test_not_builtin(self):
# Modules not built-in should raise ImportError.
for meth_name in ('get_code', 'get_source', 'is_package'):
method = getattr(machinery.BuiltinImporter, meth_name)
with self.assertRaises(ImportError) as cm:
method(builtin_util.BAD_NAME)
            self.assertEqual(cm.exception.name, builtin_util.BAD_NAME)
def test_main():
from test.support import run_unittest
run_unittest(LoaderTests, InspectLoaderTests)
if __name__ == '__main__':
test_main()
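# Editor's note (hedged illustration of the API under test):
#   from importlib import machinery
#   errno = machinery.BuiltinImporter.load_module('errno')
#   errno.__loader__ is machinery.BuiltinImporter   # -> True
# (load_module is the legacy loader protocol these tests target.)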
|
bsd-3-clause
| 8,717,826,909,039,547,000 | -4,934,931,716,632,179,000 | 30.819048 | 79 | 0.63933 | false |
byterom/android_external_chromium_org
|
build/get_landmines.py
|
26
|
2454
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
import landmine_utils
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: To delete generated mojo class files.'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'linux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print "Update to VS2013 Update 2."
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
print 'Need to clobber everything due to gen file moves in r175513 (Blink)'
if (platform() != 'ios'):
    print 'Clobber to get rid of obsolete test plugin after r248358'
print 'Clobber to rebuild GN files for V8'
print 'Need to clobber everything due to build_nexe change in nacl r13424'
print '[chromium-dev] PSA: clobber build needed for IDR_INSPECTOR_* compil...'
print 'blink_resources.grd changed: crbug.com/400860'
print 'ninja dependency cycle: crbug.com/408192'
def main():
print_landmines()
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
| -1,616,606,449,113,242,000 | -3,801,887,502,375,272,400 | 35.626866 | 80 | 0.683374 | false |
rmm-fcul/workshops
|
2015_graz/binary_choice/two_arenas_real_real/casu_utils.py
|
5
|
8116
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
a library of functions used in CASU controller dynamics; it collects
messy code that is neater kept in one shared module.
RM, Feb 2015
'''
import numpy as np
from assisipy import casu
#import matplotlib.cm as cm
from datetime import datetime
import parsing
import time
### ============= maths ============= ###
#{{{ rolling_avg
def rolling_avg(x, n):
'''
given the sample x, provide a rolling average taking n samples per data point.
NOT a quick solution, but easy...
'''
y = np.zeros((len(x),))
for ctr in range(len(x)):
y[ctr] = np.sum(x[ctr:(ctr+n)])
return y/n
#}}}
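# Editor's note (hedged illustration): rolling_avg smooths a detection trace,
# e.g. rolling_avg(np.array([0, 3, 3, 0]), n=2) -> array([1.5, 3. , 1.5, 0. ])
# (the window runs off the end of the array, so the tail is averaged with zeros).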
### ============= general behaviour ============= ###
#{{{ measure_ir_sensors
def measure_ir_sensors(mycasu, detect_data):
''' count up sensors that detect a bee, plus rotate history array '''
# don't discriminate between specific directions, so just accumulate all
count = 0
for (val,t) in zip(mycasu.get_ir_raw_value(casu.ARRAY), mycasu.threshold):
if (val > t):
count += 1
#print "raw:",
#print ",".join(["{:.2f}".format(x) for x in mycasu.get_ir_raw_value(casu.ARRAY)])
#mycasu.total_count += count # historical count over all time
detect_data = np.roll(detect_data, 1) # step all positions back
detect_data[0] = count # and overwrite the first entry (this was rolled
# around, so is the oldest entry -- and to become the newest now)
# allow ext usage to apply window -- remain agnostic here during collection.
return detect_data, count
#}}}
#{{{ heater_one_step
def heater_one_step(h):
'''legacy function'''
return detect_bee_proximity_saturated(h)
def detect_bee_proximity_saturated(h):
# measure proximity
detect_data, count = measure_ir_sensors(h, h.detect_data)
h.detect_data = detect_data
# overall bee count for this casu
sat_count = min(h.sat_lim, count) # saturates
return sat_count
#}}}
#{{{ find_mean_ext_temp
def find_mean_ext_temp(h):
r = []
for sensor in [casu.TEMP_F, casu.TEMP_B, casu.TEMP_L, casu.TEMP_R ]:
r.append(h.get_temp(sensor))
if len(r):
mean = sum(r) / float(len(r))
else:
mean = 0.0
return mean
#}}}
### ============= inter-casu comms ============= ###
#{{{ comms functions
def transmit_my_count(h, sat_count, dest='accomplice'):
s = "{}".format(sat_count)
if h.verb > 1:
print "\t[i]==> {} send msg ({} by): '{}' bees, to {}".format(
h._thename, len(s), s, dest)
h.send_message(dest, s)
#TODO: this is non-specific, i.e., any message from anyone is assumed to have
# the right form. For heterogeneous neighbours, we need to check identity as
# well
def recv_all_msgs(h, retry_cnt=0, max_recv=None):
'''
    continue to read the message buffer until no more messages remain;
    returns a list of messages parsed into (src, float) pairs
'''
msgs = []
try_cnt = 0
while(True):
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
msgs.append((src, bee_cnt))
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, {4} from {0} {5}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename,
BLU, ENDC)
if h.verb > 1:
#print dir(msg)
print msg.items()
if(max_recv is not None and len(msgs) >= max_recv):
break
else:
# buffer emptied, return
try_cnt += 1
if try_cnt > retry_cnt:
break
return msgs
def recv_neighbour_msg(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = int(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
def recv_neighbour_msg_w_src(h):
''' provide the source of a message as well as the message count'''
bee_cnt = 0
src = None
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
if h.verb > 1:
#print dir(msg)
print msg.items()
return bee_cnt, src
def recv_neighbour_msg_flt(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = float(txt.split()[0])
if h.verb > 1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
#}}}
def find_comms_mapping(name, rtc_path, suffix='-sim', verb=True):
links = parsing.find_comm_link_mapping(
name, rtc_path=rtc_path, suffix=suffix, verb=verb)
if verb:
print "[I] for {}, found the following nodes/edges".format(name)
print "\t", links.items()
print "\n===================================\n\n"
return links
### ============= display ============= ###
#{{{ term codes for colored text
ERR = '\033[41m'
BLU = '\033[34m'
ENDC = '\033[0m'
#}}}
#{{{ color funcs
#def gen_cmap(m='hot', n=32) :
# return cm.get_cmap(m, n) # get LUT with 32 values -- some gradation but see steps
def gen_clr_tgt(new_temp, cmap, tgt=None, min_temp=28.0, max_temp=38.0):
t_rng = float(max_temp - min_temp)
fr = (new_temp - min_temp) / t_rng
i = int(fr * len(cmap))
# compute basic color, if on target
#r,g,b,a = cmap(i)
g = 0.0; b = 0.0; a = 1.0;
i = sorted([0, i, len(cmap)-1])[1]
r = cmap[i]
# now adjust according to distance from target
if tgt is None: tgt=new_temp
dt = np.abs(new_temp - tgt)
dt_r = dt / t_rng
h2 = np.array([r,g,b])
h2 *= (1-dt_r)
return h2
# a colormap with 8 settings, that doesn't depend on the presence of
# matplotlib (hard-coded though.) -- deprecating
_clrs = [
(0.2, 0.2, 0.2),
(0.041, 0, 0),
(0.412, 0, 0),
(0.793, 0, 0),
(1, 0.174, 0),
(1, 0.555, 0),
(1, 0.936, 0),
(1, 1, 0.475),
(1, 1, 1),
]
_dflt_clr = (0.2, 0.2, 0.2)
# can access other gradations of colour using M = cm.hot(n) for n steps, then
# either extract them once (`clrs = M(arange(n))`) or each time (`clr_x = M(x)`)
# But here we're going to use 8 steps for all CASUs so no bother.
#}}}
def sep_with_nowtime():
print "# =================== t={} =================== #\n".format(
datetime.now().strftime("%H:%M:%S"))
### ============= more generic ============= ###
#{{{ a struct constructor
# some handy python utilities, from Kier Dugan
class Struct:
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
def get(self, key, default=None):
return self.__dict__.get(key, default)
def addFields(self, **kwargs):
# add other fields (basically variables) after initialisation
self.__dict__.update (kwargs)
#}}}
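# Editor's note (hedged illustration; field names below are arbitrary):
#   h = Struct(verb=1, sat_lim=6); h.addFields(detect_data=np.zeros(10))
#   h.get('missing', 0.0)   # -> 0.0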
### calibration
def _calibrate(h, calib_steps, calib_gain=1.1, interval=0.1):
'''
read the sensors several times, and take the highest reading
seen as the threshold.
'''
h._raw_thresh = [0] * 7 # default cases for threshold
for stp in xrange(calib_steps):
for i, v in enumerate(h.get_ir_raw_value(casu.ARRAY)):
if v > h._raw_thresh[i]:
h._raw_thresh[i] = v
time.sleep(interval)
h.thresh = [x*calib_gain for x in h._raw_thresh]
h.threshold = [x*calib_gain for x in h._raw_thresh]
if h.verb:
_ts =", ".join(["{:.2f}".format(x) for x in h.thresh])
print "[I] post-calibration, we have thresh: ", _ts
|
lgpl-3.0
| 5,204,720,287,532,705,000 | -8,646,442,989,038,605,000 | 26.326599 | 92 | 0.545096 | false |
Lucretiel/autocommand
|
test/test_automain.py
|
1
|
1906
|
# Copyright 2014-2016 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
import pytest
from autocommand.automain import automain, AutomainRequiresModuleError
@pytest.mark.parametrize('module_name', ['__main__', True])
def test_name_equals_main_or_true(module_name):
with pytest.raises(SystemExit):
@automain(module_name)
def main():
return 0
def test_name_not_main_or_true():
def main():
return 0
wrapped_main = automain('some_module')(main)
assert wrapped_main is main
def test_invalid_usage():
with pytest.raises(AutomainRequiresModuleError):
@automain
def main():
return 0
def test_args():
main_called = False
with pytest.raises(SystemExit):
@automain(True, args=[1, 2])
def main(a, b):
nonlocal main_called
main_called = True
assert a == 1
assert b == 2
assert main_called
def test_args_and_kwargs():
main_called = False
with pytest.raises(SystemExit):
@automain(True, args=[1], kwargs={'b': 2})
def main(a, b):
nonlocal main_called
main_called = True
assert a == 1
assert b == 2
assert main_called
|
lgpl-3.0
| 6,332,096,639,977,679,000 | -3,628,972,625,046,544,400 | 27.029412 | 77 | 0.654774 | false |
mhotwagner/backstage
|
facade/models.py
|
1
|
2837
|
from django.db import models
from ckeditor.fields import RichTextField
from solo.models import SingletonModel
from phonenumber_field import modelfields as phonenumber_models
from foti.models import Foto
from opere.models import Opera
from scritti.models import Scritto
class Profile(SingletonModel):
name = models.CharField(
max_length=255,
blank=False,
)
_title = models.CharField(
max_length=255,
blank=True,
help_text='Site title used in tab. Defaults to \'name\' if left blank.',
)
tagline = models.CharField(
max_length=515,
blank=True,
help_text='Just a quick description (e.g. "Waddling through the world in search of adventure and snuggles" to go with "Nomad Penguin")',
)
intro = RichTextField(
max_length=1024,
blank=True,
)
bio_title = models.CharField(max_length=64, blank=True)
bio_image = models.ImageField(upload_to='profile', blank=True)
bio = RichTextField(
max_length=4096,
blank=True,
)
# Contact Info
_contact_name = models.CharField(
max_length=64,
blank=True,
help_text='Just in case you didn\'t use your real name up above. You can leave this blank if you want.',
)
address = models.CharField(
max_length=64,
blank=True,
)
city = models.CharField(
max_length=64,
blank=True,
)
state = models.CharField(
max_length=64,
blank=True,
)
country = models.CharField(
max_length=128,
blank=True
)
zip_code = models.CharField(
max_length=16,
blank=True,
help_text='"Postal Code", technically.'
)
email = models.EmailField(
max_length=128,
blank=True
)
phone = phonenumber_models.PhoneNumberField(blank=True)
website = models.URLField(blank=True, help_text='In case you have another one, I guess?')
twitter = models.URLField(blank=True)
facebook = models.URLField(blank=True)
instagram = models.URLField(blank=True)
linkedin = models.URLField(blank=True)
pinterest = models.URLField(blank=True)
tumblr = models.URLField(blank=True)
# Someday we'll change the first one to accept Opera
homepage_features = models.ManyToManyField(Scritto, related_name='facade_homepage_features', help_text='Max of 6!', blank=True)
writing_features = models.ManyToManyField(Scritto, related_name='facade_writing_features', help_text='Max of 6!', blank=True)
photo_features = models.ManyToManyField(Foto, related_name='facade_photo_features', help_text='Max of 6!', blank=True)
@property
def title(self):
return self._title or self.name
@property
def fullname(self):
return self._contact_name or self.name
|
mit
| 8,616,108,469,972,528,000 | 3,059,469,844,961,295,000 | 29.836957 | 143 | 0.650335 | false |
abadger/ansible
|
lib/ansible/plugins/action/yum.py
|
11
|
4713
|
# (c) 2018, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleActionFail
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
VALID_BACKENDS = frozenset(('yum', 'yum4', 'dnf'))
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
'''
Action plugin handler for yum3 vs yum4(dnf) operations.
Enables the yum module to use yum3 and/or yum4. Yum4 is a yum
command-line compatibility layer on top of dnf. Since the Ansible
modules for yum(aka yum3) and dnf(aka yum4) call each of yum3 and yum4's
python APIs natively on the backend, we need to handle this here and
pass off to the correct Ansible module to execute on the remote system.
'''
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Carry-over concept from the package action plugin
if 'use' in self._task.args and 'use_backend' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('use', 'use_backend')")
module = self._task.args.get('use', self._task.args.get('use_backend', 'auto'))
if module == 'auto':
try:
if self._task.delegate_to: # if we delegate, we should use delegated host's facts
module = self._templar.template("{{hostvars['%s']['ansible_facts']['pkg_mgr']}}" % self._task.delegate_to)
else:
module = self._templar.template("{{ansible_facts.pkg_mgr}}")
except Exception:
pass # could not get it from template!
if module not in VALID_BACKENDS:
facts = self._execute_module(
module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
task_vars=task_vars)
display.debug("Facts %s" % facts)
module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
result['ansible_facts'] = {'pkg_mgr': module}
if module not in VALID_BACKENDS:
result.update(
{
'failed': True,
'msg': ("Could not detect which major revision of yum is in use, which is required to determine module backend.",
"You should manually specify use_backend to tell the module whether to use the yum (yum3) or dnf (yum4) backend"),
}
)
else:
if module == "yum4":
module = "dnf"
# eliminate collisions with collections search while still allowing local override
module = 'ansible.legacy.' + module
if not self._shared_loader_obj.module_loader.has_plugin(module):
result.update({'failed': True, 'msg': "Could not find a yum module backend for %s." % module})
else:
# run either the yum (yum3) or dnf (yum4) backend module
new_module_args = self._task.args.copy()
if 'use_backend' in new_module_args:
del new_module_args['use_backend']
if 'use' in new_module_args:
del new_module_args['use']
display.vvvv("Running %s as the backend for the yum action plugin" % module)
result.update(self._execute_module(
module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
# Cleanup
if not self._task.async_val:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
return result
|
gpl-3.0
| -2,212,381,552,543,442,400 | 9,029,612,497,851,877,000 | 42.238532 | 144 | 0.613198 | false |
qPCR4vir/orange3
|
Orange/canvas/canvas/items/utils.py
|
3
|
3147
|
import numpy
import sip
from PyQt4.QtGui import QColor, QRadialGradient, QPainterPathStroker
from PyQt4.QtCore import QObject, QSignalMapper
from PyQt4.QtCore import pyqtSignal as Signal
def saturated(color, factor=150):
"""Return a saturated color.
"""
h = color.hsvHueF()
s = color.hsvSaturationF()
v = color.valueF()
a = color.alphaF()
s = factor * s / 100.0
s = max(min(1.0, s), 0.0)
return QColor.fromHsvF(h, s, v, a).convertTo(color.spec())
def sample_path(path, num=10):
"""Sample `num` equidistant points from the `path` (`QPainterPath`).
"""
space = numpy.linspace(0.0, 1.0, num, endpoint=True)
return [path.pointAtPercent(float(p)) for p in space]
def radial_gradient(color, color_light=50):
"""
radial_gradient(QColor, QColor)
radial_gradient(QColor, int)
Return a radial gradient. `color_light` can be a QColor or an int.
In the later case the light color is derived from `color` using
`saturated(color, color_light)`.
"""
if not isinstance(color_light, QColor):
color_light = saturated(color, color_light)
gradient = QRadialGradient(0.5, 0.5, 0.5)
gradient.setColorAt(0.0, color_light)
gradient.setColorAt(0.5, color_light)
gradient.setColorAt(1.0, color)
gradient.setCoordinateMode(QRadialGradient.ObjectBoundingMode)
return gradient
def toGraphicsObjectIfPossible(item):
"""Return the item as a QGraphicsObject if possible.
This function is intended as a workaround for a problem with older
versions of PyQt (< 4.9), where methods returning 'QGraphicsItem *'
lose the type of the QGraphicsObject subclasses and instead return
generic QGraphicsItem wrappers.
"""
if item is None:
return None
obj = item.toGraphicsObject()
return item if obj is None else obj
def linspace(count):
"""Return `count` evenly spaced points from 0..1 interval excluding
both end points, e.g. `linspace(3) == [0.25, 0.5, 0.75]`.
"""
return list(map(float, numpy.linspace(0.0, 1.0, count + 2, endpoint=True)[1:-1]))
def uniform_linear_layout(points):
"""Layout the points (a list of floats in 0..1 range) in a uniform
linear space while preserving the existing sorting order.
"""
indices = numpy.argsort(points)
space = numpy.asarray(linspace(len(points)))
# invert the indices
indices = invert_permutation_indices(indices)
# assert((numpy.argsort(points) == numpy.argsort(space[indices])).all())
points = space[indices]
return points.tolist()
def invert_permutation_indices(indices):
"""Invert the permutation given by indices.
"""
inverted = [0] * len(indices)
for i, index in enumerate(indices):
inverted[index] = i
return inverted
def stroke_path(path, pen):
"""Create a QPainterPath stroke from the `path` drawn with `pen`.
"""
stroker = QPainterPathStroker()
stroker.setCapStyle(pen.capStyle())
stroker.setJoinStyle(pen.joinStyle())
stroker.setMiterLimit(pen.miterLimit())
stroker.setWidth(max(pen.widthF(), 1e-9))
return stroker.createStroke(path)
|
bsd-2-clause
| -558,571,985,131,366,300 | 7,097,538,080,905,876,000 | 28.411215 | 85 | 0.68033 | false |
hehongliang/tensorflow
|
tensorflow/python/kernel_tests/basic_gpu_test.py
|
2
|
10569
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.cached_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.cached_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
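# broadcast_gradient_args returns, for each input shape, the axes that
# broadcasting introduced (the reduction axes for that input's gradient).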
with self.cached_session(use_gpu=True) as sess:
return sess.run(broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/device:GPU:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set([x for x in itertools.chain(*results)])
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
|
apache-2.0
| -1,939,121,600,402,297,300 | -7,721,803,849,920,276,000 | 36.746429 | 80 | 0.631186 | false |
moqada/hatena2evernote
|
h2e.py
|
1
|
11014
|
# -*- coding: utf-8 -*-
import argparse
import binascii
import datetime
import hashlib
import os
import re
import requests
import time
import urllib
from evernote.api.client import EvernoteClient
from evernote.edam.type import ttypes as Types
from BeautifulSoup import BeautifulSoup
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import SafeConfigParser
HATEBU_URL = 'http://b.hatena.ne.jp/%(username)s/atomfeed'
READABILITY_PARSER_API = (
'https://readability.com/api/content/v1/parser?url=%(url)s&token=%(token)s'
)
ENML_ENABLED_TAGS = (
'a', 'abbr', 'acronym', 'address', 'area', 'b', 'bdo', 'big', 'blockquote',
'br', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del',
'dfn', 'div', 'dl', 'dt', 'em', 'font', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'hr', 'i', 'img', 'ins', 'kbd', 'li', 'map', 'ol', 'p', 'pre', 'q', 's',
'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'tfoot', 'th', 'thead', 'title', 'tr', 'tt', 'u', 'ul',
'var', 'xmp'
)
ENML_DISABLED_TAGS_REGEX = re.compile(
r'<(/?)(?!(%s)(\s.*?)?>)[\w_-]+(\s.*?)?>' % '|'.join(ENML_ENABLED_TAGS))
# http://dev.evernote.com/intl/jp/doc/articles/enml.php
# The name attribute isn't explicitly documented as disallowed, but Evernote complained about it anyway...
ENML_DISABLED_ATTRIBUTES = (
'rel', 'class', 'id', 'on\w*?', 'frame', 'rules', 'alt', 'datetime',
'accesskey', 'data', 'dynsrc', 'tabindex', 'name',
)
# Global variable that mainly holds token info for the integrated services
global_config = {}
def fetch_entries(username, date):
""" Fetch the Hatena Bookmark feed for the specified date
"""
def fetch_feed(url):
print 'Fetch: ', url
res = requests.get(url)
return BeautifulSoup(res.text)
def get_date_entries(url, target_date, entries):
""" Collect only the entries for the target date.
Keeps paginating through the feed until the entries are older than the target date.
"""
soup = fetch_feed(url)
for entry in soup.findAll('entry'):
entry = get_entry(entry)
entry_d = datetime.datetime.fromtimestamp(entry['created']).date()
if target_date < entry_d:
continue
elif target_date > entry_d:
return entries
entries.append(entry)
next_link = soup.find('link', rel='next')
if next_link is not None:
get_date_entries(next_link.get('href'), target_date, entries)
def get_entry(soup_entry):
""" Return the needed fields extracted from an entry element (a BeautifulSoup object)
"""
created = datetime.datetime.strptime(
soup_entry.find('issued').text[:-6], '%Y-%m-%dT%H:%M:%S')
return {
'title': soup_entry.find('title').text,
'summary': soup_entry.find('summary').text or u'',
'url': soup_entry.find('link', rel='related').get('href'),
'tags': [t.text for t in soup_entry.findAll('dc:subject')],
'created': int(time.mktime(created.timetuple())),
}
hb_entries = []
feed_url = HATEBU_URL % {'username': username}
soup = fetch_feed('%s?date=%s' % (feed_url, date))
# If the feed title carries an entry count and the target date has at most
# 20 entries, fetch them straight from the date feed. (For a while after the
# date changes, the date-filtered feed title has no count.) With more than 20
# entries, walk the whole feed collecting every entry for the target date.
title = soup.find('title').text
match = re.search(r'\((\d+)\)$', title)
if match and int(match.group(1)) <= 20:
for entry in soup.findAll('entry'):
hb_entries.append(get_entry(entry))
else:
get_date_entries(
feed_url,
datetime.datetime.strptime(date, '%Y%m%d').date(),
hb_entries)
return hb_entries
def to_enml(content, url=''):
""" Convert HTML into ENML format
"""
enml = re.sub(r'<img(.*?)>', r'<img\1 />', content)
# Strip attributes that ENML does not allow
for attr in ENML_DISABLED_ATTRIBUTES:
enml = re.sub(
r'(<\w+.*?)( %s=".*?")(.*?>)' % attr,
r'\1\3', enml, flags=re.DOTALL)
# width attributes on unexpected elements also get rejected, so rewrite them
for attr in ('width', 'height'):
enml = re.sub(
r'<(?!(img)\s.*?>)(\w+\s.*?)(%s=(\'.*?\'|".*?"))(.*?)>' % attr,
r'<\2\5>', enml, flags=re.DOTALL)
# Empty or relative href values get rejected, so rewrite them
enml = re.sub(
r'(<a.*?)(href="")(.*?>)', r'\1href="#"\3', enml, flags=re.DOTALL)
if url:
pattrn = (
r'\1href="%s\3"\4'
% re.search(r'https?://.*?(/|$)', url).group()
)
else:
pattrn = r'\1href="./"\4'
enml = re.sub(
r'(<a.*?)(href="(/.*?)")(.*?>)', pattrn, enml, flags=re.DOTALL)
# Add an inline style to pre elements
enml = re.sub(
r'(<pre.*?>)',
r'<pre style="background-color:#EEE;padding:10px;">',
enml)
# Convert elements that ENML does not allow into divs
return re.sub(ENML_DISABLED_TAGS_REGEX, r'<\1div>', enml)
def img_to_resource(note):
""" Convert images in the article into Resources and embed them in the Note
"""
images = {}
for img in re.finditer(r'<img.*?src="(.+?)".*?/>', note.content):
src = img.group(1)
try:
res = urllib.urlopen(src)
binary = res.read()
except Exception:
# On any fetch error, leave the plain img tag in place
continue
content_type = res.headers.get('content-type', '').split(';')[0]
if content_type.find('image/') != 0:
continue
# Images uploaded from IE sometimes have a Content-Type of image/pjpeg,
# which Evernote fails to display properly
# see: http://blog.netandfield.com/shar/2009/04/imagepjpeg.html
content_type = content_type.replace('pjpeg', 'jpeg')
md5 = hashlib.md5()
md5.update(binary)
binary_hash = md5.digest()
data = Types.Data()
data.size = len(binary)
data.bodyHash = binary_hash
data.body = binary
resource = Types.Resource()
resource.mime = content_type
resource.data = data
# Carry over the width/height info
match = re.search(r'width="(\d+)"', img.group(0))
if match:
resource.width = int(match.group(1))
match = re.search(r'height="(\d+)"', img.group(0))
if match:
resource.height = int(match.group(1))
images[img.group(0)] = resource
# Replace img tags with en-media tags
for k, v in images.items():
hash_hex = binascii.hexlify(v.data.bodyHash)
note.content = note.content.replace(
k,
'<en-media type="%s" hash="%s" width="%s" height="%s"></en-media>'
% (v.mime, hash_hex, v.width or '', v.height or ''))
note.resources = images.values()
return note
def create_note(entry):
""" Create an Evernote Note from the bookmark info
"""
client = EvernoteClient(
token=global_config['evernote']['token'], sandbox=False)
note_store = client.get_note_store()
note = Types.Note()
note.title = entry['title']
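# Strip U+2028/U+2029 line/paragraph separators, which upset Evernote note titles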
note.title = note.title.replace(unichr(int('2028', 16)), ' ')
note.title = note.title.replace(unichr(int('2029', 16)), ' ')
note.title = note.title.encode('utf-8')
content = (
u'<?xml version="1.0" encoding="UTF-8"?>'
u'<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
)
content += u'<en-note>'
if entry['summary']:
content += u'%s<hr />' % entry['summary']
content += to_enml(entry['content'], url=entry['url'])
content += u'</en-note>'
soup = BeautifulSoup(content)
note.content = str(soup)
attrs = Types.NoteAttributes(sourceURL=entry['url'])
note.attributes = attrs
note.tagNames = [e.encode('utf-8') for e in entry['tags']]
# Evernote timestamps are in milliseconds, so multiply by 1000
note.created = entry['created'] * 1000
note = img_to_resource(note)
note_store.createNote(note)
return note
def fetch_readability(url):
""" Fetch cleaned-up HTML from the Readability Parser API
"""
res = requests.get(
READABILITY_PARSER_API % {
'url': url,
'token': global_config['readability']['token']
})
res_json = res.json()
if res_json.get('content'):
body = to_unicode(res_json.get('content'))
return body
# If Readability cannot parse the page, say so in the note body
return u'<b>Could not parse the article</b>'
def to_unicode(content):
""" Convert multibyte strings in JSON into unicode
"""
num = len(content)
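# e.g. to_unicode('a&#x3042;b') == u'a\u3042b'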
words = ''
i = 0
while i < num:
if content[i] == '&':
if content[i:i + 3] == '&#x':
s_hex = ''
for j, c in enumerate(content[i + 3:], 4):
if c == ';':
break
s_hex += c
words += unichr(int(s_hex, 16))
i += j
continue
words += content[i]
i += 1
return words
def parse_config(filename):
""" Load the config file
"""
fp = os.path.expanduser('~/.h2e')
parser = SafeConfigParser()
parser.read(fp)
global_config.update({
'evernote': {'token': parser.get('evernote', 'token')},
'readability': {'token': parser.get('readability', 'token')},
})
def command():
""" Run the command
"""
yesterday = datetime.date.today() - datetime.timedelta(days=1)
parser = argparse.ArgumentParser(
description=u'Save the article bodies of Hatena Bookmark entries to Evernote')
parser.add_argument('hatenaid', help=u'Hatena user name whose bookmarks to collect')
parser.add_argument(
'--date', default=yesterday.strftime('%Y%m%d'),
help=(
u'Date to collect bookmarks for, in YYYYMMDD format, default: yesterday (%s)'
% yesterday.strftime('%Y%m%d')
))
parser.add_argument(
'--config', default='~/.h2e',
help=u'Path to the config file, default: ~/.h2e'
)
ns = parser.parse_args()
parse_config(ns.config)
# Run the collection
entries = fetch_entries(ns.hatenaid, ns.date)
print u'Got %s entries' % len(entries)
for entry in entries:
entry['content'] = fetch_readability(entry['url'])
print u'Fetch:', entry['title'], entry['url']
create_note(entry)
if __name__ == '__main__':
command()
|
mit
| -3,104,654,835,447,992,000 | -6,324,491,993,035,161,000 | 32.121622 | 79 | 0.555998 | false |
zooniverse/aggregation
|
docs/source/conf.py
|
1
|
9778
|
# -*- coding: utf-8 -*-
#
# Zooniverse Aggregation Engine documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 14 11:15:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock as MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zooniverse Aggregation Engine'
copyright = u'2016, Zooniverse'
author = u'Greg Hines'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.9'
# The full version, including alpha/beta/rc tags.
release = u'0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZooniverseAggregationEnginedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ZooniverseAggregationEngine.tex', u'Zooniverse Aggregation Engine Documentation',
u'Greg Hines', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zooniverseaggregationengine', u'Zooniverse Aggregation Engine Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ZooniverseAggregationEngine', u'Zooniverse Aggregation Engine Documentation',
author, 'ZooniverseAggregationEngine', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
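# Stub out heavy compiled dependencies so Sphinx autodoc can import the
# package without having them installed.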
MOCK_MODULES = ['shapely','pandas','numpy','scipy','cassandra-driver',"sklearn"]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
|
apache-2.0
| 2,155,205,762,797,739,500 | -6,468,737,946,701,100,000 | 31.596667 | 99 | 0.709143 | false |
bbqsrc/kbdgen
|
pysrc/kbdgen/gen/osxutil.py
|
2
|
30141
|
import copy
import json
import uuid
import pathlib
import itertools
import subprocess
import re
from collections import OrderedDict
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import Element, SubElement
from ..bundle import parse_desktop_layout
from ..base import get_logger
from ..cldr import CP_REGEX
logger = get_logger(__name__)
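# ISO-style key position names (row letter plus column; e.g. C01 is the
# ANSI "A" key) mapped to macOS virtual key codes.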
OSX_KEYMAP = OrderedDict(
(
("C01", "0"),
("C02", "1"),
("C03", "2"),
("C04", "3"),
("C06", "4"),
("C05", "5"),
("B01", "6"),
("B02", "7"),
("B03", "8"),
("B04", "9"),
("B00", "50"), # E00 flipped!
("B05", "11"),
("D01", "12"),
("D02", "13"),
("D03", "14"),
("D04", "15"),
("D06", "16"),
("D05", "17"),
("E01", "18"),
("E02", "19"),
("E03", "20"),
("E04", "21"),
("E06", "22"),
("E05", "23"),
("E12", "24"),
("E09", "25"),
("E07", "26"),
("E11", "27"),
("E08", "28"),
("E10", "29"),
("D12", "30"),
("D09", "31"),
("D07", "32"),
("D11", "33"),
("D08", "34"),
("D10", "35"),
# U WOT 36 - space yeah yeah
("C09", "37"),
("C07", "38"),
("C11", "39"),
("C08", "40"),
("C10", "41"),
("D13", "42"),
("B08", "43"),
("B10", "44"),
("B06", "45"),
("B07", "46"),
("B09", "47"),
# U WOT 48 - backspace yeah yeah
("A03", "49"),
("E00", "10"), # B00 flipped!
("E13", "93"),
("B11", "94"),
)
)
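# Key codes whose output macOS fixes to control characters: 36 = Return (CR),
# 48 = Tab, 51 = Delete (BS), 53 = Escape, plus arrow, keypad and function keys.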
OSX_HARDCODED = OrderedDict(
(
("36", r"\u{D}"),
("48", r"\u{9}"),
("51", r"\u{8}"),
("53", r"\u{1B}"),
("64", r"\u{10}"),
("66", r"\u{1D}"),
("70", r"\u{1C}"),
("71", r"\u{1B}"),
("72", r"\u{1F}"),
("76", r"\u{3}"),
("77", r"\u{1E}"),
("79", r"\u{10}"),
("80", r"\u{10}"),
("96", r"\u{10}"),
("97", r"\u{10}"),
("98", r"\u{10}"),
("99", r"\u{10}"),
("100", r"\u{10}"),
("101", r"\u{10}"),
("103", r"\u{10}"),
("105", r"\u{10}"),
("106", r"\u{10}"),
("107", r"\u{10}"),
("109", r"\u{10}"),
("111", r"\u{10}"),
("113", r"\u{10}"),
("114", r"\u{5}"),
("115", r"\u{1}"),
("116", r"\u{B}"),
("117", r"\u{7F}"),
("118", r"\u{10}"),
("119", r"\u{4}"),
("120", r"\u{10}"),
("121", r"\u{C}"),
("122", r"\u{10}"),
("123", r"\u{1C}"),
("124", r"\u{1D}"),
("125", r"\u{1F}"),
("126", r"\u{1E}"),
)
)
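# plutil(1) ships with macOS; shell out to it to convert pbxproj plists
# to and from JSON.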
def plutil_get_json(path):
cmd = "plutil -convert json -o -".split(" ")
cmd.append(path)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
json_str = process.communicate()[0].decode()
return json.loads(json_str, object_pairs_hook=OrderedDict)
def plutil_to_xml_str(json_obj):
cmd = "plutil -convert xml1 -o - -".split(" ")
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
return process.communicate(json.dumps(json_obj).encode())[0].decode()
class Pbxproj:
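"""In-memory editor for an Xcode project.pbxproj file.
The property list is round-tripped through plutil as JSON, mutated in
place, and serialized back to XML via str(). Object references are the
24-hex-digit keys that pbxproj files use internally.
"""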
@staticmethod
def gen_key():
return uuid.uuid4().hex[8:].upper()
def __init__(self, path):
self._proj = plutil_get_json(path)
def __str__(self):
return plutil_to_xml_str(self._proj)
@property
def objects(self):
return self._proj["objects"]
@property
def root(self):
return self.objects[self._proj["rootObject"]]
@property
def main_group(self):
return self.objects[self.root["mainGroup"]]
def find_ref_for_name(self, name, isa=None):
logger.trace("find_ref_for_name: %s %r" % (name, isa))
for ref, o in self.objects.items():
if o.get("name", None) == name and (
isa is None or o.get("isa", None) == isa
):
return ref
return None
def find_resource_build_phase(self, target_name):
logger.trace("find_resource_build_phase: %s" % target_name)
targets = [self.objects[t] for t in self.root["targets"]]
target = None
for t in targets:
if t["name"] == target_name:
target = t
break
if target is None:
return None
for build_phase in target["buildPhases"]:
phase = self.objects[build_phase]
if phase["isa"] == "PBXResourcesBuildPhase":
return phase
return None
def create_plist_string_variant(self, variants):
o = {
"isa": "PBXVariantGroup",
"children": variants,
"name": "InfoPlist.strings",
"sourceTree": "<group>",
}
return o
def add_plist_strings(self, locales):
plist_strs = [self.create_plist_string_file(l) for l in locales]
variant = self.create_plist_string_variant(plist_strs)
var_key = Pbxproj.gen_key()
self.objects[var_key] = variant
key = Pbxproj.gen_key()
self.objects[key] = {"isa": "PBXBuildFile", "fileRef": var_key}
return (var_key, key)
def add_plist_strings_to_build_phase(self, locales, target_name):
phase = self.find_resource_build_phase(target_name)
(var_ref, ref) = self.add_plist_strings(locales)
phase["files"].append(ref)
return var_ref
def find_variant_group(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXVariantGroup"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
return o
def set_target_build_setting(self, target, key, value):
logger.trace("set_target_build_setting: %r %r %r" % (target, key, value))
o = self.find_target(target)
build_cfg_list = self.objects[o["buildConfigurationList"]]
build_cfgs = [self.objects[x] for x in build_cfg_list["buildConfigurations"]]
for cfg in build_cfgs:
cfg["buildSettings"][key] = value
def set_target_package_id(self, target, new_id):
logger.trace("set_target_package_id: %r %r" % (target, new_id))
o = self.find_target(target)
build_cfg_list = self.objects[o["buildConfigurationList"]]
build_cfgs = [self.objects[x] for x in build_cfg_list["buildConfigurations"]]
for cfg in build_cfgs:
cfg["buildSettings"]["PRODUCT_BUNDLE_IDENTIFIER"] = new_id
def add_file_ref_to_variant_group(self, file_ref, variant_name):
variant = self.find_variant_group(variant_name)
variant["children"].append(file_ref)
return variant
def add_plist_strings_to_variant_group(self, locales, variant_name, target_name):
variant = self.find_variant_group(variant_name)
o = []
for locale in locales:
ref = self.create_plist_string_file(locale, target_name)
variant["children"].append(ref)
o.append(ref)
return o
def add_ref_to_group(self, ref, group_list):
logger.trace("add_ref_to_group: %r %r" % (ref, group_list))
o = self.main_group
n = False
for g in group_list:
for c in o["children"]:
co = self.objects[c]
if n:
break
if co.get("path", co.get("name", None)) == g:
o = co
n = True
if n:
n = False
continue
else:
# Create new group
ref = Pbxproj.gen_key()
self.objects[ref] = {
"isa": "PBXGroup",
"children": [],
"path": g,
"sourceTree": "<group>",
}
o["children"].append(ref)
n = False
o = self.objects[ref]
continue
o["children"].append(ref)
return True
def create_file_reference(self, file_type, locale, name, **kwargs):
logger.trace(
"create_file_reference: %r %r %r %r" % (file_type, locale, name, kwargs)
)
o = {
"isa": "PBXFileReference",
"lastKnownFileType": file_type,
"name": locale,
"path": "%s.lproj/%s" % (locale, name),
"sourceTree": "<group>",
}
o.update(kwargs)
k = Pbxproj.gen_key()
self.objects[k] = o
return k
def create_plist_file(self, plist_path):
logger.trace("create_plist_file: %r" % plist_path)
o = {
"isa": "PBXFileReference",
"lastKnownFileType": "text.plist.xml",
"name": pathlib.Path(plist_path).name,
"path": plist_path,
"sourceTree": "<group>",
}
k = Pbxproj.gen_key()
self.objects[k] = o
return k
def create_plist_string_file(self, locale, name="InfoPlist.strings"):
return self.create_file_reference("text.plist.strings", locale, name)
def create_text_file(self, locale, name):
return self.create_file_reference("text", locale, name)
def add_path(self, path_list, target=None):
if target is None:
target = self.main_group
for name in path_list:
children = [self.objects[r] for r in target["children"]]
for c in children:
if c.get("path", None) == name:
target = c
break
else:
ref = Pbxproj.gen_key()
o = {
"children": [],
"isa": "PBXGroup",
"path": name,
"sourceTree": "<group>",
}
self.objects[ref] = o
target["children"].append(ref)
target = self.objects[ref]
def clear_target_dependencies(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
# HACK: unclear; leaves dangling nodes
o["dependencies"] = []
def clear_target_embedded_binaries(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
target_o = o
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
break
else:
raise Exception("No src found.")
o["files"] = []
def create_container_item_proxy(self, container_portal, remote_ref, info):
ref = Pbxproj.gen_key()
self.objects[ref] = {
"isa": "PBXContainerItemProxy",
"containerPortal": container_portal,
"proxyType": "1",
"remoteGlobalIDString": remote_ref,
"remoteInfo": info,
}
logger.debug(self.objects[ref])
return ref
def create_target_dependency(self, proxy_ref, dep_ref):
ref = Pbxproj.gen_key()
self.objects[ref] = {
"isa": "PBXTargetDependency",
"targetProxy": proxy_ref,
"target": dep_ref,
}
logger.debug(self.objects[ref])
return ref
def add_dependency_to_target(self, target_ref, dep_ref):
target = self.objects[target_ref]
if target.get("dependencies", None) is None:
target["dependencies"] = []
target["dependencies"].append(dep_ref)
logger.debug(target)
def add_appex_to_target_dependencies(self, appex, target):
logger.debug("add_appex_to_target_dependencies: %s %s" % (appex, target))
# Find target
appex_ref = self.find_ref_for_name(appex, isa="PBXNativeTarget")
logger.debug("Appex ref: " + appex_ref)
# Create container proxy
proxy_ref = self.create_container_item_proxy(
self._proj["rootObject"], appex_ref, appex
)
logger.debug("Proxy ref: " + proxy_ref)
# Create target dependency
dep_ref = self.create_target_dependency(proxy_ref, appex_ref)
logger.debug("Target dep ref: " + dep_ref)
# Add to deps
target_ref = self.find_ref_for_name(target, isa="PBXNativeTarget")
logger.debug(target_ref)
self.add_dependency_to_target(target_ref, dep_ref)
def remove_appex_from_target_embedded_binaries(self, appex, target):
logger.trace(
"remove_appex_from_target_embedded_binaries: %s %s" % (appex, target)
)
for appex_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXFileReference"
and o.get("path", None) == appex
):
break
else:
raise Exception("No appex src found.")
logger.trace("appex_ref: %r" % appex_ref)
for appex_file_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXBuildFile"
and o.get("fileRef", None) == appex_ref
):
break
else:
raise Exception("No appex src found.")
for appex_native_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("productReference", None) == appex_ref
):
break
else:
raise Exception("No target src found.")
for native_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No target src found.")
logger.trace("native_ref: %r" % native_ref)
target_o = o
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
break
else:
raise Exception("No src found.")
# native_target = o
for target_dep_ref, o in self.objects.items():
if o.get("isa", None) == "PBXTargetDependency":
logger.trace(o)
if (
o.get("isa", None) == "PBXTargetDependency"
and o.get("target", None) == appex_native_ref
):
break
else:
raise Exception("No dependency target src found.")
# target_dep = o
target_o["dependencies"].remove(target_dep_ref)
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
o["files"].remove(appex_file_ref)
break
else:
raise Exception("No src found.")
# del self.objects[appex_ref]
def add_appex_to_target_embedded_binaries(self, appex, target):
logger.trace("add_appex_to_target_embedded_binaries: %s %s" % (appex, target))
for appex_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXFileReference"
and o.get("path", None) == appex
):
break
else:
raise Exception("No appex src found.")
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No target src found.")
target_o = o
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if (
o.get("isa", None) == "PBXCopyFilesBuildPhase"
and o.get("name", None) == "Embed App Extensions"
):
break
else:
raise Exception("No src found.")
ref = Pbxproj.gen_key()
appex_o = {
"isa": "PBXBuildFile",
"fileRef": appex_ref,
"settings": {"ATTRIBUTES": ["RemoveHeadersOnCopy"]},
}
self.objects[ref] = appex_o
o["files"].append(ref)
def find_target(self, target):
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
return o
else:
raise Exception("No src found.")
def add_source_ref_to_build_phase(self, ref, target):
logger.trace("add_source_ref_to_build_phase: %r %r" % (ref, target))
target_o = self.find_target(target)
for o in [self.objects[x] for x in target_o["buildPhases"]]:
if o.get("isa", None) == "PBXSourcesBuildPhase":
break
else:
raise Exception("No src found.")
nref = Pbxproj.gen_key()
self.objects[nref] = {"isa": "PBXBuildFile", "fileRef": ref}
o["files"].append(nref)
def remove_target(self, target):
logger.trace("remove_target: %r" % target)
for ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == target
):
break
else:
raise Exception("No src found.")
prod_ref = o["productReference"]
logger.trace("remove_target productReference: %r" % prod_ref)
del self.objects[o["productReference"]]
delete_refs = []
for target_ref, o in self.objects.items():
if (
o.get("isa", None) == "PBXTargetDependency"
and o.get("target", None) == ref
):
delete_refs.append(target_ref)
for dref in delete_refs:
del self.objects[dref]
for nref, o in self.objects.items():
if (
o.get("isa", None) == "PBXBuildFile"
and o.get("fileRef", None) == prod_ref
):
break
else:
raise Exception("No src found.")
for o in self.objects.values():
if o.get("isa", None) == "PBXGroup" and o.get("name", None) == "Products":
break
else:
raise Exception("No src found.")
o["children"].remove(prod_ref)
self.root["targets"].remove(ref)
del self.objects[ref]
def duplicate_target(self, src_name, dst_name, plist_path):
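"""Clone the native target src_name as dst_name, deep-copying its build
configurations and giving it its own <dst_name>.appex product reference."""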
logger.trace("duplicate_target: %r %r %r" % (src_name, dst_name, plist_path))
for o in self.objects.values():
if (
o.get("isa", None) == "PBXNativeTarget"
and o.get("name", None) == src_name
):
break
else:
raise Exception("No src found.")
base_clone = copy.deepcopy(o)
base_ref = Pbxproj.gen_key()
self.objects[base_ref] = base_clone
base_clone["name"] = dst_name
conf_ref = Pbxproj.gen_key()
conf_clone = copy.deepcopy(self.objects[base_clone["buildConfigurationList"]])
self.objects[conf_ref] = conf_clone
base_clone["buildConfigurationList"] = conf_ref
new_confs = []
for conf in conf_clone["buildConfigurations"]:
ref = Pbxproj.gen_key()
new_confs.append(ref)
self.objects[ref] = copy.deepcopy(self.objects[conf])
self.objects[ref]["buildSettings"]["INFOPLIST_FILE"] = plist_path
self.objects[ref]["buildSettings"]["PRODUCT_NAME"] = dst_name
self.objects[ref]["buildSettings"]["CODE_SIGN_STYLE"] = "Manual"
self.objects[ref]["buildSettings"]["ENABLE_BITCODE"] = "NO"
conf_clone["buildConfigurations"] = new_confs
appex_ref = Pbxproj.gen_key()
appex_clone = copy.deepcopy(self.objects[base_clone["productReference"]])
self.objects[appex_ref] = appex_clone
appex_clone["path"] = "%s.appex" % dst_name
base_clone["productReference"] = appex_ref
# PBXContainerItemProxy etc seem unaffected by leaving dependencies in
# base_clone['dependencies'] = []
self.add_ref_to_group(appex_ref, ["Products"])
self.root["targets"].append(base_ref)
return base_clone, appex_ref
def generate_osx_mods():
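"""Build the command-modifier mode table; e.g. the "cmd+alt" entry maps to
("command caps? anyOption anyShift?",)."""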
conv = OrderedDict(
(
("cmd", "command"),
("caps", "caps"),
("alt", "anyOption"),
("shift", "anyShift"),
)
)
def gen_conv(tpl):
tplo = []
for t, v in conv.items():
if t not in tpl:
v += "?"
tplo.append(v)
return tuple(tplo)
m = ("caps", "alt", "shift")
mods = (x for i in range(len(m)) for x in itertools.combinations(m, i))
o = OrderedDict()
for mod in mods:
mod = ("cmd",) + mod
o["+".join(mod)] = (" ".join(gen_conv(mod)),)
return o
class OSXKeyLayout:
doctype = (
'<!DOCTYPE keyboard PUBLIC "" '
+ '"file://localhost/System/Library/DTDs/KeyboardLayout.dtd">'
)
modes = OrderedDict(
(
("default", ("command?",)),
("shift", ("anyShift caps? command?",)),
("caps", ("caps",)),
("caps+shift", ("caps anyShift",)),
("alt", ("anyOption command?",)),
("alt+shift", ("anyOption anyShift caps? command?",)),
("caps+alt", ("caps anyOption command?",)),
("caps+alt+shift", ("caps anyOption anyShift command?",)),
("ctrl", ("anyShift? caps? anyOption? anyControl",)),
("cmd", ("command",)),
("cmd+shift", ("command anyShift",)),
)
)
modes.update(generate_osx_mods())
# TODO unused
required = ("default", "shift", "caps")
DEFAULT_CMD = parse_desktop_layout(
r"""
§ 1 2 3 4 5 6 7 8 9 0 - =
q w e r t y u i o p [ ]
a s d f g h j k l ; ' \
` z x c v b n m , . /
"""
)
DEFAULT_CMD_SHIFT = parse_desktop_layout(
r"""
± ! @ # $ % ^ & * ( ) _ +
Q W E R T Y U I O P { }
A S D F G H J K L : " |
~ Z X C V B N M < > ?
"""
)
def __bytes__(self):
"""XML almost; still encode the control chars. Death to standards!"""
# Convert
v = CP_REGEX.sub(lambda x: "&#x%04X;" % int(x.group(1), 16), str(self))
v = re.sub(
r"&(quot|amp|apos|lt|gt);",
lambda x: {
"&quot;": '"',
"&amp;": "&",
"&apos;": "'",
"&lt;": "<",
"&gt;": ">",
}[x.group(0)],
v,
)
return ('<?xml version="1.1" encoding="UTF-8"?>\n%s' % v).encode("utf-8")
def __str__(self):
root = copy.deepcopy(self.elements["root"])
actions = root.findall("actions")[0]
terminators = root.findall("terminators")[0]
if len(actions) == 0:
root.remove(actions)
if len(terminators) == 0:
root.remove(terminators)
return self.doctype + etree.tostring(
root, encoding="unicode"
)
def __init__(self, name, id_):
modifiers_ref = "modifiers"
mapset_ref = "default"
self.elements = {}
root = Element("keyboard", group="126", id=id_, name=name)
self.elements["root"] = root
self.elements["layouts"] = SubElement(root, "layouts")
SubElement(
self.elements["layouts"],
"layout",
first="0",
last="17",
mapSet=mapset_ref,
modifiers=modifiers_ref,
)
self.elements["modifierMap"] = SubElement(
root, "modifierMap", id=modifiers_ref, defaultIndex="0"
)
self.elements["keyMapSet"] = SubElement(root, "keyMapSet", id=mapset_ref)
self.elements["actions"] = SubElement(root, "actions")
self.elements["terminators"] = SubElement(root, "terminators")
self.key_cache = {}
self.kmap_cache = {}
self.action_cache = {}
class KeyIncrementer:
def __init__(self, prefix):
self.prefix = prefix
self.data = {}
self.c = 0
def has(self, key):
return key in self.data
def get(self, key):
if self.data.get(key, None) is None:
self.data[key] = self.c
self.c += 1
return "%s%03d" % (self.prefix, self.data[key])
self.states = KeyIncrementer("s")
self.actions = KeyIncrementer("a")
self._n = 0
def _add_modifier_map(self, mode):
mm = self.elements["modifierMap"]
kms = self.elements["keyMapSet"]
node = SubElement(mm, "keyMapSelect", mapIndex=str(self._n))
mods = self.modes.get(mode, None)
for mod in mods:
SubElement(node, "modifier", keys=mod)
self.kmap_cache[mode] = SubElement(kms, "keyMap", index=str(self._n))
self._n += 1
return self.kmap_cache[mode]
def _get_kmap(self, mode):
kmap = self.kmap_cache.get(mode, None)
if kmap is not None:
return kmap
return self._add_modifier_map(mode)
def _set_key(self, mode, key, key_id, action=None, output=None):
if action is not None and output is not None:
raise Exception("Cannot specify contradictory action and output.")
key_key = "%s %s" % (mode, key_id)
node = self.key_cache.get(key_key, None)
if node is None:
kmap_node = self._get_kmap(mode)
node = SubElement(kmap_node, "key", code=key_id)
self.key_cache[key_key] = node
if action is not None:
node.attrib["action"] = str(action)
if node.attrib.get("output", None) is not None:
del node.attrib["output"]
elif output is not None:
node.attrib["output"] = str(output)
if node.attrib.get("action", None) is not None:
del node.attrib["action"]
def _set_default_action(self, key):
action_id = self.actions.get(key) # "Key %s" % key
action = self.action_cache.get(action_id, None)
if action is None:
action = SubElement(self.elements["actions"], "action", id=action_id)
self.action_cache[action_id] = action
def _set_terminator(self, action_id, output):
termin = self.elements["terminators"].findall(
    'when[@state="%s"]' % action_id.replace('"', '&quot;')
)
if len(termin) == 0:
el = SubElement(self.elements["terminators"], "when")
el.set("state", action_id)
el.set("output", output)
def _set_default_transform(self, action_id, output):
action = self.action_cache.get(action_id, None)
# TODO create a generic create or get method for actions
if action is None:
logger.trace(
"Create default action - action:%r output:%r" % (action_id, output)
)
action = SubElement(self.elements["actions"], "action", id=action_id)
self.action_cache[action_id] = action
if len(action.findall('when[@state="none"]')) == 0:
logger.trace(
"Create 'none' when - action:%r output:%r" % (action_id, output)
)
el = SubElement(action, "when")
el.set("state", "none")
el.set("output", output)
def set_key(self, mode, key, key_id):
self._set_key(mode, key, key_id, output=key)
def set_deadkey(self, mode, key, key_id, output):
    """output is the output when the deadkey is followed by an invalid key"""
logger.trace("%r %r %r %r" % (mode, key, key_id, output))
action_id = self.actions.get(key) # "Key %s" % key
pressed_id = self.states.get(key) # "State %s" % key
self._set_key(mode, key, key_id, action=action_id)
# Create default action (set to pressed state)
self._set_default_action(key)
self._set_terminator(pressed_id, output)
def set_transform_key(self, mode, key, key_id):
action_id = self.actions.get(key) # "Key %s" % key
self._set_key(mode, key, key_id, action=action_id)
# Find action, add none state (move the output)
self._set_default_transform(action_id, key)
def add_transform(self, action_id, state, output=None, next=None):
action = self.action_cache.get(action_id, None)
if action is None:
raise Exception("'%s' was not a found action_id." % action_id)
if output is not None and next is not None:
raise Exception("Output and next cannot be simultaneously defined.")
if output is not None:
el = SubElement(action, "when")
el.set("state", state)
el.set("output", output)
elif next is not None:
el = SubElement(action, "when")
el.set("state", state)
el.set("next", next)
# logger.trace("%r" % el)
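# Editor's sketch (not part of the original source): minimal use of the
# OSXKeyLayout class defined above. The layout name and group id are
# invented, and the module's earlier imports (copy, re, etree) plus the
# CP_REGEX constant are assumed to be in scope as they are elsewhere here.
if __name__ == "__main__":
    demo = OSXKeyLayout("Demo Layout", "-1337")
    demo.set_key("default", "a", "0")  # key code 0 emits "a"
    demo.set_key("shift", "A", "0")    # same key with anyShift held
    print(bytes(demo).decode("utf-8")[:120])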
|
apache-2.0
| 2,331,730,478,783,618,600 | 6,612,165,828,562,578,000 | 30.199793 | 86 | 0.497329 | false |
kavardak/suds
|
suds/mx/encoded.py
|
211
|
4651
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides encoded I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.literal import Literal
from suds.mx.typer import Typer
from suds.sudsobject import Factory, Object
from suds.xsd.query import TypeQuery
log = getLogger(__name__)
#
# Add encoded extensions
# aty = The soap (section 5) encoded array type.
#
Content.extensions.append('aty')
class Encoded(Literal):
"""
A SOAP section (5) encoding marshaller.
This marshaller supports rpc/encoded soap styles.
"""
def start(self, content):
#
# For soap encoded arrays, the 'aty' (array type) information
# is extracted and added to the 'content'. Then, the content.value
# is replaced with an object containing an 'item=[]' attribute
# containing values that are 'typed' suds objects.
#
start = Literal.start(self, content)
if start and isinstance(content.value, (list,tuple)):
resolved = content.type.resolve()
for c in resolved:
if hasattr(c[0], 'aty'):
content.aty = (content.tag, c[0].aty)
self.cast(content)
break
return start
def end(self, parent, content):
#
# For soap encoded arrays, the soapenc:arrayType attribute is
# added with proper type and size information.
# Eg: soapenc:arrayType="xs:int[3]"
#
Literal.end(self, parent, content)
if content.aty is None:
return
tag, aty = content.aty
ns0 = ('at0', aty[1])
ns1 = ('at1', 'http://schemas.xmlsoap.org/soap/encoding/')
array = content.value.item
child = parent.getChild(tag)
child.addPrefix(ns0[0], ns0[1])
child.addPrefix(ns1[0], ns1[1])
name = '%s:arrayType' % ns1[0]
value = '%s:%s[%d]' % (ns0[0], aty[0], len(array))
child.set(name, value)
def encode(self, node, content):
if content.type.any():
Typer.auto(node, content.value)
return
if content.real.any():
Typer.auto(node, content.value)
return
ns = None
name = content.real.name
if self.xstq:
ns = content.real.namespace()
Typer.manual(node, name, ns)
def cast(self, content):
"""
Cast the I{untyped} list items found in content I{value}.
Each item contained in the list is checked for XSD type information.
Items (values) that are I{untyped} are replaced with suds objects and
type I{metadata} is added.
@param content: The content holding the collection.
@type content: L{Content}
@return: self
@rtype: L{Encoded}
"""
aty = content.aty[1]
resolved = content.type.resolve()
array = Factory.object(resolved.name)
array.item = []
query = TypeQuery(aty)
ref = query.execute(self.schema)
if ref is None:
raise TypeNotFound(aty)
for x in content.value:
if isinstance(x, (list, tuple)):
array.item.append(x)
continue
if isinstance(x, Object):
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
if isinstance(x, dict):
x = Factory.object(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
x = Factory.property(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
content.value = array
return self
|
lgpl-3.0
| -1,432,720,540,491,213,600 | 5,767,216,147,707,069,000 | 33.969925 | 78 | 0.588691 | false |
JuanMatSa/PyFME
|
src/pyfme/models/tests/test_euler_flat_earth.py
|
5
|
5115
|
# -*- coding: utf-8 -*-
"""
Tests of equations of euler flat earth model.
"""
import numpy as np
from pyfme.models.euler_flat_earth import (lamceq,
lamceq_jac,
kaeq,
kaeq_jac,
kleq)
def test1_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 1, 1, 1, 1, 1], dtype=float)
mass = 10
inertia = np.array([[1000, 0, -100],
[ 0, 100, 0],
[-100, 0, 100]], dtype=float)
forces = np.array([100., 100., 100.], dtype=float)
moments = np.array([100., 1000., 100], dtype=float)
expected_sol = np.array([10, 10, 10, 11./9, 1, 92./9], dtype=float)
sol = lamceq(time, vel, mass, inertia, forces,
moments)
assert(np.allclose(expected_sol, sol))
def test2_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 0, 1, 0, 1, 0], dtype=float)
mass = 10
inertia = np.array([[100, 0, -10],
[ 0, 100, 0],
[-10, 0, 100]], dtype=float)
forces = np.array([1000, 10, 10], dtype=float)
moments = np.array([100, 100, 100], dtype=float)
expected_sol = np.array([99, 1, 2, 10./9, 1, 10./9], dtype=float)
sol = lamceq(time, vel, mass, inertia, forces,
moments)
assert(np.allclose(expected_sol, sol))
def test1_jac_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 1, 1, 1, 1, 1], dtype=float)
mass = 10
inertia = np.array([[1000, 0, -100],
[ 0, 100, 0],
[-100, 0, 100]], dtype=float)
expected_sol = np.zeros([6, 6], dtype=float)
expected_sol[0, 1] = 1
expected_sol[0, 2] = - 1
expected_sol[0, 4] = - 1
expected_sol[0, 5] = 1
expected_sol[1, 0] = - 1
expected_sol[1, 2] = 1
expected_sol[1, 3] = 1
expected_sol[1, 5] = - 1
expected_sol[2, 0] = 1
expected_sol[2, 1] = - 1
expected_sol[2, 3] = - 1
expected_sol[2, 4] = 1
expected_sol[3, 3] = 10./9
expected_sol[3, 4] = 1
expected_sol[3, 5] = - 1./9
expected_sol[4, 3] = - 11
expected_sol[4, 5] = - 7
expected_sol[5, 3] = 91./9
expected_sol[5, 4] = 9
expected_sol[5, 5] = - 10./9
sol = lamceq_jac(time, vel, mass, inertia)
assert(np.allclose(expected_sol, sol))
def test2_jac_linear_and_angular_momentum_eqs():
time = 0
vel = np.array([1, 0, 1, 0, 1, 0], dtype=float)
mass = 10
inertia = np.array([[100, 0, -10],
[ 0, 100, 0],
[-10, 0, 100]], dtype=float)
expected_sol = np.zeros([6, 6], dtype=float)
expected_sol[0, 2] = - 1
expected_sol[0, 4] = - 1
expected_sol[1, 3] = 1
expected_sol[1, 5] = - 1
expected_sol[2, 0] = 1
expected_sol[2, 4] = 1
expected_sol[3, 3] = 10./99
expected_sol[3, 5] = - 1./99
expected_sol[5, 3] = 1./99
expected_sol[5, 5] = - 10./99
sol = lamceq_jac(time, vel, mass, inertia)
assert(np.allclose(expected_sol, sol))
def test1_kinematic_angular_eqs():
time = 0
euler_angles = np.array([np.pi / 4, np.pi / 4, 0])
ang_vel = np.array([1, 1, 1], dtype=float)
expected_sol = np.array([0, 1 + 2 ** 0.5, 2])
sol = kaeq(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test2_kinematic_angular_eqs():
time = 0
euler_angles = np.array([0, np.pi / 2, 0])
ang_vel = np.array([0, 1, 0], dtype=float)
expected_sol = np.array([0, 0, 1], dtype=float)
sol = kaeq(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test1_jac_kinematic_angular_eqs():
time = 0
euler_angles = np.array([np.pi / 4, np.pi / 4, 0])
ang_vel = np.array([1, 1, 1], dtype=float)
expected_sol = np.zeros([3, 3])
expected_sol[0, 1] = - 2 ** 0.5
expected_sol[1, 0] = 2 * 2 ** 0.5
expected_sol[2, 0] = 2
sol = kaeq_jac(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test2_jac_kinematic_angular_eqs():
time = 0
euler_angles = np.array([0, np.pi / 2, 0])
ang_vel = np.array([0, 1, 0], dtype=float)
expected_sol = np.zeros([3, 3], dtype=float)
expected_sol[0, 1] = - 1
expected_sol[1, 0] = 1
sol = kaeq_jac(time, euler_angles, ang_vel)
assert(np.allclose(expected_sol, sol))
def test1_navigation_eqs():
time = 0
lin_vel = np.array([1, 1, 1], dtype=float)
euler_angles = np.array([np.pi / 4, np.pi / 4, 0])
expected_sol = np.array([1 + (2 ** 0.5) / 2, 0, 1 - (2 ** 0.5) / 2])
sol = kleq(time, lin_vel, euler_angles)
assert(np.allclose(expected_sol, sol))
def test2_navigation_eqs():
time = 0
lin_vel = np.array([1, 0, 1], dtype=float)
euler_angles = np.array([0, np.pi / 2, 0])
expected_sol = np.array([1, - 1, 0], dtype=float)
sol = kleq(time, lin_vel, euler_angles)
assert(np.allclose(expected_sol, sol))
|
mit
| 6,671,520,329,149,897,000 | -326,688,641,077,385,340 | 24.833333 | 72 | 0.519453 | false |
plaice/Zebrackets
|
src/zebrackets/zebraFont.py
|
1
|
8776
|
#!/usr/bin/python3
# File zebraFont.py
#
# Copyright (c) Blanca Mancilla, John Plaice, 2015, 2016
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''zebraFont.py TYPE STYLE STRIPES SIZE FAMILY MAG
creates a new MetaFont file and then invokes it.
'''
import argparse
import glob
import io
import math
import os
import re
import subprocess
import shutil
import sys
import zebraFontFiles
import zebraHelp
class Parameters:
def __init__(self, kind, style, slots, family,
size, mag, texmfHome, checkArgs):
self.kind = zebraHelp.validate_kind(kind)
self.style = zebraHelp.validate_style(style)
self.slots = zebraHelp.validate_slots(slots)
self.slotsAsLetter = chr(ord('a') + self.slots)
self.family = zebraHelp.validate_family(family)
self.size = zebraHelp.validate_size(size)
zebraHelp.validate_family_size(family, size)
self.mag = zebraHelp.validate_mag(mag)
self.texmfHome = zebraHelp.validate_texmfhome(texmfHome)
self.checkArgs = checkArgs
def callAndLog(args, log):
try:
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, universal_newlines=True)
output = proc.stdout.read()
if output != '':
log.append(output)
except subprocess.CalledProcessError:
raise zebraHelp.CompError('System died when calling {0}'.format(*args))
def createMFcontent(kind, style, slots, sourceFont):
'''This method creates the font file's header, returning it as a string.
'''
styledict = { 'b' : '0', 'f' : '1', 'h' : '2' }
textFormat = '''% Copied from rtest on p.311 of the MetaFont book.
if unknown cmbase: input cmbase fi
mode_setup;
def generate suffix t = enddef;
input {0}; font_setup;
let iff = always_iff;
slots:={1};
foreground:={2};
input zeroman{3};'''
text = textFormat.format(
sourceFont, slots,
styledict[style], kind)
return text
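# Editor's note (illustrative, not in the original): for a foreground bracket
# font with 4 slots over cmr10, createMFcontent('b', 'f', 4, 'cmr10') returns
# a header ending in the lines "slots:=4;", "foreground:=1;" and
# "input zeromanb;".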
def checkAndCreateFont(fileName, destMFdir, fileContent, texmfHome, log):
# Create the font file only if kpsewhich cannot already find it,
# then write the content into the new file.
fileNameMF = '{0}.mf'.format(fileName)
try:
subprocess.check_output(['kpsewhich', fileNameMF])
except subprocess.CalledProcessError:
destMFpath = '{0}/{1}.mf'.format(destMFdir, fileName)
with open(destMFpath, 'w') as fileMF:
fileMF.write(fileContent)
callAndLog(['mktexlsr', texmfHome], log)
def createMFfiles(params):
# Set up of diretories and files names
sourceFont = '{0}{1}'.format(params.family, int(params.size))
destMFdir = '{0}/fonts/source/public/zbtex'.format(params.texmfHome)
destTFMdir = '{0}/fonts/tfm/public/zbtex'.format(params.texmfHome)
destPKdir = '{0}/fonts/pk/ljfour/public/zbtex'.format(params.texmfHome)
destMF = 'z{0}{1}{2}{3}'.format(
params.kind, params.style,
params.slotsAsLetter, sourceFont)
destMFpath = '{0}/{1}.mf'.format(destMFdir, destMF)
textMFfile = createMFcontent(
params.kind, params.style,
params.slots, sourceFont)
# Check that the master font exists in the TeX ecosystem.
try:
subprocess.check_output(['kpsewhich', '{0}.mf'.format(sourceFont)])
except subprocess.CalledProcessError:
raise zebraHelp.CompError('File "{0}.mf" does not exist'.format(sourceFont))
# Create the directory where font files will be stored for this run.
try:
os.makedirs(destMFdir)
except FileExistsError:
pass
zbtexFontsLog = []
# Font file creation is factored out into checkAndCreateFont above:
# each file is written only if kpsewhich cannot already find it.
checkAndCreateFont(
destMF, destMFdir, textMFfile, params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zepunctb', destMFdir, zebraFontFiles.str_zepunctb,
params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zepunctp', destMFdir, zebraFontFiles.str_zepunctp,
params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zeromanb', destMFdir, zebraFontFiles.str_zeromanb,
params.texmfHome, zbtexFontsLog)
checkAndCreateFont(
'zeromanp', destMFdir, zebraFontFiles.str_zeromanp,
params.texmfHome, zbtexFontsLog)
# Check that the main font exists; if not,
# generate the TFM font and install the file, then
# regenerate the ls-R database used by the kpathsea library.
try:
subprocess.check_output(['kpsewhich', '{0}.tfm'.format(destMF)])
except subprocess.CalledProcessError:
callAndLog(['mktextfm', destMF], zbtexFontsLog)
callAndLog(
['mktexlsr', params.texmfHome], zbtexFontsLog)
if int(params.mag) != 1:
dpi = params.mag * 600
try:
subprocess.check_output(
['kpsewhich', '{0}.{1}pk'.format(destMF, dpi)])
except subprocess.CalledProcessError:
try:
proc = subprocess.Popen(
['kpsewhich', '{0}.600pk'.format(destMF)],
stdout=subprocess.PIPE, universal_newlines=True)
except subprocess.CalledProcessError:
raise zebraHelp.CompError('Could not find file {0}.600pk'.
format(destMF))
dpidir = re.sub('/[^/]*$', '', proc.stdout.read())
callAndLog(['mf-nowin',
'-progname=mf',
'\\mode:=ljfour; mag:={0}; nonstopmode; input {1}'.
format(math.sqrt(float(params.mag)), destMF)],
zbtexFontsLog)
callAndLog(['gftopk',
'{0}.{1}gf'.format(destMF, dpi),
'{0}.{1}pk'.format(destMF, dpi)],
zbtexFontsLog)
shutil.move('{0}.{1}pk'.format(destMF, dpi), dpidir)
callAndLog(['mktexlsr', params.texmfHome], zbtexFontsLog)
for file in glob.glob('{0}.*'.format(destMF)):
os.unlink(file)
with open('zbtexfonts.log', 'a') as zbtexLogFile:
for string in zbtexFontsLog:
zbtexLogFile.write(string)
def zebraFont(kind, style, slots, family,
size, mag, texmfHome, checkArgs):
try:
parameters = Parameters(kind, style, slots, family,
size, mag, texmfHome, checkArgs)
if checkArgs is False:
createMFfiles(parameters)
return zebraHelp.Result(True, "")
except zebraHelp.ArgError as e:
return zebraHelp.Result(False, "zebraFont ArgError: " + e)
except zebraHelp.CompError as e:
return zebraHelp.Result(False, "zebraFont CompError: " + e)
def zebraFontParser(inputArguments = sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Build a zebrackets font.',
epilog="This module is part of the zebrackets package.")
parser.add_argument('--kind', type=str, choices=zebraHelp.validKinds,
required=True, help='b = bracket, p = parenthesis')
parser.add_argument('--style', type=str, choices=zebraHelp.validStyles,
required=True, help='b = background, f = foreground, h = hybrid')
parser.add_argument('--slots', type=int,
required=True, choices=zebraHelp.validSlots,
help='number of slots in brackets')
parser.add_argument('--family', type=str,
choices=zebraHelp.validFontFamilies,
required=True, help='font family')
parser.add_argument('--size', type=int,
choices=zebraHelp.validFontSizes,
required=True, help='font size')
parser.add_argument('--mag', type=int,
default=1, help='magnification')
parser.add_argument('--texmfhome', type=str,
help='substitute for variable TEXMFHOME')
parser.add_argument('--checkargs', action='store_true',
help='check validity of input arguments')
args = parser.parse_args(inputArguments)
return zebraFont(args.kind, args.style, args.slots, args.family,
args.size, args.mag, args.texmfhome, args.checkargs)
if __name__ == '__main__':
zebraFontParser()
|
gpl-3.0
| 2,148,408,199,908,428,300 | 5,372,196,503,259,373,000 | 38.35426 | 80 | 0.635939 | false |
Tallefer/karaka
|
karaka/api/api.py
|
4
|
6215
|
#
# Karaka Skype-XMPP Gateway: Customer API
# <http://www.vipadia.com/products/karaka.html>
#
# Copyright (C) 2008-2009 Vipadia Limited
# Richard Mortier <[email protected]>
# Neil Stratford <[email protected]>
#
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License version
## 2 as published by the Free Software Foundation.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License version 2 for more details.
## You should have received a copy of the GNU General Public License
## version 2 along with this program; if not, write to the Free
## Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
## MA 02110-1301, USA.
import time
import MySQLdb
##
## Copied from common.py
## DO NOT EDIT
##
import syslog
def _log(level, mesg):
if "\n" in mesg: mesgs = mesg.split("\n")
else: mesgs = [mesg]
pfx = ""
for mesg in mesgs:
while len(mesg) > 254:
syslog.syslog(level, "%s%s" % (pfx, mesg[:254].encode("utf-8"),))
mesg = "%s" % mesg[254:]
pfx = "||"
syslog.syslog(level, "%s%s" % (pfx, mesg.encode("utf-8")))
pfx = "|"
def _dbg(s): _log(syslog.LOG_DEBUG, s)
##
## End
##
from apiconfig import APIConfig
## # Crypto - KeyCzar
## from keyczar import keyczar
## PRIVATE_KEYLOC="/etc/karaka/keys/private/"
## PUBLIC_KEYLOC="/etc/karaka/keys/public/"
# Debug
Debug = 6
def dbg(s, l=0):
if Debug > l: _dbg(s)
##
## Database API
## Invoked by MASTER to persist registrations and CDRs
##-----------------------------------------------------
class DatabaseAPI:
def __init__(self):
self.config = APIConfig()
self.conn = MySQLdb.connect(
self.config.sql_server, self.config.sql_user, self.config.sql_password,
self.config.sql_database)
self.conn.autocommit(True)
def _invoke(self, cmd, args=None):
dbg("_invoke: cmd:%s args:%s" % (cmd, args), 5)
cursor = self.conn.cursor()
nrows = cursor.execute(cmd, args)
rows = cursor.fetchall()
cursor.close()
dbg(" nrows:%s rows:%s" % (nrows, rows,), 5)
return (nrows, rows)
## set_credentials_plain(user-jid, skype-handle, skype-secret) -> (bool, reason)
## insert credentials into the database for this user
def set_credentials_plain(self, userjid, skypeuser, skypesecret):
# Encrypt before writing to DB
dbg("set_credentials: userjid:%s skypeuser:%s skypesecret:*" % (
userjid, skypeuser), 4)
## KeyCzar
## crypter = keyczar.Encrypter.Read(PUBLIC_KEYLOC)
## skypesecret = crypter.Encrypt(str(skypesecret))
dbg(" encrypt(skypesecret):%s" % (skypesecret,), 4)
cmd = "INSERT INTO registrations (userjid, user, secret) VALUES (%s, %s, %s)"
args = (userjid, skypeuser, skypesecret,)
(cnt, res) = self._invoke(cmd, args)
dbg(" cnt:%d res:%s" % (cnt, res), 4)
return (True, "Success")
## remove_credentials(user-jid)
## delete credentials from the database for this user
def remove_credentials(self, userjid):
dbg("remove_credentials: userjid:%s" % (userjid,), 4)
cmd = "DELETE FROM registrations WHERE userjid=%s"
args = (userjid,)
(cnt, res) = self._invoke(cmd, args)
dbg(" cnt:%d res:%s" % (cnt, res), 4)
## get_credentials_crypt(user-jid) -> (skype-user, encrypted-skype-password)
## retrieve credentials (encrypted password) for this user
def get_credentials_crypt(self, userjid):
dbg("get_credentials: userjid:%s" % (userjid,), 4)
cmd = "SELECT user, secret FROM registrations WHERE userjid=%s"
args = (userjid,)
(cnt, res) = self._invoke(cmd, args)
dbg(" cnt:%d res:%s" % (cnt, res), 4)
if not res: return res
return (res[0][0], res[0][1])
## get_marketing_message(user-jid)
## retrieve mood message prefix
def get_marketing_message(self, userjid):
dbg("get_marketing_message: userjid:%s" % (userjid,), 4)
return self.config.marketing_message
## log_start(user-jid, skype-user)
## record the start event for a user signing in
def log_start(self, userjid, skypehandle):
dbg("log_start: user:%s skypehandle:%s" % (userjid, skypehandle), 4)
now = time.time()
cmd = "INSERT INTO log (userjid, skypehandle, at, event, message) " \
+ " VALUES (%s,%s,%s,%s,%s)"
args = (userjid, skypehandle, now, "start", "")
self._invoke(cmd, args)
## log_stop(user-jid, skype-user)
## record the stop event for a user signing out
def log_stop(self, userjid, skypehandle):
dbg("log_stop: user:%s skypehandle:%s" % (userjid, skypehandle), 4)
now = time.time()
cmd = "INSERT INTO log (userjid, skypehandle, at, event, message) " \
+ " VALUES (%s,%s,%s,%s,%s)"
args = (userjid, skypehandle, now, "stop", "")
self._invoke(cmd, args)
## log_error(user-jid, skype-user, errormsg)
## record an error event
def log_error(self, userjid, skypehandle, errormsg):
dbg("log_error: user:%s skypehandle:%s errormsg:%s" % (userjid, skypehandle, errormsg), 4)
now = time.time()
cmd = "INSERT INTO log (userjid, skypehandle, at, event, message) " \
+ " VALUES (%s,%s,%s,%s,%s)"
args = (userjid, skypehandle, now, "error", errormsg)
self._invoke(cmd, args)
#
# Cryptography API
# Invoked by individual BUNDLE to decode credentials
#----------------------------------------------------
class CryptoAPI:
def __init__(self): pass
## decrypt(encrypted-skype-password) -> skype-password
## decrypt the given input password
def decrypt(self, inputtext):
## KeyCzar
## crypter = keyczar.Crypter.Read(PRIVATE_KEYLOC)
## return crypter.Decrypt(inputtext)
return inputtext
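# Editor's sketch (not in the original gateway): DatabaseAPI needs a live
# MySQL server, so only the crypto half is exercised here; the password is
# made up.
if __name__ == "__main__":
    crypto = CryptoAPI()
    print(crypto.decrypt("hunter2"))  # pass-through until KeyCzar is enabled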
|
gpl-2.0
| -1,249,359,697,403,012,900 | 5,886,721,365,118,097,000 | 34.718391 | 98 | 0.599517 | false |
zhenwendai/RGP
|
gpnarx.py
|
1
|
1997
|
from __future__ import print_function
import GPy
import numpy as np
def transformTimeSeriesToSeq(Y, timeWindow):
Ntr,D = Y.shape
blocksNumber = Ntr - timeWindow
X = np.zeros((blocksNumber, timeWindow*D))
Ynew = np.zeros((blocksNumber,D))
for i in range(blocksNumber):
tmp = Y[i:i+timeWindow,:].T
X[i,:] = tmp.flatten().T
Ynew[i,:] = Y[i+timeWindow,:]
return X, Ynew
def transformSeqToTimeSeries(X, Y, timeWindow):
assert(X.shape[0] == Y.shape[0])
N = X.shape[0] + timeWindow
D = X.shape[1] // timeWindow  # integer division: D is used as an array dimension
Ynew = np.zeros((N, D))
for i in range(X.shape[0]):
Ynew[i:i+timeWindow, :] = X[i,:].reshape(D, timeWindow).T
Ynew[-1,:] = Y[-1,:]
return Ynew
def test_transformSeries(Y, timeWindow):
(xx,yy) = transformTimeSeriesToSeq(Y, timeWindow)
return transformSeqToTimeSeries(xx,yy,timeWindow)
def gp_narx(m, x_start, N, Uts, ws, Ydebug=None):
D = m.output_dim
Q = x_start.shape[1]
Y = np.empty((N,D,))
Y[:] = np.NAN
varY = Y.copy()
assert(Q%ws==0)
assert(D == Q/ws)
Xnew = m.X.copy()
Ynew = m.Y.copy()
curX = x_start
varYpred = None
for i in range(N):
# Make sure curX is a matrix of shape (1, Q) and not (Q,)
if len(curX.shape) < 2:
curX = curX.reshape(1,curX.shape[0])
varYpred_prev = varYpred
#Ypred, varYpred = m._raw_predict(np.hstack((curX,curU)))
#curU = Uts[i,:]
#Ypred, varYpred = m._raw_predict(np.hstack((curX,Uts[i,:][None,:])))
Ypred, varYpred = m.predict(np.hstack((curX,Uts[i,:][None,:])))
Y[i,:] = Ypred
varY[i,:] = varYpred
#print i, ': ', Y[i,:] , ' | var: ', varYpred #####
if Ydebug is not None:
print(i, ': X=', str(curX.flatten()), 'U=', str(Uts[i,:].flatten()), 'Y=', str(Ydebug[i,:]))
if i == N-1:
break
curX = np.hstack((curX[0,D:], Ypred[0,:]))
return Y, varY
|
bsd-3-clause
| 3,276,927,869,561,655,000 | 8,117,333,669,473,364,000 | 26 | 105 | 0.548322 | false |
maartenq/ansible
|
lib/ansible/modules/network/netscaler/netscaler_service.py
|
67
|
31451
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_service
short_description: Manage service configuration in Netscaler
description:
- Manage service configuration in Netscaler.
- This module allows the creation, deletion and modification of Netscaler services.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
- This module supports check mode.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- >-
Name for the service. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Cannot be changed after the service has been created.
- "Minimum length = 1"
ip:
description:
- "IP to assign to the service."
- "Minimum length = 1"
servername:
description:
- "Name of the server that hosts the service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol in which data is exchanged with the service."
port:
description:
- "Port number of the service."
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
cleartextport:
description:
- >-
Port to which clear text data must be sent after the appliance decrypts incoming SSL traffic.
Applicable to transparent SSL services.
- "Minimum value = 1"
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections to the service."
- "Minimum value = 0"
- "Maximum value = 4294967294"
healthmonitor:
description:
- "Monitor the health of this service"
default: yes
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = 0"
- "Maximum value = 65535"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward requests to the cache server."
- "Note: Do not specify this parameter if you set the Cache Type parameter."
default: no
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Before forwarding a request to the service, insert an HTTP header with the client's IPv4 or IPv6
address as its value. Used if the server needs the client's IP address for security, accounting, or
other purposes, and setting the Use Source IP parameter is not a viable option.
cipheader:
description:
- >-
Name for the HTTP header whose value must be set to the IP address of the client. Used with the
Client IP parameter. If you set the Client IP parameter, and you do not specify a name for the
header, the appliance uses the header name specified for the global Client IP Header parameter (the
cipHeader parameter in the set ns param CLI command or the Client IP Header parameter in the
Configure HTTP Parameters dialog box at System > Settings > Change HTTP parameters). If the global
Client IP Header parameter is not specified, the appliance inserts a header with the name
"client-ip.".
- "Minimum length = 1"
usip:
description:
- >-
Use the client's IP address as the source IP address when initiating a connection to the server. When
creating a service, if you do not set this parameter, the service inherits the global Use Source IP
setting (available in the enable ns mode and disable ns mode CLI commands, or in the System >
Settings > Configure modes > Configure Modes dialog box). However, you can override this setting
after you create the service.
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP (USIP) parameter is set to YES."
sp:
description:
- "Enable surge protection for the service."
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service."
default: off
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = 0"
- "Maximum value = 31536000"
customserverid:
description:
- >-
Unique identifier for the service. Used when the persistency type for the virtual server is set to
Custom Server ID.
default: 'None'
serverid:
description:
- "The identifier for the service. This is used when the persistency type is set to Custom Server ID."
cka:
description:
- "Enable client keep-alive for the service."
tcpb:
description:
- "Enable TCP buffering for the service."
cmp:
description:
- "Enable compression for the service."
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated to the service."
- "Minimum value = 0"
- "Maximum value = 4294967287"
accessdown:
description:
- >-
Use Layer 2 mode to bridge the packets sent to this service if it is marked as DOWN. If the service
is DOWN, and this parameter is disabled, the packets are dropped.
default: no
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = 0"
- "Maximum value = 65535"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with a service whose state transitions from UP to DOWN. Do
not enable this option for applications that must complete their transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service."
- "Minimum length = 1"
- "Maximum length = 127"
hashid:
description:
- >-
A numerical identifier that can be used by hash based load balancing methods. Must be unique for each
service.
- "Minimum value = 1"
comment:
description:
- "Any information about the service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information."
netprofile:
description:
- "Network profile to use for the service."
- "Minimum length = 1"
- "Maximum length = 127"
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = 0"
- "Maximum value = 4094"
processlocal:
choices:
- 'enabled'
- 'disabled'
description:
- >-
By turning on this option packets destined to a service in a cluster will not undergo any steering.
Turn this option on for single packet request response mode or when the upstream device is performing a
proper RSS for connection based distribution.
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the service. DNS profile properties will be applied to the
transactions processed by a service. This parameter is valid only for ADNS and ADNS-TCP services.
- "Minimum length = 1"
- "Maximum length = 127"
ipaddress:
description:
- "The new IP address of the service."
graceful:
description:
- >-
Shut down gracefully, not accepting any new connections, and disabling the service when all of its
connections are closed.
default: no
monitor_bindings:
description:
- A list of load balancing monitors to bind to this service.
- Each monitor entry is a dictionary which may contain the following options.
- Note that if not using the built in monitors they must first be setup.
suboptions:
monitorname:
description:
- Name of the monitor.
weight:
description:
- Weight to assign to the binding between the monitor and service.
dup_state:
choices:
- 'enabled'
- 'disabled'
description:
- State of the monitor.
- The state setting for a monitor of a given type affects all monitors of that type.
- For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled.
- If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
dup_weight:
description:
- Weight to assign to the binding between the monitor and service.
disabled:
description:
- When set to C(yes) the service state will be set to DISABLED.
- When set to C(no) the service state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# Monitor monitor-1 must have been already setup
- name: Setup http service
gather_facts: False
delegate_to: localhost
netscaler_service:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
name: service-http-1
servicetype: HTTP
ipaddress: 10.78.0.1
port: 80
monitor_bindings:
- monitor-1
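
# Editor's sketch (illustrative, not upstream): the dictionary form of
# monitor_bindings, using the suboptions documented above; the monitor name
# and weight are invented.
- name: Setup http service with weighted monitor binding
  gather_facts: False
  delegate_to: localhost
  netscaler_service:
    nsip: 172.18.0.2
    nitro_user: nsroot
    nitro_pass: nsroot
    state: present
    name: service-http-2
    servicetype: HTTP
    ipaddress: 10.78.0.2
    port: 80
    monitor_bindings:
      - monitorname: monitor-1
        weight: 50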
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
diff:
description: A dictionary with a list of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: "{ 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }"
'''
import copy
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_service_binding import lbmonitor_service_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines,
get_immutables_intersection)
def service_exists(client, module):
if service.count_filtered(client, 'name:%s' % module.params['name']) > 0:
return True
else:
return False
def service_identical(client, module, service_proxy):
service_list = service.get_filtered(client, 'name:%s' % module.params['name'])
diff_dict = service_proxy.diff_object(service_list[0])
# the actual ip address is stored in the ipaddress attribute
# of the retrieved object
if 'ip' in diff_dict:
del diff_dict['ip']
if len(diff_dict) == 0:
return True
else:
return False
def diff(client, module, service_proxy):
service_list = service.get_filtered(client, 'name:%s' % module.params['name'])
diff_object = service_proxy.diff_object(service_list[0])
if 'ip' in diff_object:
del diff_object['ip']
return diff_object
def get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs):
bindings = {}
if module.params['monitor_bindings'] is not None:
for binding in module.params['monitor_bindings']:
attribute_values_dict = copy.deepcopy(binding)
# attribute_values_dict['servicename'] = module.params['name']
attribute_values_dict['servicegroupname'] = module.params['name']
binding_proxy = ConfigProxy(
actual=lbmonitor_service_binding(),
client=client,
attribute_values_dict=attribute_values_dict,
readwrite_attrs=monitor_bindings_rw_attrs,
)
key = binding_proxy.monitorname
bindings[key] = binding_proxy
return bindings
def get_actual_monitor_bindings(client, module):
bindings = {}
if service_lbmonitor_binding.count(client, module.params['name']) == 0:
return bindings
# Fallthrough to rest of execution
for binding in service_lbmonitor_binding.get(client, module.params['name']):
# Excluding default monitors since we cannot operate on them
if binding.monitor_name in ('tcp-default', 'ping-default'):
continue
key = binding.monitor_name
actual = lbmonitor_service_binding()
actual.weight = binding.weight
actual.monitorname = binding.monitor_name
actual.dup_weight = binding.dup_weight
actual.servicename = module.params['name']
bindings[key] = actual
return bindings
def monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
actual_bindings = get_actual_monitor_bindings(client, module)
configured_key_set = set(configured_proxys.keys())
actual_key_set = set(actual_bindings.keys())
symmetrical_diff = configured_key_set ^ actual_key_set
if len(symmetrical_diff) > 0:
return False
# Compare key to key
for monitor_name in configured_key_set:
proxy = configured_proxys[monitor_name]
actual = actual_bindings[monitor_name]
diff_dict = proxy.diff_object(actual)
if 'servicegroupname' in diff_dict:
if proxy.servicegroupname == actual.servicename:
del diff_dict['servicegroupname']
if len(diff_dict) > 0:
return False
# Fallthrough to success
return True
def sync_monitor_bindings(client, module, monitor_bindings_rw_attrs):
configured_proxys = get_configured_monitor_bindings(client, module, monitor_bindings_rw_attrs)
actual_bindings = get_actual_monitor_bindings(client, module)
configured_keyset = set(configured_proxys.keys())
actual_keyset = set(actual_bindings.keys())
# Delete extra
delete_keys = list(actual_keyset - configured_keyset)
for monitor_name in delete_keys:
log('Deleting binding for monitor %s' % monitor_name)
lbmonitor_service_binding.delete(client, actual_bindings[monitor_name])
# Delete and re-add modified
common_keyset = list(configured_keyset & actual_keyset)
for monitor_name in common_keyset:
proxy = configured_proxys[monitor_name]
actual = actual_bindings[monitor_name]
if not proxy.has_equal_attributes(actual):
log('Deleting and re adding binding for monitor %s' % monitor_name)
lbmonitor_service_binding.delete(client, actual)
proxy.add()
# Add new
new_keys = list(configured_keyset - actual_keyset)
for monitor_name in new_keys:
log('Adding binding for monitor %s' % monitor_name)
configured_proxys[monitor_name].add()
def all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
return service_identical(client, module, service_proxy) and monitor_bindings_identical(client, module, monitor_bindings_rw_attrs)
def do_state_change(client, module, service_proxy):
if module.params['disabled']:
log('Disabling service')
result = service.disable(client, service_proxy.actual)
else:
log('Enabling service')
result = service.enable(client, service_proxy.actual)
return result
def main():
module_specific_arguments = dict(
name=dict(type='str'),
ip=dict(type='str'),
servername=dict(type='str'),
servicetype=dict(
type='str',
choices=[
'HTTP',
'FTP',
'TCP',
'UDP',
'SSL',
'SSL_BRIDGE',
'SSL_TCP',
'DTLS',
'NNTP',
'RPCSVR',
'DNS',
'ADNS',
'SNMP',
'RTSP',
'DHCPRA',
'ANY',
'SIP_UDP',
'SIP_TCP',
'SIP_SSL',
'DNS_TCP',
'ADNS_TCP',
'MYSQL',
'MSSQL',
'ORACLE',
'RADIUS',
'RADIUSListener',
'RDP',
'DIAMETER',
'SSL_DIAMETER',
'TFTP',
'SMPP',
'PPTP',
'GRE',
'SYSLOGTCP',
'SYSLOGUDP',
'FIX',
'SSL_FIX'
]
),
port=dict(type='int'),
cleartextport=dict(type='int'),
cachetype=dict(
type='str',
choices=[
'TRANSPARENT',
'REVERSE',
'FORWARD',
]
),
maxclient=dict(type='float'),
healthmonitor=dict(
type='bool',
default=True,
),
maxreq=dict(type='float'),
cacheable=dict(
type='bool',
default=False,
),
cip=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
cipheader=dict(type='str'),
usip=dict(type='bool'),
useproxyport=dict(type='bool'),
sp=dict(type='bool'),
rtspsessionidremap=dict(
type='bool',
default=False,
),
clttimeout=dict(type='float'),
svrtimeout=dict(type='float'),
customserverid=dict(
type='str',
default='None',
),
cka=dict(type='bool'),
tcpb=dict(type='bool'),
cmp=dict(type='bool'),
maxbandwidth=dict(type='float'),
accessdown=dict(
type='bool',
default=False
),
monthreshold=dict(type='float'),
downstateflush=dict(
type='str',
choices=[
'enabled',
'disabled',
],
),
tcpprofilename=dict(type='str'),
httpprofilename=dict(type='str'),
hashid=dict(type='float'),
comment=dict(type='str'),
appflowlog=dict(
type='str',
choices=[
'enabled',
'disabled',
],
),
netprofile=dict(type='str'),
processlocal=dict(
type='str',
choices=[
'enabled',
'disabled',
],
),
dnsprofilename=dict(type='str'),
ipaddress=dict(type='str'),
graceful=dict(
type='bool',
default=False,
),
)
hand_inserted_arguments = dict(
monitor_bindings=dict(type='list'),
disabled=dict(
type='bool',
default=False,
),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Fallthrough to rest of execution
# Instantiate Service Config object
readwrite_attrs = [
'name',
'ip',
'servername',
'servicetype',
'port',
'cleartextport',
'cachetype',
'maxclient',
'healthmonitor',
'maxreq',
'cacheable',
'cip',
'cipheader',
'usip',
'useproxyport',
'sp',
'rtspsessionidremap',
'clttimeout',
'svrtimeout',
'customserverid',
'cka',
'tcpb',
'cmp',
'maxbandwidth',
'accessdown',
'monthreshold',
'downstateflush',
'tcpprofilename',
'httpprofilename',
'hashid',
'comment',
'appflowlog',
'netprofile',
'processlocal',
'dnsprofilename',
'ipaddress',
'graceful',
]
readonly_attrs = [
'numofconnections',
'policyname',
'serviceconftype',
'serviceconftype2',
'value',
'gslb',
'dup_state',
'publicip',
'publicport',
'svrstate',
'monitor_state',
'monstatcode',
'lastresponse',
'responsetime',
'riseapbrstatsmsgcode2',
'monstatparam1',
'monstatparam2',
'monstatparam3',
'statechangetimesec',
'statechangetimemsec',
'tickssincelaststatechange',
'stateupdatereason',
'clmonowner',
'clmonview',
'serviceipstr',
'oracleserverversion',
]
immutable_attrs = [
'name',
'ip',
'servername',
'servicetype',
'port',
'cleartextport',
'cachetype',
'cipheader',
'serverid',
'state',
'td',
'monitor_name_svc',
'riseapbrstatsmsgcode',
'graceful',
'all',
'Internal',
'newname',
]
transforms = {
'pathmonitorindv': ['bool_yes_no'],
'cacheable': ['bool_yes_no'],
'cka': ['bool_yes_no'],
'pathmonitor': ['bool_yes_no'],
'tcpb': ['bool_yes_no'],
'sp': ['bool_on_off'],
'graceful': ['bool_yes_no'],
'usip': ['bool_yes_no'],
'healthmonitor': ['bool_yes_no'],
'useproxyport': ['bool_yes_no'],
'rtspsessionidremap': ['bool_on_off'],
'accessdown': ['bool_yes_no'],
'cmp': ['bool_yes_no'],
'cip': [lambda v: v.upper()],
'downstateflush': [lambda v: v.upper()],
'appflowlog': [lambda v: v.upper()],
'processlocal': [lambda v: v.upper()],
}
monitor_bindings_rw_attrs = [
'servicename',
'servicegroupname',
'dup_state',
'dup_weight',
'monitorname',
'weight',
]
# Translate module arguments to corresponding config object attributes
if module.params['ip'] is None:
module.params['ip'] = module.params['ipaddress']
service_proxy = ConfigProxy(
actual=service(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
# Apply appropriate state
if module.params['state'] == 'present':
log('Applying actions for state present')
if not service_exists(client, module):
if not module.check_mode:
service_proxy.add()
sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not all_identical(client, module, service_proxy, monitor_bindings_rw_attrs):
# Check if we try to change value of immutable attributes
diff_dict = diff(client, module, service_proxy)
immutables_changed = get_immutables_intersection(service_proxy, diff_dict.keys())
if immutables_changed != []:
msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff_dict, **module_result)
# Service sync
if not service_identical(client, module, service_proxy):
if not module.check_mode:
service_proxy.update()
# Monitor bindings sync
if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
if not module.check_mode:
sync_monitor_bindings(client, module, monitor_bindings_rw_attrs)
module_result['changed'] = True
if not module.check_mode:
if module.params['save_config']:
client.save_config()
else:
module_result['changed'] = False
if not module.check_mode:
res = do_state_change(client, module, service_proxy)
if res.errorcode != 0:
msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
module.fail_json(msg=msg, **module_result)
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state present')
if not service_exists(client, module):
module.fail_json(msg='Service does not exist', **module_result)
if not service_identical(client, module, service_proxy):
module.fail_json(msg='Service differs from configured', diff=diff(client, module, service_proxy), **module_result)
if not monitor_bindings_identical(client, module, monitor_bindings_rw_attrs):
module.fail_json(msg='Monitor bindings are not identical', **module_result)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if service_exists(client, module):
if not module.check_mode:
service_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for state
if not module.check_mode:
log('Sanity checks for state absent')
if service_exists(client, module):
module.fail_json(msg='Service still exists', **module_result)
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
gpl-3.0
| -165,827,485,163,182,800 | -2,873,190,875,772,254,700 | 32.036765 | 144 | 0.558551 | false |
Curious72/sympy
|
sympy/core/tests/test_basic.py
|
11
|
6009
|
"""This tests sympy/core/basic.py with (ideally) no reference to subclasses
of Basic or Atom."""
from sympy.core.basic import Basic, Atom, preorder_traversal
from sympy.core.singleton import S, Singleton
from sympy.core.symbol import symbols
from sympy.core.compatibility import default_sort_key, with_metaclass
from sympy import sin, Lambda, Q
from sympy.utilities.pytest import raises
b1 = Basic()
b2 = Basic(b1)
b3 = Basic(b2)
b21 = Basic(b2, b1)
def test_structure():
assert b21.args == (b2, b1)
assert b21.func(*b21.args) == b21
assert bool(b1)
def test_equality():
instances = [b1, b2, b3, b21, Basic(b1, b1, b1), Basic]
for i, b_i in enumerate(instances):
for j, b_j in enumerate(instances):
assert (b_i == b_j) == (i == j)
assert (b_i != b_j) == (i != j)
assert Basic() != []
assert not(Basic() == [])
assert Basic() != 0
assert not(Basic() == 0)
def test_matches_basic():
instances = [Basic(b1, b1, b2), Basic(b1, b2, b1), Basic(b2, b1, b1),
Basic(b1, b2), Basic(b2, b1), b2, b1]
for i, b_i in enumerate(instances):
for j, b_j in enumerate(instances):
if i == j:
assert b_i.matches(b_j) == {}
else:
assert b_i.matches(b_j) is None
assert b1.match(b1) == {}
def test_has():
assert b21.has(b1)
assert b21.has(b3, b1)
assert b21.has(Basic)
assert not b1.has(b21, b3)
assert not b21.has()
def test_subs():
assert b21.subs(b2, b1) == Basic(b1, b1)
assert b21.subs(b2, b21) == Basic(b21, b1)
assert b3.subs(b2, b1) == b2
assert b21.subs([(b2, b1), (b1, b2)]) == Basic(b2, b2)
assert b21.subs({b1: b2, b2: b1}) == Basic(b2, b2)
raises(ValueError, lambda: b21.subs('bad arg'))
raises(ValueError, lambda: b21.subs(b1, b2, b3))
def test_atoms():
assert b21.atoms() == set()
def test_free_symbols_empty():
assert b21.free_symbols == set()
def test_doit():
assert b21.doit() == b21
assert b21.doit(deep=False) == b21
def test_S():
assert repr(S) == 'S'
def test_xreplace():
assert b21.xreplace({b2: b1}) == Basic(b1, b1)
assert b21.xreplace({b2: b21}) == Basic(b21, b1)
assert b3.xreplace({b2: b1}) == b2
assert Basic(b1, b2).xreplace({b1: b2, b2: b1}) == Basic(b2, b1)
assert Atom(b1).xreplace({b1: b2}) == Atom(b1)
assert Atom(b1).xreplace({Atom(b1): b2}) == b2
raises(TypeError, lambda: b1.xreplace())
raises(TypeError, lambda: b1.xreplace([b1, b2]))
def test_Singleton():
global instantiated
instantiated = 0
class MySingleton(with_metaclass(Singleton, Basic)):
def __new__(cls):
global instantiated
instantiated += 1
return Basic.__new__(cls)
assert instantiated == 0
MySingleton() # force instantiation
assert instantiated == 1
assert MySingleton() is not Basic()
assert MySingleton() is MySingleton()
assert S.MySingleton is MySingleton()
assert instantiated == 1
class MySingleton_sub(MySingleton):
pass
assert instantiated == 1
MySingleton_sub()
assert instantiated == 2
assert MySingleton_sub() is not MySingleton()
assert MySingleton_sub() is MySingleton_sub()
def test_preorder_traversal():
expr = Basic(b21, b3)
assert list(
preorder_traversal(expr)) == [expr, b21, b2, b1, b1, b3, b2, b1]
assert list(preorder_traversal(('abc', ('d', 'ef')))) == [
('abc', ('d', 'ef')), 'abc', ('d', 'ef'), 'd', 'ef']
result = []
pt = preorder_traversal(expr)
for i in pt:
result.append(i)
if i == b2:
pt.skip()
assert result == [expr, b21, b2, b1, b3, b2]
w, x, y, z = symbols('w:z')
expr = z + w*(x + y)
assert list(preorder_traversal([expr], keys=default_sort_key)) == \
[[w*(x + y) + z], w*(x + y) + z, z, w*(x + y), w, x + y, x, y]
assert list(preorder_traversal((x + y)*z, keys=True)) == \
[z*(x + y), z, x + y, x, y]
def test_sorted_args():
x = symbols('x')
assert b21._sorted_args == b21.args
raises(AttributeError, lambda: x._sorted_args)
def test_call():
x, y = symbols('x y')
# See the long history of this in issues 5026 and 5105.
raises(TypeError, lambda: sin(x)({ x : 1, sin(x) : 2}))
raises(TypeError, lambda: sin(x)(1))
# No effect as there are no callables
assert sin(x).rcall(1) == sin(x)
assert (1 + sin(x)).rcall(1) == 1 + sin(x)
# Effect in the presence of callables
l = Lambda(x, 2*x)
assert (l + x).rcall(y) == 2*y + x
assert (x**l).rcall(2) == x**4
# TODO UndefinedFunction does not subclass Expr
#f = Function('f')
#assert (2*f)(x) == 2*f(x)
assert (Q.real & Q.positive).rcall(x) == Q.real(x) & Q.positive(x)
def test_literal_evalf_is_number_is_zero_is_comparable():
from sympy.integrals.integrals import Integral
from sympy.core.symbol import symbols
from sympy.core.function import Function
from sympy.functions.elementary.trigonometric import cos, sin
x = symbols('x')
f = Function('f')
# the following should not be changed without a lot of discussion
# `foo.is_number` should be equivalent to `not foo.free_symbols`
# it should not attempt anything fancy; see is_zero, is_constant
# and equals for more rigorous tests.
assert f(1).is_number is True
i = Integral(0, (x, x, x))
# expressions that are symbolically 0 can be difficult to prove
# so in case there is some easy way to know if something is 0
# it should appear in the is_zero property for that object;
# if is_zero is true evalf should always be able to compute that
# zero
assert i.n() == 0
assert i.is_zero
assert i.is_number is False
assert i.evalf(2, strict=False) == 0
# issue 10268
n = sin(1)**2 + cos(1)**2 - 1
assert n.is_comparable is False
assert n.n(2).is_comparable is False
assert n.n(2).n(2).is_comparable
|
bsd-3-clause
| 397,055,437,555,265,340 | -8,464,086,409,747,500,000 | 28.455882 | 75 | 0.600599 | false |
frewsxcv/servo
|
tests/wpt/css-tests/tools/html5lib/html5lib/tests/support.py
|
450
|
5496
|
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import codecs
import glob
import xml.sax.handler
base_path = os.path.split(__file__)[0]
test_dir = os.path.join(base_path, 'testdata')
sys.path.insert(0, os.path.abspath(os.path.join(base_path,
os.path.pardir,
os.path.pardir)))
from html5lib import treebuilders
del base_path
# Build a dict of available trees
treeTypes = {"DOM": treebuilders.getTreeBuilder("dom")}
# Try whatever etree implementations are available from a list that is
# "supposed" to work
try:
import xml.etree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
try:
import elementtree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
pass
try:
import xml.etree.cElementTree as cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
try:
import cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
pass
try:
import lxml.etree as lxml # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml")
def get_data_files(subdirectory, files='*.dat'):
return glob.glob(os.path.join(test_dir, subdirectory, files))
class DefaultDict(dict):
def __init__(self, default, *args, **kwargs):
self.default = default
dict.__init__(self, *args, **kwargs)
def __getitem__(self, key):
return dict.get(self, key, self.default)
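# Behavior sketch (illustrative): DefaultDict("x")["missing"] returns "x";
# unlike collections.defaultdict, the lookup never inserts the missing key.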
class TestData(object):
def __init__(self, filename, newTestHeading="data", encoding="utf8"):
if encoding is None:
self.f = open(filename, mode="rb")
else:
self.f = codecs.open(filename, encoding=encoding)
self.encoding = encoding
self.newTestHeading = newTestHeading
def __del__(self):
self.f.close()
def __iter__(self):
data = DefaultDict(None)
key = None
for line in self.f:
heading = self.isSectionHeading(line)
if heading:
if data and heading == self.newTestHeading:
# Remove trailing newline
data[key] = data[key][:-1]
yield self.normaliseOutput(data)
data = DefaultDict(None)
key = heading
data[key] = "" if self.encoding else b""
elif key is not None:
data[key] += line
if data:
yield self.normaliseOutput(data)
def isSectionHeading(self, line):
"""If the current heading is a test section heading return the heading,
otherwise return False"""
# print(line)
if line.startswith("#" if self.encoding else b"#"):
return line[1:].strip()
else:
return False
def normaliseOutput(self, data):
# Remove trailing newlines
for key, value in data.items():
if value.endswith("\n" if self.encoding else b"\n"):
data[key] = value[:-1]
return data
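# Usage sketch (file and heading names are illustrative): iterate over the
# tests in a .dat file; each test is a dict keyed by its "#"-prefixed
# section headings.
#
#     for test in TestData(get_data_files("tree-construction")[0]):
#         markup, expected = test["data"], test["document"]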
def convert(stripChars):
def convertData(data):
"""convert the output of str(document) to the format used in the testcases"""
data = data.split("\n")
rv = []
for line in data:
if line.startswith("|"):
rv.append(line[stripChars:])
else:
rv.append(line)
return "\n".join(rv)
return convertData
convertExpected = convert(2)
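# For example, convert(2) maps "| <html>" to "<html>" and leaves lines
# that don't start with "|" untouched.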
def errorMessage(input, expected, actual):
msg = ("Input:\n%s\nExpected:\n%s\nReceived\n%s\n" %
(repr(input), repr(expected), repr(actual)))
if sys.version_info.major == 2:
msg = msg.encode("ascii", "backslashreplace")
return msg
class TracingSaxHandler(xml.sax.handler.ContentHandler):
def __init__(self):
xml.sax.handler.ContentHandler.__init__(self)
self.visited = []
def startDocument(self):
self.visited.append('startDocument')
def endDocument(self):
self.visited.append('endDocument')
def startPrefixMapping(self, prefix, uri):
# These are ignored as their order is not guaranteed
pass
def endPrefixMapping(self, prefix):
# These are ignored as their order is not guaranteed
pass
def startElement(self, name, attrs):
self.visited.append(('startElement', name, attrs))
def endElement(self, name):
self.visited.append(('endElement', name))
def startElementNS(self, name, qname, attrs):
self.visited.append(('startElementNS', name, qname, dict(attrs)))
def endElementNS(self, name, qname):
self.visited.append(('endElementNS', name, qname))
def characters(self, content):
self.visited.append(('characters', content))
def ignorableWhitespace(self, whitespace):
self.visited.append(('ignorableWhitespace', whitespace))
def processingInstruction(self, target, data):
self.visited.append(('processingInstruction', target, data))
def skippedEntity(self, name):
self.visited.append(('skippedEntity', name))
|
mpl-2.0
| -6,225,717,207,310,540,000 | -7,732,936,261,814,126,000 | 30.050847 | 101 | 0.618814 | false |
jmcarp/regulations-parser
|
regparser/layer/interpretations.py
|
7
|
2506
|
from collections import defaultdict
from regparser.citations import Label
from regparser.layer.layer import Layer
from regparser.tree import struct
from regparser.tree.interpretation import text_to_labels
class Interpretations(Layer):
"""Supplement I (interpretations) provides (sometimes very lengthy) extra
information about particular paragraphs. This layer provides those
interpretations."""
def __init__(self, *args, **kwargs):
Layer.__init__(self, *args, **kwargs)
self.lookup_table = defaultdict(list)
def pre_process(self):
"""Create a lookup table for each interpretation"""
def per_node(node):
if (node.node_type != struct.Node.INTERP
or node.label[-1] != struct.Node.INTERP_MARK):
return
# Always add a connection based on the interp's label
self.lookup_table[tuple(node.label[:-1])].append(node)
# Also add connections based on the title
for label in text_to_labels(node.title or '',
Label.from_node(node),
warn=False):
label = tuple(label[:-1]) # Remove Interp marker
if node not in self.lookup_table[label]:
self.lookup_table[label].append(node)
struct.walk(self.tree, per_node)
def process(self, node):
"""Is there an interpretation associated with this node? If yes,
return the associated layer information. @TODO: Right now, this only
associates if there is a direct match. It should also associate if any
parents match"""
label = tuple(node.label)
if self.lookup_table[label]: # default dict; will always be present
interp_labels = [n.label_id() for n in self.lookup_table[label]
if not self.empty_interpretation(n)]
return [{'reference': l} for l in interp_labels] or None
def empty_interpretation(self, interp):
"""We don't want to include empty (e.g. \n\n) nodes as
interpretations unless their children are subparagraphs. We
distinguish subparagraphs from structural children by checking the
location of the 'Interp' delimiter."""
if interp.text.strip():
return False
return all(not child.label
or child.label[-1] == struct.Node.INTERP_MARK
for child in interp.children)
|
cc0-1.0
| 8,955,824,740,394,148,000 | 859,421,962,308,366,800 | 42.964912 | 78 | 0.610934 | false |
Spiderlover/Toontown
|
toontown/suit/SuitInvasionManagerAI.py
|
1
|
11138
|
import time
from random import random, randint, choice
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from toontown.battle import SuitBattleGlobals
from toontown.toonbase.ToontownGlobals import IDES_OF_MARCH
import SuitDNA
from SuitInvasionGlobals import *
class SuitInvasionManagerAI:
notify = directNotify.newCategory('SuitInvasionManagerAI')
def __init__(self, air):
self.air = air
self.invading = False
self.start = 0
self.remaining = 0
self.total = 0
self.suitDeptIndex = None
self.suitTypeIndex = None
self.megaInvasion = None
self.megaInvasionCog = None
self.megaInvasionFlags = None
self.flags = 0
self.isSkelecog = 0
self.isV2 = 0
self.isWaiter = 0
self.isVirtual = 0
self.isRental = 0
self.flags = [0, 0, 0, 0, 0]
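# Flag layout, as read back in startInvasion below:
# flags = [isSkelecog, isV2, isWaiter, isVirtual, isRental]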
self.air.netMessenger.accept(
'startInvasion', self, self.handleStartInvasion)
self.air.netMessenger.accept(
'stopInvasion', self, self.handleStopInvasion)
# We want to handle shard status queries so that a ShardStatusReceiver
# being created after we're created will know where we're at:
self.air.netMessenger.accept('queryShardStatus', self, self.sendInvasionStatus)
self.safeHarbours = []
tempSafeHarbours = config.GetString('safe-harbours','')
if tempSafeHarbours != '':
for safeHarbour in tempSafeHarbours.split(","):
safeHarbour = safeHarbour.strip()
self.safeHarbours.append(safeHarbour)
if config.GetBool('want-mega-invasions', False):
self.randomInvasionProbability = config.GetFloat('mega-invasion-probability', 0.65)
if self.air.distributedDistrict.name in self.safeHarbours:
self.notify.debug("Can't summon mega invasion in safe harbour!")
elif self.air.holidayManager.isHolidayRunning(IDES_OF_MARCH):#Temp
self.megaInvasion = IDES_OF_MARCH
#if self.megaInvasion:
# self.megaInvasionCog = megaInvasionDict[self.megaInvasion][0]
taskMgr.doMethodLater(randint(1800, 5400), self.__randomInvasionTick, 'random-invasion-tick')
self.sendInvasionStatus()
def getInvading(self):
return self.invading
def getInvadingCog(self):
return (self.suitDeptIndex, self.suitTypeIndex, self.flags)
def startInvasion(self, suitDeptIndex=None, suitTypeIndex=None, flags=[0, 0, 0, 0, 0],
type=INVASION_TYPE_NORMAL):
if self.invading:
# An invasion is currently in progress; ignore this request.
return False
if (suitDeptIndex is None) and (suitTypeIndex is None) and (not flags):
# This invasion is no-op.
return False
if((flags[2] == 1) and (flags[0] == 1 or flags[4] == 1)):
return False
if((flags[0] == 1) and (flags[1] == 1 or flags[2] == 1 or flags[4] == 1)):
return False
if (suitDeptIndex is None) and (suitTypeIndex is not None):
# It's impossible to determine the invading Cog.
return False
if (suitDeptIndex is not None) and (suitDeptIndex >= len(SuitDNA.suitDepts)):
# Invalid suit department.
return False
if (suitTypeIndex is not None) and (suitTypeIndex >= SuitDNA.suitsPerDept):
# Invalid suit type.
return False
if type not in (INVASION_TYPE_NORMAL, INVASION_TYPE_MEGA):
# Invalid invasion type.
return False
# Looks like we're all good. Begin the invasion:
self.invading = True
self.start = int(time.time())
self.suitDeptIndex = suitDeptIndex
self.suitTypeIndex = suitTypeIndex
self.flags = flags
self.isSkelecog = flags[0]
self.isV2 = flags[1]
self.isWaiter = flags[2]
self.isVirtual = flags[3]
self.isRental = flags[4]
# How many suits do we want?
if type == INVASION_TYPE_NORMAL:
self.total = 1000
elif type == INVASION_TYPE_MEGA:
self.total = randint(1800, 5400)
self.remaining = self.total
self.flySuits()
self.notifyInvasionStarted()
# Update the invasion tracker on the districts page in the Shticker Book:
if self.suitDeptIndex is not None:
self.air.districtStats.b_setInvasionStatus(self.suitDeptIndex + 1)
else:
self.air.districtStats.b_setInvasionStatus(5)
# If this is a normal invasion, and the players take too long to defeat
# all of the Cogs, we'll want the invasion to timeout:
if type == INVASION_TYPE_NORMAL:
timeout = config.GetInt('invasion-timeout', 1800)
taskMgr.doMethodLater(timeout, self.stopInvasion, 'invasionTimeout')
self.sendInvasionStatus()
return True
def stopInvasion(self, task=None):
if not self.invading:
# We are not currently invading.
return False
# Stop the invasion timeout task:
taskMgr.remove('invasionTimeout')
# Update the invasion tracker on the districts page in the Shticker Book:
self.air.districtStats.b_setInvasionStatus(0)
# Revert what was done when the invasion started:
self.notifyInvasionEnded()
self.invading = False
self.start = 0
self.suitDeptIndex = None
self.suitTypeIndex = None
self.flags = None
self.total = 0
self.remaining = 0
self.flySuits()
self.sendInvasionStatus()
return True
def getSuitName(self):
if self.suitDeptIndex is not None:
if self.suitTypeIndex is not None:
return SuitDNA.getSuitName(self.suitDeptIndex, self.suitTypeIndex)
else:
return SuitDNA.suitDepts[self.suitDeptIndex]
else:
return SuitDNA.suitHeadTypes[0]
def notifyInvasionStarted(self):
msgType = SuitInvasionBegin
if self.isSkelecog:
msgType = SkelecogInvasionBegin
elif self.isV2:
msgType = V2InvasionBegin
elif self.isWaiter:
msgType = WaiterInvasionBegin
elif self.isVirtual:
msgType = VirtualInvasionBegin
elif self.isRental:
msgType = RentalInvasionBegin
self.air.newsManager.sendUpdate(
'setInvasionStatus',
[msgType, self.getSuitName(), self.total, self.flags])
def notifyInvasionEnded(self):
msgType = SuitInvasionEnd
if self.isSkelecog:
msgType = SkelecogInvasionEnd
elif self.isV2:
msgType = V2InvasionEnd
elif self.isWaiter:
msgType = WaiterInvasionEnd
elif self.isVirtual:
msgType = VirtualInvasionEnd
elif self.isRental:
msgType = RentalInvasionEnd
self.air.newsManager.sendUpdate(
'setInvasionStatus', [msgType, self.getSuitName(), 0, self.flags])
def notifyInvasionUpdate(self):
self.air.newsManager.sendUpdate(
'setInvasionStatus',
[SuitInvasionUpdate, self.getSuitName(),
self.remaining, self.flags])
def notifyInvasionBulletin(self, avId):
msgType = SuitInvasionBulletin
if self.isSkelecog:
msgType = SkelecogInvasionBulletin
elif self.isV2:
msgType = V2InvasionBulletin
elif self.isWaiter:
msgType = WaiterInvasionBulletin
elif self.isVirtual:
msgType = VirtualInvasionBulletin
elif self.isRental:
msgType = RentalInvasionBulletin
self.air.newsManager.sendUpdateToAvatarId(
avId, 'setInvasionStatus',
[msgType, self.getSuitName(), self.remaining, self.flags])
def flySuits(self):
for suitPlanner in self.air.suitPlanners.values():
suitPlanner.flySuits()
def handleSuitDefeated(self):
self.remaining -= 1
if self.remaining == 0:
self.stopInvasion()
elif self.remaining == (self.total/2):
self.notifyInvasionUpdate()
self.sendInvasionStatus()
def handleStartInvasion(self, shardId, *args):
if shardId == self.air.ourChannel:
self.startInvasion(*args)
def handleStopInvasion(self, shardId):
if shardId == self.air.ourChannel:
self.stopInvasion()
def sendInvasionStatus(self):
if self.invading:
if self.suitDeptIndex is not None:
if self.suitTypeIndex is not None:
type = SuitBattleGlobals.SuitAttributes[self.getSuitName()]['name']
else:
type = SuitDNA.getDeptFullname(self.getSuitName())
else:
type = None
status = {
'invasion': {
'type': type,
'flags': [self.isSkelecog, self.isV2, self.isWaiter, self.isVirtual, self.isRental],
'remaining': self.remaining,
'total': self.total,
'start': self.start
}
}
else:
status = {'invasion': None}
self.air.netMessenger.send('shardStatus', [self.air.ourChannel, status])
def __randomInvasionTick(self, task=None):
"""
Each hour, have a tick to check if we want to start an invasion in
the current district. This works by having a random invasion
probability, and each tick it will generate a random float between
0 and 1, and then if it's less than or equal to the probability, it
will spawn the invasion.
An invasion will not be started if there is an invasion already
on-going.
"""
# Generate a new tick delay.
task.delayTime = randint(1800, 5400)
if self.getInvading():
# We're already running an invasion. Don't start a new one.
self.notify.debug('Invasion tested but already running invasion!')
return task.again
if random() <= self.randomInvasionProbability:
# We want an invasion!
self.notify.debug('Invasion probability hit! Starting invasion.')
if config.GetBool('want-mega-invasions', False):
suitDept = megaInvasionDict[self.megaInvasion][0][0]
suitIndex = megaInvasionDict[self.megaInvasion][0][1]
if megaInvasionDict[self.megaInvasion][2]:
rngFlag = randint(0, 4)
flags = [0, 0, 0, 0, 0]
flags[rngFlag] = 1
else:
flags = megaInvasionDict[self.megaInvasion][1]
self.startInvasion(suitDept, suitIndex, flags, INVASION_TYPE_MEGA)
return task.again
|
mit
| -6,906,306,188,466,542,000 | 92,148,963,031,533,340 | 36.628378 | 109 | 0.5993 | false |
qmarlats/pyquizz
|
env-3/lib/python3.5/site-packages/pygments/lexers/iolang.py
|
47
|
1904
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.iolang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Io language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number
__all__ = ['IoLexer']
class IoLexer(RegexLexer):
"""
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
.. versionadded:: 0.10
"""
name = 'Io'
filenames = ['*.io']
aliases = ['io']
mimetypes = ['text/x-iosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'#(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Operators
(r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
Operator),
# keywords
(r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
Keyword),
# constants
(r'(nil|false|true)\b', Name.Constant),
# names
(r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
Name.Builtin),
('[a-zA-Z_]\w*', Name),
# numbers
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
]
}
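# Usage sketch (not part of this module): render Io source with this lexer
# through the standard Pygments API.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('"Hello, Io!" println', IoLexer(), TerminalFormatter()))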
|
gpl-3.0
| 682,142,148,330,357,000 | 1,718,136,341,635,151,400 | 29.222222 | 78 | 0.442227 | false |
morreene/tradenews
|
venv/Lib/site-packages/sqlalchemy/ext/baked.py
|
32
|
16967
|
# sqlalchemy/ext/baked.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
from ..orm.query import Query
from ..orm import strategies, attributes, properties, \
strategy_options, util as orm_util, interfaces
from .. import log as sqla_log
from ..sql import util as sql_util
from ..orm import exc as orm_exc
from .. import exc as sa_exc
from .. import util
import copy
import logging
log = logging.getLogger(__name__)
class BakedQuery(object):
"""A builder object for :class:`.query.Query` objects."""
__slots__ = 'steps', '_bakery', '_cache_key', '_spoiled'
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200):
"""Construct a new bakery."""
_bakery = util.LRUCache(size)
def call(initial_fn, *args):
return cls(_bakery, initial_fn, args)
return call
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`.Result` object for this :class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full:
_spoil_point = self._clone()
_spoil_point._cache_key += ('_query_only', )
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._cache_key, None)
if query is None:
query = self._as_query(session)
self._bakery[self._cache_key] = query.with_session(None)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
context = query._compile_context()
self._bake_subquery_loaders(session, context)
context.session = None
context.query = query = context.query.with_session(None)
query._execution_options = query._execution_options.union(
{"compiled_cache": self._bakery}
)
# we'll be holding onto the query for some of its state,
# so delete some compilation-use-only attributes that can take up
# space
for attr in (
'_correlate', '_from_obj', '_mapper_adapter_map',
'_joinpath', '_joinpoint'):
query.__dict__.pop(attr, None)
self._bakery[self._cache_key] = context
return context
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
def _bake_subquery_loaders(self, session, context):
"""convert subquery eager loaders in the cache into baked queries.
For subquery eager loading to work, all we need here is that the
Query point to the correct session when it is run. However, since
we are "baking" anyway, we may as well also turn the query into
a "baked" query so that we save on performance too.
"""
context.attributes['baked_queries'] = baked_queries = []
for k, v in list(context.attributes.items()):
if isinstance(v, Query):
if 'subquery' in k:
bk = BakedQuery(self._bakery, lambda *args: v)
bk._cache_key = self._cache_key + k
bk._bake(session)
baked_queries.append((k, bk._cache_key, v))
del context.attributes[k]
def _unbake_subquery_loaders(self, session, context, params):
"""Retrieve subquery eager loaders stored by _bake_subquery_loaders
and turn them back into Result objects that will iterate just
like a Query object.
"""
for k, cache_key, query in context.attributes["baked_queries"]:
bk = BakedQuery(self._bakery, lambda sess: query.with_session(sess))
bk._cache_key = cache_key
context.attributes[k] = bk.for_session(session).params(**params)
class Result(object):
"""Invokes a :class:`.BakedQuery` against a :class:`.Session`.
The :class:`.Result` object is where the actual :class:`.query.Query`
object gets created, or retrieved from the cache,
against a target :class:`.Session`, and is then invoked for results.
"""
__slots__ = 'bq', 'session', '_params'
def __init__(self, bq, session):
self.bq = bq
self.session = session
self._params = {}
def params(self, *args, **kw):
"""Specify parameters to be replaced into the string SQL statement."""
if len(args) == 1:
kw.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params.update(kw)
return self
def _as_query(self):
return self.bq._as_query(self.session).params(self._params)
def __str__(self):
return str(self._as_query())
def __iter__(self):
bq = self.bq
if bq._spoiled:
return iter(self._as_query())
baked_context = bq._bakery.get(bq._cache_key, None)
if baked_context is None:
baked_context = bq._bake(self.session)
context = copy.copy(baked_context)
context.session = self.session
context.attributes = context.attributes.copy()
bq._unbake_subquery_loaders(self.session, context, self._params)
context.statement.use_labels = True
if context.autoflush and not context.populate_existing:
self.session._autoflush()
return context.query.params(self._params).\
with_session(self.session)._execute_and_instances(context)
def first(self):
"""Return the first row.
Equivalent to :meth:`.Query.first`.
"""
bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
ret = list(bq.for_session(self.session).params(self._params))
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Equivalent to :meth:`.Query.one`.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def one_or_none(self):
"""Return one or zero results, or raise an exception for multiple
rows.
Equivalent to :meth:`.Query.one_or_none`.
.. versionadded:: 1.0.9
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one_or_none()")
def all(self):
"""Return all rows.
Equivalent to :meth:`.Query.all`.
"""
return list(self)
def get(self, ident):
"""Retrieve an object based on identity.
Equivalent to :meth:`.Query.get`.
"""
query = self.bq.steps[0](self.session)
return query._get_impl(ident, self._load_on_ident)
def _load_on_ident(self, query, key):
"""Load the given identity key from the database."""
ident = key[1]
mapper = query._mapper_zero()
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones)
_lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
q._criterion = _lcl_get_clause
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause, )
bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
def bake_lazy_loaders():
"""Enable the use of baked queries for all lazyloaders systemwide.
This operation should be safe for all lazy loaders, and will reduce
Python overhead for these operations.
"""
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(BakedLazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
strategies.LazyLoader._strategy_keys[:] = BakedLazyLoader._strategy_keys[:]
def unbake_lazy_loaders():
"""Disable the use of baked queries for all lazyloaders systemwide.
This operation reverts the changes produced by :func:`.bake_lazy_loaders`.
"""
strategies.LazyLoader._strategy_keys[:] = []
BakedLazyLoader._strategy_keys[:] = []
properties.RelationshipProperty.strategy_for(
lazy="select")(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy=True)(strategies.LazyLoader)
properties.RelationshipProperty.strategy_for(
lazy="baked_select")(BakedLazyLoader)
assert strategies.LazyLoader._strategy_keys
@sqla_log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class BakedLazyLoader(strategies.LazyLoader):
def _emit_lazyload(self, session, state, ident_key, passive):
q = BakedQuery(
self.mapper._compiled_cache,
lambda session: session.query(self.mapper))
q.add_criteria(
lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
self.parent_property)
if not self.parent_property.bake_queries:
q.spoil(full=True)
if self.parent_property.secondary is not None:
q.add_criteria(
lambda q:
q.select_from(self.mapper, self.parent_property.secondary))
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q.add_criteria(lambda q: q.autoflush(False))
if state.load_path:
q.spoil()
q.add_criteria(
lambda q:
q._with_current_path(state.load_path[self.parent_property]))
if state.load_options:
q.spoil()
q.add_criteria(
lambda q: q._conditional_options(*state.load_options))
if self.use_get:
return q(session)._load_on_ident(
session.query(self.mapper), ident_key)
if self.parent_property.order_by:
q.add_criteria(
lambda q:
q.order_by(*util.to_list(self.parent_property.order_by)))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, strategies.LazyLoader):
q.add_criteria(
lambda q:
q.options(
strategy_options.Load(
rev.parent).baked_lazyload(rev.key)))
lazy_clause, params = self._generate_lazy_clause(state, passive)
if pending:
if orm_util._none_set.intersection(params.values()):
return None
q.add_criteria(lambda q: q.filter(lazy_clause))
result = q(session).params(**params).all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading with a "baked" query used in the load.
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
def baked_lazyload(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, False, {})
@baked_lazyload._add_unbound_all_fn
def baked_lazyload_all(*keys):
return strategy_options._UnboundLoad._from_keys(
strategy_options._UnboundLoad.baked_lazyload, keys, True, {})
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
bakery = BakedQuery.bakery
|
bsd-3-clause
| -1,044,044,708,188,729,600 | -1,354,870,724,896,553,000 | 31.441683 | 82 | 0.58991 | false |
bakhtout/odoo-educ
|
addons/website_mail/models/mail_thread.py
|
338
|
1454
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
# TODO for trunk, remove me
class MailThread(osv.AbstractModel):
_inherit = 'mail.thread'
_columns = {
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Messages',
help="Website communication history",
),
}
|
agpl-3.0
| 593,079,450,797,214,700 | 4,702,252,709,516,984,000 | 37.263158 | 78 | 0.575653 | false |
super13/tensorflow-speech-recognition-pai
|
src/smodels/RNN/utils.py
|
1
|
1207
|
import os
import tensorflow as tf
from configparser import ConfigParser
from utilities.set_dirs import get_conf_dir
conf_dir = get_conf_dir(debug=False)
parser = ConfigParser(os.environ)
parser.read(os.path.join(conf_dir, 'neural_network.ini'))
# AdamOptimizer
beta1 = parser.getfloat('optimizer', 'beta1')
beta2 = parser.getfloat('optimizer', 'beta2')
epsilon = parser.getfloat('optimizer', 'epsilon')
learning_rate = parser.getfloat('optimizer', 'learning_rate')
def variable_on_cpu(name, shape, initializer):
"""
Create (or retrieve) a variable placed in CPU memory. This utility
pins variables to the CPU device regardless of where the ops that
use them run.
"""
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
# Create or get the requested variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
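# Usage sketch (shapes and name are illustrative):
#     weights = variable_on_cpu('weights', [784, 10],
#                               tf.random_normal_initializer(stddev=0.1))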
def create_optimizer():
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=beta1,
beta2=beta2,
epsilon=epsilon)
return optimizer
|
mit
| -6,453,633,469,605,366,000 | 6,873,872,533,460,543,000 | 32.527778 | 87 | 0.654515 | false |
akloster/bokeh
|
bokeh/properties.py
|
20
|
42601
|
""" Properties are objects that can be assigned as class level
attributes on Bokeh models, to provide automatic serialization
and validation.
For example, the following defines a model that has integer,
string, and list[float] properties::
class Model(HasProps):
foo = Int
bar = String
baz = List(Float)
The properties of this class can be initialized by specifying
keyword arguments to the initializer::
m = Model(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance::
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception::
>>> m.foo = 2.3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__
super(HasProps, self).__setattr__(name, value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__
raise e
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__
self.validate(value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float
Additionally, properties know how to serialize themselves,
to be understood by BokehJS.
"""
from __future__ import absolute_import, print_function
import re
import types
import difflib
import datetime
import dateutil.parser
import collections
from importlib import import_module
from copy import copy
from warnings import warn
import inspect
import logging
logger = logging.getLogger(__name__)
from six import integer_types, string_types, add_metaclass, iteritems
import numpy as np
from . import enums
from .util.string import nice_join
def field(name):
''' Convenience function do explicitly mark a field specification for
a Bokeh model property.
Args:
name (str) : name of a data source field to reference for a property.
Returns:
dict : `{"field": name}`
Note:
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
'''
return dict(field=name)
def value(val):
''' Convenience function do explicitly mark a value specification for
a Bokeh model property.
Args:
val (any) : a fixed value to specify for a property.
Returns:
dict : `{"value": name}`
Note:
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
return dict(value=val)
bokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
pass
class DeserializationError(Exception):
pass
class Property(object):
""" Base class for all type properties. """
def __init__(self, default=None, help=None):
""" This is how the descriptor is created in the class declaration """
if isinstance(default, types.FunctionType): # aka. lazy value
self.validate(default())
else:
self.validate(default)
self._default = default
self.__doc__ = help
self.alternatives = []
# This gets set by the class decorator at class creation time
self.name = "unnamed"
def __str__(self):
return self.__class__.__name__
@property
def _name(self):
return "_" + self.name
@property
def default(self):
if not isinstance(self._default, types.FunctionType):
return copy(self._default)
else:
value = self._default()
self.validate(value)
return value
@classmethod
def autocreate(cls, name=None):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def matches(self, new, old):
# XXX: originally this code warned about not being able to compare values,
# but that doesn't make sense: most comparisons involving numpy arrays raise
# ValueError, so the warning would have fired on nearly every such comparison.
try:
if new is None or old is None:
return new is old # XXX: silence FutureWarning from NumPy
else:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e)
return False
def from_json(self, json, models=None):
return json
def transform(self, value):
return value
def validate(self, value):
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
def _get(self, obj):
if not hasattr(obj, self._name):
setattr(obj, self._name, self.default)
return getattr(obj, self._name)
def __get__(self, obj, owner=None):
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def __set__(self, obj, value):
try:
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
old = self.__get__(obj)
obj._changed_vars.add(self.name)
if self._name in obj.__dict__ and self.matches(value, old):
return
setattr(obj, self._name, value)
obj._dirty = True
if hasattr(obj, '_trigger'):
if hasattr(obj, '_block_callbacks') and obj._block_callbacks:
obj._callback_queue.append((self.name, old, value))
else:
obj._trigger(self.name, old, value)
def __delete__(self, obj):
if hasattr(obj, self._name):
delattr(obj, self._name)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
tp = ParameterizedProperty._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def __or__(self, other):
return Either(self, other)
class Include(object):
""" Include other properties from mixin Models, with a given prefix. """
def __init__(self, delegate, help="", use_prefix=True):
if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
raise ValueError("expected a subclass of HasProps, got %r" % delegate)
self.delegate = delegate
self.help = help
self.use_prefix = use_prefix
class MetaHasProps(type):
def __new__(cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# First pre-process to handle all the Includes
includes = {}
removes = set()
for name, prop in class_dict.items():
if not isinstance(prop, Include):
continue
delegate = prop.delegate
if prop.use_prefix:
prefix = re.sub("_props$", "", name) + "_"
else:
prefix = ""
for subpropname in delegate.class_properties(withbases=False):
fullpropname = prefix + subpropname
subprop = delegate.lookup(subpropname)
if isinstance(subprop, Property):
# If it's an actual instance, then we need to make a copy
# so two properties don't write to the same hidden variable
# inside the instance.
subprop = copy(subprop)
if "%s" in prop.help:
doc = prop.help % subpropname.replace('_', ' ')
else:
doc = prop.help
try:
includes[fullpropname] = subprop(help=doc)
except TypeError:
includes[fullpropname] = subprop
subprop.__doc__ = doc
# Remove the name of the Include attribute itself
removes.add(name)
# Update the class dictionary, taking care not to overwrite values
# from the delegates that the subclass may have explicitly defined
for key, val in includes.items():
if key not in class_dict:
class_dict[key] = val
for tmp in removes:
del class_dict[tmp]
dataspecs = {}
units_to_add = {}
for name, prop in class_dict.items():
if isinstance(prop, Property):
prop.name = name
if prop.has_ref:
names_with_refs.add(name)
elif isinstance(prop, ContainerProperty):
container_names.add(name)
names.add(name)
if isinstance(prop, DataSpec):
dataspecs[name] = prop
if hasattr(prop, '_units_type'):
units_to_add[name+"_units"] = prop._units_type
elif isinstance(prop, type) and issubclass(prop, Property):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
newprop = prop.autocreate(name=name)
class_dict[name] = newprop
newprop.name = name
names.add(name)
# Process dataspecs
if issubclass(prop, DataSpec):
dataspecs[name] = newprop
for name, prop in units_to_add.items():
prop.name = name
names.add(name)
class_dict[name] = prop
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if dataspecs:
class_dict["_dataspecs"] = dataspecs
return type.__new__(cls, class_name, bases, class_dict)
def accumulate_from_subclasses(cls, propname):
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps):
s.update(getattr(c, propname))
return s
@add_metaclass(MetaHasProps)
class HasProps(object):
def __init__(self, **properties):
super(HasProps, self).__init__()
self._changed_vars = set()
for name, value in properties.items():
setattr(self, name, value)
def __setattr__(self, name, value):
props = sorted(self.properties())
if name.startswith("_") or name in props:
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches)))
def clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self.changed_properties_with_values())
@classmethod
def lookup(cls, name):
return getattr(cls, name)
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
if not hasattr(cls, "__cached_allprops_with_refs"):
s = accumulate_from_subclasses(cls, "__properties_with_refs__")
cls.__cached_allprops_with_refs = s
return cls.__cached_allprops_with_refs
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers
"""
if not hasattr(cls, "__cached_allprops_containers"):
s = accumulate_from_subclasses(cls, "__container_props__")
cls.__cached_allprops_containers = s
return cls.__cached_allprops_containers
@classmethod
def properties(cls):
""" Returns a set of the names of this object's properties. We
traverse the class hierarchy and pull together the full
list of properties.
"""
if not hasattr(cls, "__cached_allprops"):
s = cls.class_properties()
cls.__cached_allprops = s
return cls.__cached_allprops
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
if not hasattr(cls, "__cached_dataspecs"):
dataspecs = set()
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs.keys())
cls.__cached_dataspecs = dataspecs
return cls.__cached_dataspecs
@classmethod
def dataspecs_with_refs(cls):
dataspecs = {}
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs)
return dataspecs
def changed_vars(self):
""" Returns which variables changed since the creation of the object,
or the last called to reset_changed_vars().
"""
return set.union(self._changed_vars, self.properties_with_refs(),
self.properties_containers())
def reset_changed_vars(self):
self._changed_vars = set()
def properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])
def changed_properties(self):
return self.changed_vars()
def changed_properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ])
@classmethod
def class_properties(cls, withbases=True):
if withbases:
return accumulate_from_subclasses(cls, "__properties__")
else:
return set(cls.__properties__)
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(Property):
""" A base class for simple property types. Subclasses should
define a class attribute ``_underlying_type`` that is a tuple
of acceptable type values for the property.
"""
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
class Bool(PrimitiveProperty):
""" Boolean type property. """
_underlying_type = (bool,)
class Int(PrimitiveProperty):
""" Signed integer type property. """
_underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
""" Floating point type property. """
_underlying_type = (float, ) + bokeh_integer_types
class Complex(PrimitiveProperty):
""" Complex floating point type property. """
_underlying_type = (complex, float) + bokeh_integer_types
class String(PrimitiveProperty):
""" String type property. """
_underlying_type = string_types
class Regex(String):
""" Regex type property validates that text values match the
given regular expression.
"""
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default, help=help)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class JSON(String):
""" JSON type property validates that text values are valid JSON.
.. note::
The string is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
"""
def validate(self, value):
super(JSON, self).validate(value)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
raise ValueError("expected JSON text, got %r" % value)
class ParameterizedProperty(Property):
""" Base class for Properties that have type parameters, e.g.
``List(String)``.
"""
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
return type_param
raise ValueError("expected a property as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedProperty):
""" Base class for Container-like type properties. """
pass
class Seq(ContainerProperty):
""" Sequence (list, tuple) type property.
"""
def _is_seq(self, value):
return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)
def _new_instance(self, value):
return value
def __init__(self, item_type, default=None, help=None):
self.item_type = self._validate_type_param(item_type)
super(Seq, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(Seq, self).validate(value)
if value is not None:
if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return self._new_instance([ self.item_type.from_json(item, models) for item in json ])
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class List(Seq):
""" Python list type property.
"""
def __init__(self, item_type, default=[], help=None):
# todo: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
# optional values. Also in Dict.
super(List, self).__init__(item_type, default=default, help=help)
def _is_seq(self, value):
return isinstance(value, list)
class Array(Seq):
""" NumPy array type property.
"""
def _is_seq(self, value):
import numpy as np
return isinstance(value, np.ndarray)
def _new_instance(self, value):
return np.array(value)
class Dict(ContainerProperty):
""" Python dict type property.
If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}, help=None):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class Tuple(ContainerProperty):
""" Tuple type property. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help"))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class Instance(Property):
""" Instance type property, for references to other Models in the object
graph.
"""
def __init__(self, instance_type, default=None, help=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default, help=help)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
@property
def has_ref(self):
return True
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
from .plot_object import PlotObject
if issubclass(self.instance_type, PlotObject):
if models is None:
raise DeserializationError("%s can't deserialize without models" % self)
else:
model = models.get(json["id"])
if model is not None:
return model
else:
raise DeserializationError("%s failed to deserilize reference to %s" % (self, json))
else:
attrs = {}
for name, value in iteritems(json):
prop = self.instance_type.lookup(name)
attrs[name] = prop.from_json(value, models)
# XXX: this doesn't work when Instance(Superclass) := Subclass()
# Serialization dict must carry type information to resolve this.
return self.instance_type(**attrs)
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class This(Property):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(Property):
""" Any type property accepts any values. """
pass
class Function(Property):
""" Function type property. """
pass
class Event(Property):
""" Event type property. """
pass
class Interval(ParameterizedProperty):
''' Range type property ensures values are contained inside a given interval. '''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.interval_type]
def validate(self, value):
super(Interval, self).validate(value)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
class Byte(Interval):
''' Byte type property. '''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Either(ParameterizedProperty):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
default = kwargs.get("default", self._type_params[0].default)
help = kwargs.get("help")
super(Either, self).__init__(default=default, help=help)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def from_json(self, json, models=None):
for tp in self.type_params:
try:
return tp.from_json(json, models)
except DeserializationError:
pass
else:
raise DeserializationError("%s couldn't deserialize %s" % (self, json))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def __or__(self, other):
return self.__class__(*(self.type_params + [other]), default=self._default, help=self.help)
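# Example (illustrative, not in the original source): Either tries each of its
# type parameters in order for validation, transformation and deserialization.
#
#     prop = Either(Int, Regex("^#[0-9a-fA-F]{6}$"))
#     prop.is_valid(10)          # True -- matches the Int alternative
#     prop.is_valid("#aabbcc")   # True -- matches the Regex alternative
#     prop.is_valid("nope")      # False -- no alternative accepts it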
class Enum(Property):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
        if values or not isinstance(enum, enums.Enumeration):
enum = enums.enumeration(enum, *values)
self.allowed_values = enum._values
default = kwargs.get("default", enum._default)
help = kwargs.get("help")
super(Enum, self).__init__(default=default, help=help)
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self.allowed_values):
raise ValueError("invalid value for %s: %r; allowed values are %s" % (self.name, value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
class Auto(Enum):
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
    Because we support both named colors and hex values prefaced with a
    "#", string values require a little interpretation: if the value is
    one of the 147 SVG named colors or it starts with a "#", then it is
    interpreted as a color value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None, help=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default, help=help)
def __str__(self):
return self.__class__.__name__
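# Example (illustrative, not in the original file): each accepted form
# validates against one of Color's four underlying alternatives.
#
#     c = Color()
#     c.validate("firebrick")          # one of the 147 SVG named colors
#     c.validate("#00ff00")            # "#"-prefixed hex string
#     c.validate((10, 20, 30))         # RGB bytes
#     c.validate((10, 20, 30, 0.5))    # RGBa bytes plus float alpha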
class Align(Property):
pass
class DashPattern(Either):
""" Dash type property.
Express patterns that describe line dashes. ``DashPattern`` values
can be specified in a variety of ways:
* An enum: "solid", "dashed", "dotted", "dotdash", "dashdot"
* a tuple or list of integers in the `HTML5 Canvas dash specification style`_.
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
To indicate that dashing is turned off (solid lines), specify the empty
list [].
.. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[], help=None):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int)
super(DashPattern, self).__init__(*types, default=default, help=help)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
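# Example (illustrative sketch): ``transform`` normalizes the three accepted
# spellings down to a list of integers.
#
#     dp = DashPattern()
#     dp.transform("dashed")    # -> [6], via the named-pattern table
#     dp.transform("2 4 6")     # -> [2, 4, 6], via the regex branch
#     dp.transform([2, 4])      # -> [2, 4], sequences pass through unchanged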
class Size(Float):
""" Size type property.
.. note::
``Size`` is equivalent to an unsigned int.
"""
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percentage type property.
    Percents are useful for specifying alphas, coverage, and extents; they
    are more semantically meaningful than Float(0..1).
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
""" Angle type property. """
pass
class Date(Property):
""" Date (not datetime) type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Date, self).__init__(default=default, help=help)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
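# Example (illustrative, not part of the original source): ``transform``
# normalizes strings and timestamps to ``datetime.date`` objects; numeric
# values are retried as milliseconds if seconds are out of range.
#
#     d = Date()
#     d.transform("2015-03-28")    # -> datetime.date(2015, 3, 28) via dateutil
#     d.transform(1427500800)      # POSIX seconds -> datetime.date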
class Datetime(Property):
""" Datetime type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def validate(self, value):
super(Datetime, self).validate(value)
        if isinstance(value, (datetime.datetime, datetime.date, np.datetime64)):
return
try:
import pandas
if isinstance(value, (pandas.Timestamp)):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
""" RelativeDelta type property for time deltas.
"""
def __init__(self, default={}, help=None):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class DataSpec(Either):
def __init__(self, typ, default, help=None):
super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)
self._type = self._validate_type_param(typ)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
# Check for None value
if val is None:
return dict(value=None)
# Check for spec type value
try:
self._type.validate(val)
return dict(value=val)
except ValueError:
pass
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r)" % (self.__class__.__name__, val)
class NumberSpec(DataSpec):
def __init__(self, default, help=None):
super(NumberSpec, self).__init__(Float, default=default, help=help)
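# Illustration (a sketch; assumes a HasProps model class ``M`` declaring a
# property ``size = NumberSpec(default=10)``). ``to_dict`` distinguishes a
# fixed value from a data-source field name:
#
#     m = M()
#     m.size = 12          # to_dict(m) -> {"value": 12}
#     m.size = "radius"    # to_dict(m) -> {"field": "radius"}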
class StringSpec(DataSpec):
def __init__(self, default, help=None):
super(StringSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, list):
if len(value) != 1:
raise TypeError("StringSpec convenience list values must have length 1")
value = dict(value=value[0])
super(StringSpec, self).__set__(obj, value)
class FontSizeSpec(DataSpec):
def __init__(self, default, help=None):
super(FontSizeSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, string_types):
warn('Setting a fixed font size value as a string %r is deprecated, '
'set with value(%r) or [%r] instead' % (value, value, value),
DeprecationWarning, stacklevel=2)
if len(value) > 0 and value[0].isdigit():
value = dict(value=value)
super(FontSizeSpec, self).__set__(obj, value)
class UnitsSpec(NumberSpec):
def __init__(self, default, units_type, units_default, help=None):
super(UnitsSpec, self).__init__(default=default, help=help)
self._units_type = self._validate_type_param(units_type)
self._units_type.validate(units_default)
self._units_type._default = units_default
def to_dict(self, obj):
d = super(UnitsSpec, self).to_dict(obj)
d["units"] = getattr(obj, self.name+"_units")
return d
def __set__(self, obj, value):
if isinstance(value, dict):
units = value.pop("units", None)
if units: setattr(obj, self.name+"_units", units)
super(UnitsSpec, self).__set__(obj, value)
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r, units_default=%r)" % (self.__class__.__name__, val, self._units_type._default)
class AngleSpec(UnitsSpec):
def __init__(self, default, units_default="rad", help=None):
super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)
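# Illustration (a sketch; the model ``m`` and its ``angle`` property are
# assumed): setting a dict with a "units" key routes the units to the
# companion ``<name>_units`` attribute before the spec value is stored.
#
#     m.angle = {"value": 90, "units": "deg"}
#     # afterwards: m.angle_units == "deg", and to_dict includes "units"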
class DistanceSpec(UnitsSpec):
def __init__(self, default, units_default="data", help=None):
super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DistanceSpec, self).__set__(obj, value)
class ScreenDistanceSpec(NumberSpec):
def to_dict(self, obj):
d = super(ScreenDistanceSpec, self).to_dict(obj)
d["units"] = "screen"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(ScreenDistanceSpec, self).__set__(obj, value)
class DataDistanceSpec(NumberSpec):
def to_dict(self, obj):
        d = super(DataDistanceSpec, self).to_dict(obj)
d["units"] = "data"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DataDistanceSpec, self).__set__(obj, value)
class ColorSpec(DataSpec):
def __init__(self, default, help=None):
super(ColorSpec, self).__init__(Color, default=default, help=help)
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor._values)
@classmethod
def is_color_tuple(cls, val):
return isinstance(val, tuple) and len(val) in (3, 4)
@classmethod
def format_tuple(cls, colortuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
if val is None:
return dict(value=None)
# Check for hexadecimal or named color
if self.isconst(val):
return dict(value=val)
# Check for RGB or RGBa tuple
if isinstance(val, tuple):
return dict(value=self.format_tuple(val))
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def validate(self, value):
try:
return super(ColorSpec, self).validate(value)
except ValueError as e:
# Check for tuple input if not yet a valid input type
if self.is_color_tuple(value):
return True
else:
raise e
def transform(self, value):
# Make sure that any tuple has either three integers, or three integers and one float
if isinstance(value, tuple):
value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))
return value
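# Example (illustrative, not in the original module): ColorSpec serialization
# covers literals, tuples and field names. For a model attribute declared as
# ``fill_color = ColorSpec("gray")``:
#
#     m.fill_color = "#aabbcc"       # to_dict -> {"value": "#aabbcc"}
#     m.fill_color = (255, 0, 0)     # to_dict -> {"value": "rgb(255, 0, 0)"}
#     m.fill_color = "temperature"   # to_dict -> {"field": "temperature"}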
|
bsd-3-clause
| 8,806,869,091,572,428,000 | 7,611,781,370,170,114,000 | 33.0808 | 137 | 0.595596 | false |
40223234/2015cdb_g1_0134
|
static/Brython3.1.1-20150328-091302/Lib/test/test_re.py
|
718
|
56009
|
# FIXME: brython: implement test.support
#from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \
# cpython_only
verbose = True
# FIXME: brython: Not used in this module ?
#import io
import re
# FIXME: brython: implement re.Scanner
#from re import Scanner
import sre_constants
import sys
import string
import traceback
# FIXME: brython: implement _weakref
#from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
# FIXME: brython: implement test.support
# def test_keep_buffer(self):
# # See bug 14212
# b = bytearray(b'x')
# it = re.finditer(b'a', b)
# with self.assertRaises(BufferError):
# b.extend(b'x'*400)
# list(it)
# del it
# gc_collect()
# b.extend(b'x'*400)
# FIXME: brython: implement _weakref
# def test_weakref(self):
# s = 'QabbbcR'
# x = re.compile('ab+c')
# y = proxy(x)
# self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_groups(self):
re.compile('(?P<a>x)(?P=a)(?(a)y)')
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
self.assertRaises(re.error, re.compile, '(?Px)')
self.assertRaises(re.error, re.compile, '(?P=)')
self.assertRaises(re.error, re.compile, '(?P=1)')
self.assertRaises(re.error, re.compile, '(?P=a)')
self.assertRaises(re.error, re.compile, '(?P=a1)')
self.assertRaises(re.error, re.compile, '(?P=a.)')
self.assertRaises(re.error, re.compile, '(?P<)')
self.assertRaises(re.error, re.compile, '(?P<>)')
self.assertRaises(re.error, re.compile, '(?P<1>)')
self.assertRaises(re.error, re.compile, '(?P<a.>)')
self.assertRaises(re.error, re.compile, '(?())')
self.assertRaises(re.error, re.compile, '(?(a))')
self.assertRaises(re.error, re.compile, '(?(1a))')
self.assertRaises(re.error, re.compile, '(?(a.))')
# New valid/invalid identifiers in Python 3
re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
self.assertRaises(re.error, re.compile, '(?P<©>x)')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
# New valid/invalid identifiers in Python 3
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
{'first': 1, 'other': 2})
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_string_boundaries(self):
# See http://bugs.python.org/issue10713
self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
"abc")
# There's a word boundary at the start of a string.
self.assertTrue(re.match(r"\b", "abc"))
# A non-empty string includes a non-boundary zero-length match.
self.assertTrue(re.search(r"\B", "abc"))
# There is no non-boundary match at the start of a string.
self.assertFalse(re.match(r"\B", "abc"))
# However, an empty string contains no word boundaries, and also no
# non-boundaries.
self.assertEqual(re.search(r"\B", ""), None)
# This one is questionable and different from the perlre behaviour,
# but describes current behavior.
self.assertEqual(re.search(r"\b", ""), None)
# A single word-character string has two boundaries, but no
# non-boundary gaps.
self.assertEqual(len(re.findall(r"\b", "a")), 2)
self.assertEqual(len(re.findall(r"\B", "a")), 0)
# If there are no words, there are no boundaries
self.assertEqual(len(re.findall(r"\b", " ")), 0)
self.assertEqual(len(re.findall(r"\b", " ")), 0)
# Can match around the whitespace.
self.assertEqual(len(re.findall(r"\B", " ")), 2)
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222", re.UNICODE).group(1), "\u2222")
def test_big_codesize(self):
# Issue #1160
r = re.compile('|'.join(('%d'%x for x in range(10000))))
self.assertIsNotNone(r.match('1000'))
self.assertIsNotNone(r.match('9999'))
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def assertMatch(self, pattern, text, match=None, span=None,
matcher=re.match):
if match is None and span is None:
# the pattern matches the whole text
match = text
span = (0, len(text))
elif match is None or span is None:
raise ValueError('If match is not None, span should be specified '
'(and vice versa).')
m = matcher(pattern, text)
self.assertTrue(m)
self.assertEqual(m.group(), match)
self.assertEqual(m.span(), span)
def test_re_escape(self):
alnum_chars = string.ascii_letters + string.digits + '_'
p = ''.join(chr(i) for i in range(256))
for c in p:
if c in alnum_chars:
self.assertEqual(re.escape(c), c)
elif c == '\x00':
self.assertEqual(re.escape(c), '\\000')
else:
self.assertEqual(re.escape(c), '\\' + c)
self.assertMatch(re.escape(c), c)
self.assertMatch(re.escape(p), p)
def test_re_escape_byte(self):
alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii')
p = bytes(range(256))
for i in p:
b = bytes([i])
if b in alnum_chars:
self.assertEqual(re.escape(b), b)
elif i == 0:
self.assertEqual(re.escape(b), b'\\000')
else:
self.assertEqual(re.escape(b), b'\\' + b)
self.assertMatch(re.escape(b), b)
self.assertMatch(re.escape(p), p)
def test_re_escape_non_ascii(self):
s = 'xxx\u2620\u2620\u2620xxx'
s_escaped = re.escape(s)
self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx')
self.assertMatch(s_escaped, s)
self.assertMatch('.%s+.' % re.escape('\u2620'), s,
'x\u2620\u2620\u2620x', (2, 7), re.search)
def test_re_escape_non_ascii_bytes(self):
b = 'y\u2620y\u2620y'.encode('utf-8')
b_escaped = re.escape(b)
self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y')
self.assertMatch(b_escaped, b)
res = re.findall(re.escape('\u2620'.encode('utf-8')), b)
self.assertEqual(len(res), 2)
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"\%03o" % i, chr(i)))
self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8"))
self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z"))
if i < 0x10000:
self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\0", "\000"))
self.assertIsNotNone(re.match(r"\08", "\0008"))
self.assertIsNotNone(re.match(r"\01", "\001"))
self.assertIsNotNone(re.match(r"\018", "\0018"))
self.assertIsNotNone(re.match(r"\567", chr(0o167)))
self.assertRaises(re.error, re.match, r"\911", "")
self.assertRaises(re.error, re.match, r"\x1", "")
self.assertRaises(re.error, re.match, r"\x1z", "")
self.assertRaises(re.error, re.match, r"\u123", "")
self.assertRaises(re.error, re.match, r"\u123z", "")
self.assertRaises(re.error, re.match, r"\U0001234", "")
self.assertRaises(re.error, re.match, r"\U0001234z", "")
self.assertRaises(re.error, re.match, r"\U00110000", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i)))
if i < 0x10000:
self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e"))
self.assertRaises(re.error, re.match, r"[\911]", "")
self.assertRaises(re.error, re.match, r"[\x1z]", "")
self.assertRaises(re.error, re.match, r"[\u123z]", "")
self.assertRaises(re.error, re.match, r"[\U0001234z]", "")
self.assertRaises(re.error, re.match, r"[\U00110000]", "")
def test_sre_byte_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8"))
self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z"))
self.assertIsNotNone(re.match(br"\u", b'u'))
self.assertIsNotNone(re.match(br"\U", b'U'))
self.assertIsNotNone(re.match(br"\0", b"\000"))
self.assertIsNotNone(re.match(br"\08", b"\0008"))
self.assertIsNotNone(re.match(br"\01", b"\001"))
self.assertIsNotNone(re.match(br"\018", b"\0018"))
self.assertIsNotNone(re.match(br"\567", bytes([0o167])))
self.assertRaises(re.error, re.match, br"\911", b"")
self.assertRaises(re.error, re.match, br"\x1", b"")
self.assertRaises(re.error, re.match, br"\x1z", b"")
def test_sre_byte_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match(br"[\u]", b'u'))
self.assertIsNotNone(re.match(br"[\U]", b'U'))
self.assertRaises(re.error, re.match, br"[\911]", "")
self.assertRaises(re.error, re.match, br"[\x1z]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
# bugs 418626 at al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat="["+re.escape("\u2039")+"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_unlimited_zero_width_repeat(self):
# Issue #9669
self.assertIsNone(re.match(r'(?:a?)*y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}y', 'z'))
self.assertIsNone(re.match(r'(?:a?)*?y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+?y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z'))
# def test_scanner(self):
# def s_ident(scanner, token): return token
# def s_operator(scanner, token): return "op%s" % token
# def s_float(scanner, token): return float(token)
# def s_int(scanner, token): return int(token)
#
# scanner = Scanner([
# (r"[a-zA-Z_]\w*", s_ident),
# (r"\d+\.\d*", s_float),
# (r"\d+", s_int),
# (r"=|\+|-|\*|/", s_operator),
# (r"\s+", None),
# ])
#
# self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
#
# self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
# (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
# 'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
class my_unicode(str): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", 1, 10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=1, endpos=10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", endpos=10, pos=1)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=3, endpos=8)
self.assertEqual([item.group(0) for item in iter],
["::", "::"])
def test_bug_926075(self):
self.assertTrue(re.compile('bug_926075') is not
re.compile(b'bug_926075'))
def test_bug_931848(self):
pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(next(iter).span(), (1,2))
self.assertRaises(StopIteration, next, iter)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(next(iter).span(), (0, 4))
self.assertEqual(next(iter).span(), (4, 4))
self.assertRaises(StopIteration, next, iter)
def test_bug_6561(self):
# '\d' should match characters in Unicode category 'Nd'
# (Number, Decimal Digit), but not those in 'Nl' (Number,
# Letter) or 'No' (Number, Other).
decimal_digits = [
'\u0037', # '\N{DIGIT SEVEN}', category 'Nd'
'\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd'
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
]
for x in decimal_digits:
self.assertEqual(re.match('^\d$', x).group(0), x)
not_decimal_digits = [
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
'\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl'
'\u2082', # '\N{SUBSCRIPT TWO}', category 'No'
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
]
for x in not_decimal_digits:
self.assertIsNone(re.match('^\d$', x))
def test_empty_array(self):
# SF buf 1647541
import array
for typecode in 'bBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile(b"bla").match(a), None)
self.assertEqual(re.compile(b"").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Bellow
lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Bellow
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_bytes_str_mixing(self):
# Mixing str and bytes is disallowed
pat = re.compile('.')
bpat = re.compile(b'.')
self.assertRaises(TypeError, pat.match, b'b')
self.assertRaises(TypeError, bpat.match, 'b')
self.assertRaises(TypeError, pat.sub, b'b', 'c')
self.assertRaises(TypeError, pat.sub, 'b', b'c')
self.assertRaises(TypeError, pat.sub, b'b', b'c')
self.assertRaises(TypeError, bpat.sub, b'b', 'c')
self.assertRaises(TypeError, bpat.sub, 'b', b'c')
self.assertRaises(TypeError, bpat.sub, 'b', 'c')
def test_ascii_and_unicode_flag(self):
# String patterns
for flags in (0, re.UNICODE):
pat = re.compile('\xc0', flags | re.IGNORECASE)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\w', flags)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\xc0', re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('\w', re.ASCII)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\w')
self.assertEqual(pat.match('\xe0'), None)
# Bytes patterns
for flags in (0, re.ASCII):
pat = re.compile(b'\xc0', re.IGNORECASE)
self.assertEqual(pat.match(b'\xe0'), None)
pat = re.compile(b'\w')
self.assertEqual(pat.match(b'\xe0'), None)
# Incompatibilities
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, b'(?u)\w')
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, '(?au)\w')
def test_bug_6509(self):
# Replacement strings of both types must parse properly.
# all strings
pat = re.compile('a(\w)')
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
pat = re.compile('a(.)')
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
pat = re.compile('..')
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
# all bytes
pat = re.compile(b'a(\w)')
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
pat = re.compile(b'a(.)')
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
pat = re.compile(b'..')
self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes')
def test_dealloc(self):
# issue 3299: check for segfault in debug build
import _sre
# the overflow limit is different on wide and narrow builds and it
# depends on the definition of SRE_CODE (see sre.h).
# 2**128 should be big enough to overflow on both. For smaller values
# a RuntimeError is raised instead of OverflowError.
long_overflow = 2**128
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
self.assertRaises(TypeError, _sre.compile, {}, 0, [])
def test_search_dot_unicode(self):
self.assertIsNotNone(re.search("123.*-", '123abc-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9-'))
self.assertIsNotNone(re.search("123.*-", '123\u20ac-'))
self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-'))
def test_compile(self):
# Test return value when given string and pattern as parameter
pattern = re.compile('random pattern')
self.assertIsInstance(pattern, re._pattern_type)
same_pattern = re.compile(pattern)
self.assertIsInstance(same_pattern, re._pattern_type)
self.assertIs(same_pattern, pattern)
# Test behaviour when not given a string or pattern as parameter
self.assertRaises(TypeError, re.compile, 0)
def test_bug_13899(self):
# Issue #13899: re pattern r"[\A]" should work like "A" but matches
# nothing. Ditto B and Z.
self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
['A', 'B', '\b', 'C', 'Z'])
# FIXME: brython: implement test.support
# @bigmemtest(size=_2G, memuse=1)
# def test_large_search(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# m = re.search('$', s)
# self.assertIsNotNone(m)
# self.assertEqual(m.start(), size)
# self.assertEqual(m.end(), size)
# FIXME: brython: implement test.support
# The huge memuse is because of re.sub() using a list and a join()
# to create the replacement result.
# @bigmemtest(size=_2G, memuse=16 + 2)
# def test_large_subn(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# r, n = re.subn('', '', s)
# self.assertEqual(r, s)
# self.assertEqual(n, size + 1)
def test_bug_16688(self):
# Issue 16688: Backreferences make case-insensitive regex fail on
# non-ASCII strings.
self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a'])
self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2))
def test_repeat_minmax_overflow(self):
# Issue #13169
string = "x" * 100000
self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536))
# 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t.
self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128))
# FIXME: brython: implement test.support
# @cpython_only
# def test_repeat_minmax_overflow_maxrepeat(self):
# try:
# from _sre import MAXREPEAT
# except ImportError:
# self.skipTest('requires _sre.MAXREPEAT constant')
# string = "x" * 100000
# self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string))
# self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(),
# (0, 100000))
# self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string))
# self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT)
def test_backref_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '<foo>'):
re.compile('(?P=<foo>)')
def test_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '\?foo'):
re.compile('(?P<?foo>)')
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print('Running re_tests test suite')
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print('=== Syntax error:', t)
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print('*** Unexpected error ***', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error as msg:
print('=== Unexpected exception', t, repr(msg))
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print('=== Succeeded incorrectly', t)
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print('=== grouping error', t, end=' ')
print(repr(repl) + ' should be ' + repr(expected))
else:
print('=== Failed incorrectly', t)
# Try the match with both pattern and string converted to
# bytes, and check that it still succeeds.
try:
bpat = bytes(pattern, "ascii")
bs = bytes(s, "ascii")
except UnicodeEncodeError:
# skip non-ascii tests
pass
else:
try:
bpat = re.compile(bpat)
except Exception:
print('=== Fails on bytes pattern compile', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
bytes_result = bpat.search(bs)
if bytes_result is None:
print('=== Fails on bytes pattern match', t)
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print('=== Failed on range-limited match', t)
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print('=== Fails on case-insensitive match', t)
# Try the match with LOCALE enabled, and check that it
# still succeeds.
if '(?u)' not in pattern:
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print('=== Fails on locale-sensitive match', t)
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print('=== Fails on unicode-sensitive match', t)
def test_main():
# FIXME: brython: implement test.support
# run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
|
gpl-3.0
| -1,818,957,002,528,281,600 | -9,076,075,710,563,269,000 | 45.93283 | 86 | 0.508668 | false |
hthompson6/contrail-controller
|
src/config/utils/service-instance.py
|
9
|
9286
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import errno
import pprint
import subprocess
import time
import argparse
import ConfigParser
sys.path.insert(0, os.path.realpath('/usr/lib/python2.7/site-packages'))
sys.path.insert(
0,
os.path.realpath('/usr/lib/python2.7/site-packages/vnc_cfg_api_server/'))
from vnc_api.vnc_api import *
from vnc_api.common import exceptions as vnc_exceptions
import vnc_cfg_api_server
from svc_monitor import svc_monitor
from novaclient import client as nc
from novaclient import exceptions as nc_exc
class ServiceInstanceCmd(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
self._proj_fq_name = [self._args.domain_name, self._args.proj_name]
self._si_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.instance_name]
self._st_fq_name = [self._args.domain_name, self._args.template_name]
self._domain_fq_name = [self._args.domain_name]
if self._args.left_vn:
self._left_vn_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.left_vn]
if self._args.right_vn:
self._right_vn_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.right_vn]
if self._args.mgmt_vn:
self._mgmt_vn_fq_name = [self._args.domain_name,
self._args.proj_name,
self._args.mgmt_vn]
self._novaclient_init()
self._vnc_lib = VncApi('u', 'p',
api_server_host=self._args.api_server_ip,
api_server_port=self._args.api_server_port)
# end __init__
def _parse_args(self, args_str):
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
global_defaults = {
'domain_name': 'default-domain',
'template_name': None,
'instance_name': None,
'proj_name': 'demo',
'mgmt_vn': None,
'left_vn': None,
'right_vn': None,
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
}
if not args.conf_file:
args.conf_file = '/etc/contrail/contrail-svc-monitor.conf'
config = ConfigParser.SafeConfigParser()
ret = config.read([args.conf_file])
if args.conf_file not in ret:
print "Error: Unable to read the config file %s" % args.conf_file
sys.exit(-1)
global_defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**global_defaults)
subparsers = parser.add_subparsers()
create_parser = subparsers.add_parser('add')
create_parser.add_argument(
"instance_name", help="service instance name")
create_parser.add_argument(
"template_name", help="service template name")
create_parser.add_argument(
"--proj_name", help="name of project [default: demo]")
create_parser.add_argument(
"--mgmt_vn", help="name of management vn [default: none]")
create_parser.add_argument(
"--left_vn", help="name of left vn [default: none]")
create_parser.add_argument(
"--right_vn", help="name of right vn [default: none]")
create_parser.add_argument("--max_instances", type=int, default=1,
help="max instances to launch [default: 1]")
create_parser.add_argument(
"--auto_scale", action="store_true", default=False,
help="enable auto-scale from 1 to max_instances")
create_parser.set_defaults(func=self.create_si)
delete_parser = subparsers.add_parser('del')
delete_parser.add_argument(
"instance_name", help="service instance name")
delete_parser.add_argument(
"template_name", help="service instance name")
delete_parser.add_argument(
"--proj_name", help="name of project [default: demo]")
delete_parser.set_defaults(func=self.delete_si)
list_parser = subparsers.add_parser('list')
list_parser.set_defaults(func=self.list_si)
self._args = parser.parse_args(remaining_argv)
# end _parse_args
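# Illustrative invocations (instance/template/network names below are
# hypothetical placeholders, not from this repo):
#
#   service-instance.py add my-si my-st --left_vn vn-left --right_vn vn-right --max_instances 2
#   service-instance.py del my-si my-st
#   service-instance.py list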
def _novaclient_init(self):
self._nova = nc.Client(
'2', username='admin',
project_id=self._args.proj_name, api_key='contrail123',
auth_url='http://' + self._args.api_server_ip + ':5000/v2.0')
# end _novaclient_init
# create service instance
def create_si(self):
# get service template
try:
st_obj = self._vnc_lib.service_template_read(
fq_name=self._st_fq_name)
st_prop = st_obj.get_service_template_properties()
if st_prop is None:
print "Error: Service template %s properties not found"\
% (self._args.template_name)
return
except NoIdError:
print "Error: Service template %s not found"\
% (self._args.template_name)
return
if st_prop.get_image_name():
# check if image exists
try:
self._nova.images.find(name=st_prop.get_image_name())
except nc_exc.NotFound:
print "Error: Image %s not found" % (st_prop.get_image_name())
return
# check if passed VNs exist
if self._args.left_vn:
try:
self._vnc_lib.virtual_network_read(
fq_name=self._left_vn_fq_name)
except NoIdError:
print "Error: Left VN %s not found" % (self._left_vn_fq_name)
return
if self._args.right_vn:
try:
self._vnc_lib.virtual_network_read(
fq_name=self._right_vn_fq_name)
except NoIdError:
print "Error: Right VN %s not found" % (self._right_vn_fq_name)
return
if self._args.mgmt_vn:
try:
self._vnc_lib.virtual_network_read(
fq_name=self._mgmt_vn_fq_name)
except NoIdError:
print "Error: Management VN %s not found" % (self._mgmt_vn_fq_name)
return
else:
self._mgmt_vn_fq_name = []
# create si
print "Creating service instance %s" % (self._args.instance_name)
project = self._vnc_lib.project_read(fq_name=self._proj_fq_name)
try:
si_obj = self._vnc_lib.service_instance_read(
fq_name=self._si_fq_name)
si_uuid = si_obj.uuid
except NoIdError:
si_obj = ServiceInstance(
self._args.instance_name, parent_obj=project)
si_uuid = self._vnc_lib.service_instance_create(si_obj)
si_prop = ServiceInstanceType(
left_virtual_network=':'.join(self._left_vn_fq_name),
management_virtual_network=':'.join(self._mgmt_vn_fq_name),
right_virtual_network=':'.join(self._right_vn_fq_name))
# set scale out
scale_out = ServiceScaleOutType(
max_instances=self._args.max_instances,
auto_scale=self._args.auto_scale)
si_prop.set_scale_out(scale_out)
si_obj.set_service_instance_properties(si_prop)
st_obj = self._vnc_lib.service_template_read(id=st_obj.uuid)
si_obj.set_service_template(st_obj)
self._vnc_lib.service_instance_update(si_obj)
return si_uuid
# end create_si
def delete_si(self):
try:
print "Deleting service instance %s" % (self._args.instance_name)
self._vnc_lib.service_instance_delete(fq_name=self._si_fq_name)
except NoIdError:
return
# delete_si
def list_si(self):
print "List service instances"
instances = self._vnc_lib.service_instances_list()
pprint.pprint(instances)
# list_si
# end class ServiceInstanceCmd
def main(args_str=None):
si = ServiceInstanceCmd(args_str)
si._args.func()
# end main
if __name__ == "__main__":
main()
|
apache-2.0
| -4,038,587,116,984,986,600 | -3,976,097,807,307,613,700 | 35.849206 | 83 | 0.556967 | false |
sguotciq/django-test
|
mysite/mysite/urls.py
|
1
|
1262
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
urlpatterns = [
# Examples:
url(r'^$', 'learn.views.index', name='default'),# homepage
url(r'^home/$', 'learn.views.home', name='home'),# homepage
url(r'^add/$', 'learn.views.add', name='add'),#add test
url(r'^add/(\d+)/(\d+)/$', 'learn.views.add2', name='add2'),#add test2
url(r'^admin/', include(admin.site.urls)),
]
# urlpatterns = patterns('',
# # Examples:
# url(r'^$', 'learn.views.index', name='home'),# Notice this line
# # url(r'^blog/', include('blog.urls')),
# url(r'^admin/', include(admin.site.urls)),
# )
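# A minimal sketch (assumed, not part of this project) of the view the
# `add2` pattern above would call -- the two `(\d+)` groups are passed as
# positional string arguments:
#
# from django.http import HttpResponse
#
# def add2(request, a, b):
#     return HttpResponse(str(int(a) + int(b)))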
|
gpl-2.0
| 5,675,516,515,841,849,000 | 2,734,465,121,029,617,000 | 36.147059 | 77 | 0.644216 | false |
jamespacileo/django-france
|
tests/regressiontests/forms/tests/formsets.py
|
50
|
41032
|
# -*- coding: utf-8 -*-
from django.forms import Form, CharField, IntegerField, ValidationError, DateField
from django.forms.formsets import formset_factory, BaseFormSet
from django.utils.unittest import TestCase
class Choice(Form):
choice = CharField()
votes = IntegerField()
# FormSet allows us to use multiple instances of the same form on 1 page. For now,
# the best way to create a FormSet is by using the formset_factory function.
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError("Clean method called")
# Let's define a FormSet that takes a list of favorite drinks, but raises an
# error if there are any duplicates. Used in ``test_clean_hook``,
# ``test_regression_6926`` & ``test_regression_12878``.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm,
formset=BaseFavoriteDrinksFormSet, extra=3)
class FormsFormsetTestCase(TestCase):
def test_basic_formset(self):
# A FormSet constructor takes the same arguments as Form. Let's create a FormSet
# for adding data. By default, it displays 1 blank form. It can display more,
# but we'll look at how to do so later.
formset = ChoiceFormSet(auto_id=False, prefix='choices')
self.assertEqual(str(formset), """<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" /></td></tr>
<tr><th>Votes:</th><td><input type="text" name="choices-0-votes" /></td></tr>""")
# One thing to note is that there needs to be a special value in the data. This
# value tells the FormSet how many forms were displayed so it can tell how
# many forms it needs to clean and validate. You could use javascript to create
# new forms on the client side, but they won't get validated unless you increment
# the TOTAL_FORMS field appropriately.
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
# We treat FormSet pretty much like we would treat a normal Form. FormSet has an
# is_valid method, and a cleaned_data or errors attribute depending on whether all
# the forms passed validation. However, unlike a Form instance, cleaned_data and
# errors will be a list of dicts rather than just a single dict.
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': u'Calexico'}])
# If a FormSet was not passed any data, its is_valid method should return False.
formset = ChoiceFormSet()
self.assertFalse(formset.is_valid())
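# Hypothetical view-side usage of the formset exercised above (a sketch,
# not part of the original tests):
#
# def vote(request):
#     if request.method == 'POST':
#         formset = ChoiceFormSet(request.POST, prefix='choices')
#         if formset.is_valid():
#             for cleaned in formset.cleaned_data:
#                 pass  # each dict looks like {'choice': ..., 'votes': ...}
#     else:
#         formset = ChoiceFormSet(prefix='choices')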
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': [u'This field is required.']}])
def test_formset_initial_data(self):
# We can also prefill a FormSet with existing data by providing an ``initial``
# argument to the constructor. ``initial`` should be a list of dicts. By default,
# an extra blank form is included.
initial = [{'choice': u'Calexico', 'votes': 100}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="text" name="choices-1-votes" /></li>""")
# Let's simulate what would happen if we submitted this form.
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '1', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': u'Calexico'}, {}])
def test_second_form_partially_filled(self):
# But the second form was blank! Shouldn't we get some errors? No. If we display
# a form as blank, it's ok for it to be submitted as blank. If we fill out even
# one of the fields of a blank form though, it will be validated. We may want to
# require that at least x number of forms are completed, but we'll show how to
# handle that later.
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '1', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': [u'This field is required.']}])
def test_delete_prefilled_data(self):
# If we delete data that was pre-filled, we should get an error. Simply removing
# data from form fields isn't the proper way to delete it. We'll see how to
# handle that case later.
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '1', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '', # deleted value
'choices-0-votes': '', # deleted value
'choices-1-choice': '',
'choices-1-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': [u'This field is required.'], 'choice': [u'This field is required.']}, {}])
def test_displaying_more_than_one_blank_form(self):
# Displaying more than 1 blank form ###########################################
# We can also display more than 1 empty form at a time. To do so, pass an
# extra argument to formset_factory.
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" /></li>
<li>Votes: <input type="text" name="choices-0-votes" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="text" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>""")
# Since we displayed every form as blank, we will also accept them back as blank.
# This may seem a little strange, but later we will show how to require a minimum
# number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_single_form_completed(self):
# We can just fill out one of the forms.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': u'Calexico'}, {}, {}])
def test_second_form_partially_filled_2(self):
# And once again, if we try to partially complete a form, validation will fail.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': [u'This field is required.']}, {}])
def test_more_initial_data(self):
# The extra argument also works when the formset is pre-filled with initial
# data.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
initial = [{'choice': u'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Choice: <input type="text" name="choices-1-choice" /></li>
<li>Votes: <input type="text" name="choices-1-votes" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="text" name="choices-3-votes" /></li>""")
# Make sure retrieving an empty form works, and it shows up in the form list
self.assertTrue(formset.empty_form.empty_permitted)
self.assertEqual(formset.empty_form.as_ul(), """<li>Choice: <input type="text" name="choices-__prefix__-choice" /></li>
<li>Votes: <input type="text" name="choices-__prefix__-votes" /></li>""")
def test_formset_with_deletion(self):
# FormSets with deletion ######################################################
# We can easily add deletion ability to a FormSet with an argument to
# formset_factory. This will add a boolean field to each form instance. When
# that boolean field is True, the form will be in formset.deleted_forms
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': u'Calexico', 'votes': 100}, {'choice': u'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="text" name="choices-1-votes" value="900" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>""")
# To delete something, we just need to set that form's special delete field to
# 'on'. Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'DELETE': False, 'choice': u'Calexico'}, {'votes': 900, 'DELETE': True, 'choice': u'Fergie'}, {}])
self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'choice': u'Fergie'}])
# If we fill a form with something and then we check the can_delete checkbox for
# that form, that form's errors should not make the entire formset invalid since
# it's going to be deleted.
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If we remove the deletion flag now we will have our validation back.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
# Should be able to get deleted_forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(
form=Person,
can_delete=True)
p = PeopleForm(
{'form-0-name': u'', 'form-0-DELETE': u'on', # no name!
'form-TOTAL_FORMS': 1, 'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1})
self.assertTrue(p.is_valid())
self.assertEqual(len(p.deleted_forms), 1)
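# Sketch (assumed usage, not in the original suite) of acting on the
# deletion flags once a formset validates:
#
# for form in formset.deleted_forms:
#     remove_choice(form.cleaned_data['choice'])  # hypothetical helper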
def test_formsets_with_ordering(self):
# FormSets with ordering ######################################################
# We can also add ordering ability to a FormSet with an argument to
# formset_factory. This will add an integer field to each form instance. When
# form validation succeeds, [form.cleaned_data for form in formset.forms] will have the data in the correct
# order specified by the ordering fields. If a number is duplicated in the set
# of ordering fields, for instance form 0 and form 3 are both marked as 1, then
# the form index is used as a secondary ordering criterion. In order to put
# something at the front of the list, you'd need to set its order to 0.
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': u'Calexico', 'votes': 100}, {'choice': u'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="text" name="choices-0-ORDER" value="1" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="text" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="text" name="choices-1-ORDER" value="2" /></li>
<li>Choice: <input type="text" name="choices-2-choice" /></li>
<li>Votes: <input type="text" name="choices-2-votes" /></li>
<li>Order: <input type="text" name="choices-2-ORDER" /></li>""")
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'ORDER': 0, 'choice': u'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': u'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': u'Fergie'},
])
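# Sketch (assumed, not part of the suite) of persisting forms in the
# user-chosen order once validation passes:
#
# for position, form in enumerate(formset.ordered_forms):
#     save_choice(position, form.cleaned_data)  # hypothetical helper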
def test_empty_ordered_fields(self):
# Ordering fields are allowed to be left blank, and if they *are* left blank,
# they will be sorted below everything else.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 100, 'ORDER': 1, 'choice': u'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': u'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': u'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': u'Basia Bulat'},
])
def test_ordering_blank_fieldsets(self):
# Ordering should work with blank fieldsets.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [])
def test_formset_with_ordering_and_deletion(self):
# FormSets with ordering + deletion ###########################################
# Let's try throwing ordering and deletion into the same form.
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': u'Calexico', 'votes': 100},
{'choice': u'Fergie', 'votes': 900},
{'choice': u'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
form_output = []
for form in formset.forms:
form_output.append(form.as_ul())
self.assertEqual('\n'.join(form_output), """<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>
<li>Order: <input type="text" name="choices-0-ORDER" value="1" /></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE" /></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie" /></li>
<li>Votes: <input type="text" name="choices-1-votes" value="900" /></li>
<li>Order: <input type="text" name="choices-1-ORDER" value="2" /></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE" /></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists" /></li>
<li>Votes: <input type="text" name="choices-2-votes" value="500" /></li>
<li>Order: <input type="text" name="choices-2-ORDER" value="3" /></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE" /></li>
<li>Choice: <input type="text" name="choices-3-choice" /></li>
<li>Votes: <input type="text" name="choices-3-votes" /></li>
<li>Order: <input type="text" name="choices-3-ORDER" /></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE" /></li>""")
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
form_output = []
for form in formset.ordered_forms:
form_output.append(form.cleaned_data)
self.assertEqual(form_output, [
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': u'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': u'Calexico'},
])
self.assertEqual([form.cleaned_data for form in formset.deleted_forms], [{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': u'Fergie'}])
def test_invalid_deleted_form_with_ordering(self):
# Should be able to get ordered forms from a valid formset even if a
# deleted form would have been invalid.
class Person(Form):
name = CharField()
PeopleForm = formset_factory(form=Person, can_delete=True, can_order=True)
p = PeopleForm({
'form-0-name': u'',
'form-0-DELETE': u'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(p.is_valid())
self.assertEqual(p.ordered_forms, [])
def test_clean_hook(self):
# FormSet clean hook ##########################################################
# FormSets have a hook for doing extra validation that shouldn't be tied to any
# particular form. It follows the same pattern as the clean hook on Forms.
# We start out with a some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# Make sure we didn't break the valid case.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Bloody Mary',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
# Limiting the maximum number of forms ########################################
# Base case for max_num.
# When not passed, max_num will take its default value of None, i.e. unlimited
# number of forms, only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th><td><input type="text" name="form-2-name" id="id_form-2-name" /></td></tr>""")
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
# Ensure that max_num has no effect when extra is less than max_num.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" id="id_form-0-name" /></td></tr>""")
def test_max_num_with_initial_data(self):
# max_num with initial data
# When not passed, max_num will take its default value of None, i.e. unlimited
# number of forms, only controlled by the values of the initial and extra
# parameters.
initial = [
{'name': 'Fernet and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_max_num_zero(self):
# If max_num is 0 then no form is rendered at all, even if extra and initial
# are specified.
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), "")
def test_more_initial_than_max_num(self):
# More initial forms than max_num will result in only the first max_num of
# them to be displayed with no extra forms.
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" value="Bloody Mary" id="id_form-1-name" /></td></tr>""")
# One form from initial and extra=3 with max_num=2 should result in the one
# initial form and one extra.
initial = [
{'name': 'Gin Tonic'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
form_output = []
for form in formset.forms:
form_output.append(str(form))
self.assertEqual('\n'.join(form_output), """<tr><th><label for="id_form-0-name">Name:</label></th><td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name" /></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th><td><input type="text" name="form-1-name" id="id_form-1-name" /></td></tr>""")
def test_regression_6926(self):
# Regression test for #6926 ##################################################
# Make sure the management form has the correct prefix.
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_regression_12878(self):
# Regression test for #12878 #################################################
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [u'You may only specify a drink once.'])
def test_formset_iteration(self):
# Regression tests for #16455 -- formset instances are iterable
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# confirm iterated formset yields formset.forms
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# confirm indexing of formset
self.assertEqual(formset[0], forms[0])
try:
formset[3]
self.fail('Requesting an invalid formset index should raise an exception')
except IndexError:
pass
# Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
for form in reversed(self.forms):
yield form
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# confirm that __iter__ modifies rendering order
# compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
data = {
'choices-TOTAL_FORMS': '1', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FormsetAsFooTests(TestCase):
def test_as_table(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.as_table(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice" value="Calexico" /></td></tr>
<tr><th>Votes:</th><td><input type="text" name="choices-0-votes" value="100" /></td></tr>""")
def test_as_p(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.as_p(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<p>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></p>
<p>Votes: <input type="text" name="choices-0-votes" value="100" /></p>""")
def test_as_ul(self):
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.as_ul(),"""<input type="hidden" name="choices-TOTAL_FORMS" value="1" /><input type="hidden" name="choices-INITIAL_FORMS" value="0" /><input type="hidden" name="choices-MAX_NUM_FORMS" value="0" />
<li>Choice: <input type="text" name="choices-0-choice" value="Calexico" /></li>
<li>Votes: <input type="text" name="choices-0-votes" value="100" /></li>""")
# Regression test for #11418 #################################################
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(TestCase):
def test_no_data_raises_validation_error(self):
self.assertRaises(ValidationError, ArticleFormSet, {})
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': u'1',
'form-INITIAL_FORMS': u'0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': u'2',
'form-INITIAL_FORMS': u'0',
'form-0-title': u'Test',
'form-0-pub_date': u'1904-06-16',
'form-1-title': u'Test',
'form-1-pub_date': u'', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': [u'This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': u'1',
'form-INITIAL_FORMS': u'0',
'form-0-title': u'Test',
'form-0-pub_date': u'1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = []
empty_forms.append(unbound_formset.empty_form)
empty_forms.append(bound_formset.empty_form)
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(TestCase):
"Test that an empty formset still calls clean()"
def test_empty_formset_is_valid(self):
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS':'0', 'form-TOTAL_FORMS':'0'},prefix="form")
formset2 = EmptyFsetWontValidateFormset(data={'form-INITIAL_FORMS':'0', 'form-TOTAL_FORMS':'1', 'form-0-name':'bah' },prefix="form")
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
|
bsd-3-clause
| 8,709,939,033,129,116,000 | 3,283,301,767,930,047,500 | 44.040615 | 231 | 0.59729 | false |
SDRC-AUV/ardusub
|
Tools/scripts/generate-manifest.py
|
22
|
11584
|
#!/usr/bin/python
from __future__ import print_function
import sys
import json
import os
import re
class Firmware():
def __init__(self, date=None, platform=None, vehicletype=None, filepath=None, git_sha=None, frame=None):
self.atts = dict()
self.atts["date"] = date
self.atts["platform"] = platform
self.atts["vehicletype"] = vehicletype
self.atts["filepath"] = filepath
self.atts["git_sha"] = git_sha
self.atts["frame"] = frame
self.atts["release-type"] = None
self.atts["firmware-version"] = None
def __getitem__(self,what):
return self.atts[what]
def __setitem__(self,name,value):
self.atts[name] = value
class ManifestGenerator():
'''Return a JSON string describing "binary" directory contents under basedir'''
def __init__(self,basedir,baseurl):
self.basedir = basedir
self.baseurl = baseurl
def frame_map(self, frame):
'''translate from ArduPilot frame type terminology into mavlink terminology'''
frame_to_mavlink_dict = {
"quad": "QUADROTOR",
"hexa": "HEXAROTOR",
"y6": "ARDUPILOT_Y6",
"tri": "TRICOPTER",
"octa": "OCTOROTOR",
"octa-quad": "ARDUPILOT_OCTAQUAD",
"heli": "HELICOPTER",
"Plane": "FIXED_WING",
"AntennaTracker": "ANTENNA_TRACKER",
"Rover": "GROUND_ROVER",
"PX4IO": "ARDUPILOT_PX4IO",
}
if frame in frame_to_mavlink_dict:
return frame_to_mavlink_dict[frame]
return frame
def releasetype_map(self, releasetype):
'''translate from ArduPilot release type terminology into mavlink terminology'''
if releasetype == 'stable': return 'OFFICIAL'
return releasetype.upper()
def looks_like_binaries_directory(self, dir):
'''returns True if dir looks like it is a build_binaries.sh output directory'''
for entry in os.listdir(dir):
if entry in {"AntennaTracker", "Copter", "Plane", "Rover"}:
return True
return False
def git_sha_from_git_version(self, filepath):
'''parses git-version.txt (as emitted by build_binaries.sh), returns the git sha from it'''
content = open(filepath).read()
sha_regex = re.compile("commit (?P<sha>[0-9a-f]+)")
m = sha_regex.search(content)
if m is None:
raise Exception("filepath (%s) does not appear to contain a git sha" % (filepath,))
return m.group("sha")
def add_firmware_data_from_dir(self, dir, firmware_data, vehicletype, releasetype="dev"):
'''accumulate additional information about firmwares from a directory'''
platform_frame_regex = re.compile("(?P<board>PX4|navio|pxf)(-(?P<frame>.+))?")
variant_firmware_regex = re.compile("[^-]+-(?P<variant>v\d+)\.px4")
for platformdir in os.listdir(dir):
some_dir = os.path.join(dir, platformdir)
try:
git_sha = self.git_sha_from_git_version(os.path.join(some_dir, "git-version.txt"))
except Exception as e:
continue
try:
firmware_version = open(os.path.join(some_dir, "firmware-version.txt")).read()
firmware_version = firmware_version.strip()
except Exception as e:
# this exception is swallowed.... the current archive
# is incomplete.
firmware_version = None
m = platform_frame_regex.match(platformdir)
if m is not None:
# the model type (quad/tri) is
# encoded in the platform name
# (e.g. navio-octa)
platform = m.group("board") # e.g. navio
frame = m.group("frame") # e.g. octa
if frame is None:
frame = vehicletype
else:
frame = vehicletype # e.g. Plane
platform = platformdir # e.g. apm2
for file in os.listdir(some_dir):
if file == "git-version.txt":
continue
if file == "firmware-version.txt":
continue
m = variant_firmware_regex.match(file)
if m:
# the platform variant is
# encoded in the firmware filename
# (e.g. the "v1" in
# ArduCopter-v1.px4)
variant = m.group("variant")
file_platform = "-".join([platform,variant])
else:
file_platform = platform
firmware_format = "".join(file.split(".")[-1:])
if not vehicletype in firmware_data:
firmware_data[vehicletype] = dict()
if not file_platform in firmware_data[vehicletype]:
firmware_data[vehicletype][file_platform] = dict()
if not git_sha in firmware_data[vehicletype][file_platform]:
firmware_data[vehicletype][file_platform][git_sha] = dict()
if not firmware_format in firmware_data[vehicletype][file_platform][git_sha]:
firmware_data[vehicletype][file_platform][git_sha][firmware_format] = dict()
if not frame in firmware_data[vehicletype][file_platform][git_sha][firmware_format]:
firmware_data[vehicletype][file_platform][git_sha][firmware_format][frame] = Firmware()
firmware = firmware_data[vehicletype][file_platform][git_sha][firmware_format][frame]
# translate from the supplied "release type" into both a
# "latest" flag and an actual release type. Also sort
# out which filepath we should use:
firmware["latest"] = 0
if releasetype == "dev":
if firmware["filepath"] is None:
firmware["filepath"] = os.path.join(some_dir, file)
if firmware["release-type"] is None:
firmware["release-type"] = "dev"
elif releasetype == "latest":
firmware["latest"] = 1
firmware["filepath"] = os.path.join(some_dir, file)
if firmware["release-type"] is None:
firmware["release-type"] = "dev"
else:
if (not firmware["latest"]):
firmware["filepath"] = os.path.join(some_dir, file)
firmware["release-type"] = releasetype
firmware["platform"] = file_platform
firmware["vehicletype"] = vehicletype
firmware["git_sha"] = git_sha
firmware["frame"] = frame
firmware["timestamp"] = os.path.getctime(firmware["filepath"])
firmware["format"] = firmware_format
firmware["firmware-version"] = firmware_version
def xfirmwares_to_firmwares(self, xfirmwares):
'''takes hash structure of firmwares, returns list of them'''
if isinstance(xfirmwares, dict):
ret = []
for value in xfirmwares.values():
o = self.xfirmwares_to_firmwares(value)
for oo in o:
ret.append(oo)
return ret
else:
return [xfirmwares]
known_release_types = {
"beta" : 1,
"latest" : 1,
"stable" : 1
}
def parse_fw_version(self, version):
(version_numbers,release_type) = version.split("-")
(major,minor,patch) = version_numbers.split(".")
return (major,minor,patch,version)
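# Illustrative call: parse_fw_version("3.4.0-beta") returns
# ("3", "4", "0", "3.4.0-beta"). Note the components stay strings, and a
# version without a "-<type>" suffix would make the first split raise.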
def walk_directory(self, basedir):
'''walks directory structure created by build_binaries, returns Python structure representing releases in that structure'''
year_month_regex = re.compile("(?P<year>\d{4})-(?P<month>\d{2})")
xfirmwares = dict()
# used to listdir basedir here, but since this is also a web document root, there's a lot of other stuff accumulated...
vehicletypes = [ 'AntennaTracker', 'Copter', 'Plane', 'PX4IO', 'Rover' ]
for vehicletype in vehicletypes:
vdir = os.listdir(os.path.join(basedir, vehicletype))
for firstlevel in vdir:
if year_month_regex.match(firstlevel):
# this is a dated directory e.g. binaries/Copter/2016-02
year_month_path = os.path.join(basedir, vehicletype, firstlevel)
for fulldate in os.listdir(year_month_path):
self.add_firmware_data_from_dir(os.path.join(year_month_path, fulldate), xfirmwares, vehicletype)
else:
# assume this is a release directory such as
# "beta", or the "latest" directory (treated as a
# release and handled specially later)
tag = firstlevel
if tag not in self.known_release_types:
print("Unknown tag (%s) in directory (%s)" %
(tag, vdir))
tag_path = os.path.join(basedir, vehicletype, tag)
self.add_firmware_data_from_dir(tag_path, xfirmwares, vehicletype, releasetype=tag)
firmwares = self.xfirmwares_to_firmwares(xfirmwares)
# convert from ardupilot-naming conventions to common JSON format:
firmware_json = []
for firmware in firmwares:
filepath = firmware["filepath"]
# replace the base directory with the base URL
urlifier = re.compile("^" + re.escape(basedir))
url = re.sub(urlifier, self.baseurl, filepath)
some_json = dict({
"mav-autopilot": "ARDUPILOTMEGA",
# "vehicletype": firmware["vehicletype"],
"platform": firmware["platform"],
"git-sha": firmware["git_sha"],
"url": url,
"mav-type": self.frame_map(firmware["frame"]),
"mav-firmware-version-type": self.releasetype_map(firmware["release-type"]),
"latest": firmware["latest"],
"format": firmware["format"],
})
if firmware["firmware-version"]:
(major,minor,patch,release_type) = self.parse_fw_version(firmware["firmware-version"])
some_json["mav-firmware-version"] = ".".join([major,minor,patch])
some_json["mav-firmware-version-major"] = major
some_json["mav-firmware-version-minor"] = minor
some_json["mav-firmware-version-patch"] = patch
firmware_json.append(some_json)
ret = {
"format-version": "1.0.0", # semantic versioning
"firmware": firmware_json
}
return ret
def json(self):
'''walk directory supplied in constructor, return json string'''
if not self.looks_like_binaries_directory(self.basedir):
print("Warning: this does not look like a binaries directory", file=sys.stderr)
structure = self.walk_directory(self.basedir)
return json.dumps(structure, indent=4)
def usage():
return '''Usage:
generate-manifest.py basedir baseurl'''
if __name__ == "__main__":
if len(sys.argv) != 3:
print(usage())
sys.exit(1)
generator = ManifestGenerator(sys.argv[1], sys.argv[2])
print(generator.json())
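# Example invocation (directory and URL are placeholders):
#
#   generate-manifest.py /var/www/binaries http://firmware.example.com
#
# which walks the binaries tree and prints JSON shaped like:
#
#   {"format-version": "1.0.0", "firmware": [{"mav-autopilot": ..., "url": ...}, ...]}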
|
gpl-3.0
| -6,232,703,356,322,434,000 | -4,227,483,461,040,636,400 | 41.745387 | 131 | 0.550155 | false |
pyload/pyload
|
src/pyload/webui/app/blueprints/cnl_blueprint.py
|
1
|
5834
|
# -*- coding: utf-8 -*-
import os
from base64 import standard_b64decode
from functools import wraps
from urllib.parse import unquote
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import flask
from flask.json import jsonify
from pyload.core.api import Destination
from pyload.core.utils.convert import to_str
from pyload.core.utils.misc import eval_js
#: url_prefix here is intentional since it should not be affected by the path prefix
bp = flask.Blueprint("flash", __name__, url_prefix="/")
#: decorator
def local_check(func):
@wraps(func)
def wrapper(*args, **kwargs):
remote_addr = flask.request.environ.get("REMOTE_ADDR", "0")
http_host = flask.request.environ.get("HTTP_HOST", "0")
if remote_addr in ("127.0.0.1", "::ffff:127.0.0.1", "::1", "localhost") or http_host in (
"127.0.0.1:9666",
"[::1]:9666",
):
return func(*args, **kwargs)
else:
return "Forbidden", 403
return wrapper
@bp.after_request
def add_cors(response):
response.headers.update({
'Access-Control-Max-Age': 1800,
'Access-Control-Allow-Origin': "*",
'Access-Control-Allow-Methods': "OPTIONS, GET, POST"
})
return response
@bp.route("/flash/", methods=["GET", "POST"], endpoint="index")
@bp.route("/flash/<id>", methods=["GET", "POST"], endpoint="index")
@local_check
def index(id="0"):
return "JDownloader\r\n"
@bp.route("/flash/add", methods=["POST"], endpoint="add")
@local_check
def add():
package = flask.request.form.get(
"package", flask.request.form.get("source", flask.request.form.get("referer"))
)
urls = [url for url in flask.request.form["urls"].replace(' ', '\n').split("\n") if url.strip()]
if not urls:
return jsonify(False)
api = flask.current_app.config["PYLOAD_API"]
try:
if package:
api.add_package(package, urls, Destination.COLLECTOR)
else:
api.generate_and_add_packages(urls, Destination.COLLECTOR)
except Exception as e:
return "failed " + e.args[0] + "\r\n"
return "success\r\n"
@bp.route("/flash/addcrypted", methods=["POST"], endpoint="addcrypted")
@local_check
def addcrypted():
api = flask.current_app.config["PYLOAD_API"]
package = flask.request.form.get(
"package", flask.request.form.get("source", flask.request.form.get("referer"))
)
dl_path = api.get_config_value("general", "storage_folder")
dlc_path = os.path.join(
dl_path, package.replace("/", "").replace("\\", "").replace(":", "") + ".dlc"
)
dlc = flask.request.form["crypted"].replace(" ", "+")
with open(dlc_path, mode="wb") as fp:
fp.write(dlc.encode())  # file is opened in binary mode, so encode the str
try:
api.add_package(package, [dlc_path], Destination.COLLECTOR)
except Exception:
flask.abort(500)
else:
return "success\r\n"
@bp.route("/flash/addcrypted2", methods=["POST"], endpoint="addcrypted2")
@local_check
def addcrypted2():
package = flask.request.form.get(
"package", flask.request.form.get("source", flask.request.form.get("referer"))
)
crypted = flask.request.form["crypted"]
jk = flask.request.form["jk"]
crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
jk = eval_js(f"{jk} f()")
try:
IV = key = bytes.fromhex(jk)
except Exception:
return "Could not decrypt key", 500
cipher = Cipher(
algorithms.AES(key), modes.CBC(IV), backend=default_backend()
)
decryptor = cipher.decryptor()
decrypted = decryptor.update(crypted) + decryptor.finalize()
urls = to_str(decrypted).replace("\x00", "").replace("\r", "").split("\n")
urls = [url for url in urls if url.strip()]
api = flask.current_app.config["PYLOAD_API"]
try:
if package:
api.add_package(package, urls, Destination.COLLECTOR)
else:
api.generate_and_add_packages(urls, Destination.COLLECTOR)
except Exception:
return "failed can't add", 500
else:
return "success\r\n"
@bp.route("/flashgot", methods=["POST"], endpoint="flashgot")
@bp.route("/flashgot_pyload", methods=["POST"], endpoint="flashgot")
@local_check
def flashgot():
if flask.request.referrer not in (
"http://localhost:9666/flashgot",
"http://127.0.0.1:9666/flashgot",
):
flask.abort(500)
package = flask.request.form.get("package")
urls = [url for url in flask.request.form["urls"].split("\n") if url.strip()]
# folder = flask.request.form.get('dir', None)
autostart = int(flask.request.form.get("autostart", 0))
api = flask.current_app.config["PYLOAD_API"]
    if package:
        api.add_package(package, urls, Destination.QUEUE if autostart else Destination.COLLECTOR)
    else:
        api.generate_and_add_packages(urls, Destination.QUEUE if autostart else Destination.COLLECTOR)
    # Flask views must return a response object or string; mirror the
    # other handlers rather than implicitly returning None.
    return "success\r\n"
@bp.route("/crossdomain.xml", endpoint="crossdomain")
@local_check
def crossdomain():
rep = '<?xml version="1.0"?>\n'
rep += '<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">\n'
rep += "<cross-domain-policy>\n"
rep += '<allow-access-from domain="*" />\n'
rep += "</cross-domain-policy>"
return rep
@bp.route("/flash/checkSupportForUrl", methods=["POST"], endpoint="checksupport")
@local_check
def checksupport():
api = flask.current_app.config["PYLOAD_API"]
url = flask.request.form["url"]
res = api.check_urls([url])
    supported = res[0][1] is not None
return str(supported).lower()
@bp.route("/jdcheck.js", endpoint="jdcheck")
@local_check
def jdcheck():
rep = "jdownloader=true;\r\n"
rep += "var version='42707';\r\n"
return rep
|
agpl-3.0
| -9,161,673,231,501,976,000 | 4,661,658,426,362,718,000 | 29.705263 | 113 | 0.632842 | false |
bckwltn/SickRage
|
lib/requests/packages/chardet/charsetgroupprober.py
|
2929
|
3791
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
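# Illustrative usage sketch (not part of the original module). A group prober
# is normally driven by UniversalDetector; the top-level ``chardet`` import
# below is an assumption about how this vendored package is exposed.
if __name__ == '__main__':
    from chardet.sbcsgroupprober import SBCSGroupProber
    demo = SBCSGroupProber()
    demo.feed(b'\xe9chantillon de texte fran\xe7ais')
    print(demo.get_charset_name(), demo.get_confidence())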
|
gpl-3.0
| 3,977,909,801,347,276,300 | -9,151,837,936,824,649,000 | 34.764151 | 69 | 0.56766 | false |
drinkssu/YourVoiceAlarmBackend
|
lib/flask/wrappers.py
|
773
|
6709
|
# -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from .debughelpers import attach_enctype_error_multidict
from . import json
from .globals import _request_ctx_stack
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: the internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: a dict of view arguments that matched the request. If an exception
#: happened when matching, this will be `None`.
view_args = None
#: if matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# switched by the request context until 1.0 to opt in deprecated
# module functionality
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the `MAX_CONTENT_LENGTH` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be `None`.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is `application/json` this will contain the
parsed JSON data. Otherwise this will be `None`.
The :meth:`get_json` method should be used instead.
"""
# XXX: deprecate property
return self.get_json()
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. If
parsing fails the :meth:`on_json_loading_failed` method on the
request object will be invoked. By default this function will
only load the json data if the mimetype is ``application/json``
but this can be overriden by the `force` parameter.
:param force: if set to `True` the mimetype is ignored.
:param silent: if set to `False` this method will fail silently
and return `False`.
:param cache: if set to `True` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
if rv is not _missing:
return rv
if self.mimetype != 'application/json' and not force:
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# in debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
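# Illustrative sketch (not part of Flask itself): exercising Request.get_json
# through a test request context. The local Flask import exists only for this
# demo and assumes a working Flask installation.
if __name__ == '__main__':
    from flask import Flask, request
    demo_app = Flask(__name__)
    with demo_app.test_request_context('/echo', method='POST',
                                       data='{"answer": 42}',
                                       content_type='application/json'):
        print(request.get_json())  # -> {'answer': 42}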
|
apache-2.0
| -3,108,885,485,467,455,500 | -1,187,158,978,117,683,000 | 35.461957 | 79 | 0.631987 | false |
gabrielaraujof/beets
|
beetsplug/permissions.py
|
24
|
3104
|
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
    permissions:
            file: 644
            dir: 755
"""
from __future__ import (division, absolute_import, print_function,
                        unicode_literals)
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
return int(perm, 8)
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
return [ancestor
for ancestor in ancestry(item)
if ancestor.startswith(library)][1:]
class Permissions(BeetsPlugin):
def __init__(self):
super(Permissions, self).__init__()
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
"""
# Getting the config.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
# Create chmod_queue.
file_chmod_queue = []
if item:
file_chmod_queue.append(item.path)
elif album:
for album_item in album.items():
file_chmod_queue.append(album_item.path)
# A set of directories to change permissions for.
dir_chmod_queue = set()
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
dirs_in_library(lib.directory,
path))
# Change permissions for the directories.
for path in dir_chmod_queue:
        # Changing permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = 'There was a problem setting permission on {}'.format(
path)
print(message)
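# Illustrative sketch (not part of the plugin): convert_perm and
# check_permissions exercised in isolation on a temporary file.
if __name__ == '__main__':
    import tempfile
    perm = convert_perm(644)  # octal 644 -> mode bits 0o644
    with tempfile.NamedTemporaryFile() as tmp:
        os.chmod(tmp.name, perm)
        print(check_permissions(tmp.name, perm))  # expected: True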
|
mit
| -1,700,824,919,776,124,200 | -1,357,027,019,029,233,400 | 29.732673 | 78 | 0.627255 | false |
thaim/ansible
|
test/units/modules/source_control/test_bitbucket_access_key.py
|
37
|
13671
|
from ansible.module_utils.source_control.bitbucket import BitbucketHelper
from ansible.modules.source_control.bitbucket import bitbucket_access_key
from units.compat import unittest
from units.compat.mock import patch
from units.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
class TestBucketAccessKeyModule(ModuleTestCase):
def setUp(self):
super(TestBucketAccessKeyModule, self).setUp()
self.module = bitbucket_access_key
def test_missing_key_with_present_state(self):
with self.assertRaises(AnsibleFailJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'key name',
'state': 'present',
})
self.module.main()
self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_key'])
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
def test_create_deploy_key(self, *args):
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'public_key',
'label': 'key name',
'state': 'present',
})
self.module.main()
self.assertEqual(create_deploy_key_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
def test_create_deploy_key_check_mode(self, *args):
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'public_key',
'label': 'key name',
'state': 'present',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(create_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_update_deploy_key(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'new public key',
'label': 'mykey',
'state': 'present',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 1)
self.assertEqual(create_deploy_key_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "new public key",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_dont_update_same_value(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'new public key',
'label': 'mykey',
'state': 'present',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(create_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], False)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_update_deploy_key_check_mode(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'key': 'new public key',
'label': 'mykey',
'state': 'present',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(create_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_delete_deploy_key(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'mykey',
'state': 'absent',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
def test_delete_absent_deploy_key(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'mykey',
'state': 'absent',
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], False)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
"id": 123,
"label": "mykey",
"created_on": "2019-03-23T10:15:21.517377+00:00",
"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
"type": "deploy_key",
"comment": "",
"last_used": None,
"repository": {
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
},
"html": {
"href": "https://bitbucket.org/mleu/test"
},
"avatar": {
"href": "..."
}
},
"type": "repository",
"name": "test",
"full_name": "mleu/test",
"uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
},
"links": {
"self": {
"href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
}
},
})
def test_delete_deploy_key_check_mode(self, *args):
with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'label': 'mykey',
'state': 'absent',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(delete_deploy_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
if __name__ == '__main__':
unittest.main()
|
mit
| -8,804,652,220,153,268,000 | 2,698,367,248,150,873,000 | 39.566766 | 104 | 0.487601 | false |
wangjun/wakatime
|
wakatime/packages/requests/packages/chardet/universaldetector.py
|
1776
|
6840
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
            # the group prober keeps its children in _mProbers
            for prober in self._mCharSetProbers[0]._mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
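# Illustrative usage sketch (not part of the original module), assuming the
# package is importable as top-level ``chardet``:
if __name__ == '__main__':
    from chardet.universaldetector import UniversalDetector
    det = UniversalDetector()
    det.feed(b'\xef\xbb\xbfhello world')  # the UTF-8 BOM short-circuits
    det.close()
    print(det.result)  # -> {'encoding': 'UTF-8-SIG', 'confidence': 1.0}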
|
bsd-3-clause
| 7,930,054,497,926,250,000 | -2,441,314,375,330,718,000 | 39.235294 | 86 | 0.556579 | false |
SoundGoof/NIPAP
|
tests/nipapbase.py
|
7
|
39082
|
#!/usr/bin/env python
# vim: et :
import logging
import unittest
import sys
sys.path.insert(0, '../nipap/')
from nipap.backend import Nipap, NipapError, NipapInputError, NipapMissingInputError, NipapExtraneousInputError, NipapValueError, NipapDuplicateError, NipapNonExistentError
from nipap.authlib import SqliteAuth
from nipap.nipapconfig import NipapConfig
class NipapTest(unittest.TestCase):
""" Tests the NIPAP class
"""
nipap = None
def setUp(self):
""" Better start from a clean slate!
"""
cfg = NipapConfig('/etc/nipap/nipap.conf')
self.nipap = Nipap()
# create dummy auth object
# As the authentication is performed before the query hits the Nipap
# class, it does not matter what user we use here
self.auth = SqliteAuth('local', 'unittest', 'unittest', 'unittest')
self.auth.authenticated_as = 'unittest'
self.auth.full_name = 'Unit test'
self.nipap._execute("TRUNCATE ip_net_plan, ip_net_pool, ip_net_vrf, ip_net_log, ip_net_asn")
self.schema_attrs = {
'name': 'test-schema1',
'description': 'Test schema numero uno!'
}
self.schema_attrs['id'] = self.nipap.add_schema(self.auth, self.schema_attrs)
self.schema_attrs2 = {
'name': 'test-schema2',
'description': 'Test schema numero dos!'
}
self.schema_attrs2['id'] = self.nipap.add_schema(self.auth, self.schema_attrs2)
self.pool_attrs = {
'name': 'test-pool1',
'description': 'Test pool numero uno!',
'default_type': 'assignment',
'ipv4_default_prefix_length': 30,
'ipv6_default_prefix_length': 112
}
self.pool_attrs['id'] = self.nipap.add_pool(self.auth, {'id': self.schema_attrs['id']}, self.pool_attrs)
self.prefix_attrs1 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.3.0/24',
'type': 'assignment',
'description': ''
}
self.prefix_attrs1['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs1)
self.prefix_attrs = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.3.1/32',
'type': 'host',
'description': 'Test prefix numero uno!'
}
self.prefix_attrs['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs)
self.prefix_attrs2 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.2.0/23',
'type': 'reservation',
'description': ''
}
self.prefix_attrs2['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs2)
self.prefix_attrs3 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.0.0/16',
'type': 'reservation',
'description': ''
}
self.prefix_attrs3['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix_attrs3)
self.prefix_attrs4 = {
'authoritative_source': 'nipaptest',
'prefix': '1.3.0.0/17',
'type': 'reservation',
'description': ''
}
self.prefix_attrs4['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs2['id']}, self.prefix_attrs4)
self.prefix6_attrs1 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:3::/112',
'type': 'assignment',
'description': ''
}
self.prefix6_attrs1['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs1)
self.prefix6_attrs = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:3::1/128',
'type': 'host',
'description': 'Test prefix numero uno!'
}
self.prefix6_attrs['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs)
self.prefix6_attrs2 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:3::/64',
'type': 'reservation',
'description': ''
}
self.prefix6_attrs2['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs2)
self.prefix6_attrs3 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:0::/48',
'type': 'reservation',
'description': ''
}
self.prefix6_attrs3['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs['id']}, self.prefix6_attrs3)
self.prefix6_attrs4 = {
'authoritative_source': 'nipaptest',
'prefix': '2001:0db8:3:0::/56',
'type': 'reservation',
'description': ''
}
self.prefix6_attrs4['id'] = self.nipap.add_prefix(self.auth, {'id': self.schema_attrs2['id']}, self.prefix6_attrs4)
def test_schema_basic(self):
""" Basic schema test
1. Add a new schema
2. List with filters to get newly created schema
3. Verify listed schema coincides with input args for added schema
4. Remove schema
"""
attrs = {
'name': 'test-schema-wrong',
'description': 'A simple test schema with incorrect name!'
}
attrs['id'] = self.nipap.add_schema(self.auth, attrs)
schema = self.nipap.list_schema(self.auth, { 'id': attrs['id'] })
for a in attrs:
self.assertEqual(schema[0][a], attrs[a], 'Added object differ from listed on attribute: ' + a)
def test_schema_add_crap_input(self):
""" Try to input junk into add_schema and expect error
"""
attrs = {
'name': 'test-schema-crap',
'description': 'A simple test schema with incorrect name!',
'crap': 'this is just some crap'
}
# missing everything
self.assertRaises(NipapMissingInputError, self.nipap.add_schema, self.auth, { })
# missing description
self.assertRaises(NipapMissingInputError, self.nipap.add_schema, self.auth, { 'name': 'crapson' })
# have required and extra crap
self.assertRaises(NipapExtraneousInputError, self.nipap.add_schema, self.auth, attrs)
def test_expand_schema_spec(self):
""" Test the expand_schema_spec()
The _expand_schema_spec() function is used throughout the schema
        functions to expand the schema specification input, so we test it
        separately.
"""
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_schema_spec, 'string')
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_schema_spec, 1)
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_schema_spec, [])
# missing keys
self.assertRaises(NipapMissingInputError, self.nipap._expand_schema_spec, { })
# crap key
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_schema_spec, { 'crap': self.schema_attrs['name'] })
# required keys and extra crap
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_schema_spec, { 'name': self.schema_attrs['name'], 'crap': 'crap' })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_schema_spec, { 'id': '3' })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_schema_spec, { 'name': 3 })
# both id and name
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_schema_spec, { 'id': 3, 'name': '3' })
# proper key - id
where, params = self.nipap._expand_schema_spec({ 'id': 3 })
self.assertEqual(where, 'id = %(spec_id)s', "Improperly expanded WHERE clause")
self.assertEqual(params, {'spec_id': 3}, "Improperly expanded params dict")
        # proper spec - name
        where, params = self.nipap._expand_schema_spec({ 'name': 'test' })
        self.assertEqual(where, 'name = %(spec_name)s', "Improperly expanded WHERE clause")
        self.assertEqual(params, {'spec_name': 'test'}, "Improperly expanded params dict")
def test_schema_edit_crap_input(self):
""" Try to input junk into edit_schema and expect error
"""
attrs = {
'name': 'test-schema-crap',
'description': 'A simple test schema with incorrect name!'
}
crap_attrs = {
'name': 'test-schema-crap',
'description': 'A simple test schema with incorrect name!',
'crap': 'this is just some crap'
}
# spec is tested elsewhere, just test attrs part
self.assertRaises(NipapExtraneousInputError, self.nipap.edit_schema, self.auth, { 'name': self.schema_attrs['name'] }, crap_attrs)
def test_schema_list_crap_input(self):
""" Try to input junk into list_schema and expect error
"""
# TODO: what do we really expect?
self.assertRaises(NipapExtraneousInputError, self.nipap.list_schema, self.auth, { 'crap': 'crap crap' })
def test_schema_dupe(self):
""" Check so we can't create duplicate schemas
There are unique indices in the database that should prevent us
from creating duplicate schema (ie, with the same name).
"""
schema_attrs = {
'name': 'test-schema-dupe',
'description': 'Testing dupe'
}
self.nipap.add_schema(self.auth, schema_attrs)
self.assertRaises(NipapDuplicateError, self.nipap.add_schema, self.auth, schema_attrs)
def test_schema_rename(self):
""" Rename a schema
Uses the edit_schema() functionality to rename our previously
created and incorrectly named schema so it hereafter has the
correct name. Also tests the list_schema() functionality since we
use that to list the modified schema.
"""
spec = { 'name': 'test-schema1' }
attrs = {
'name': 'test-schema',
'description': 'A simple test schema with correct name!'
}
self.nipap.edit_schema(self.auth, spec, attrs)
# check that search for old record doesn't return anything
schema = self.nipap.list_schema(self.auth, spec)
self.assertEqual(schema, [], 'Old entry still exists')
schema = self.nipap.list_schema(self.auth, { 'name': 'test-schema' })
for a in attrs:
self.assertEqual(schema[0][a], attrs[a], 'Modified schema differ from listed on attribute: ' + a)
def test_schema_remove(self):
""" Remove a schema
Remove the schema previously modified and make sure it's not there.
"""
spec = { 'name': 'test-schema' }
self.nipap.remove_schema(self.auth, spec)
# check that search for old record doesn't return anything
schema = self.nipap.list_schema(self.auth, spec)
self.assertEqual(schema, [], 'Old entry still exists')
def test_expand_pool_spec(self):
""" Test the function which expands pool spec to SQL.
"""
schema = {'id': self.schema_attrs['id']}
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_pool_spec, 'string')
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_pool_spec, 1)
# wrong type
self.assertRaises(NipapInputError, self.nipap._expand_pool_spec, [])
# missing keys
self.assertRaises(NipapMissingInputError, self.nipap._expand_pool_spec, { })
# crap key
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_pool_spec, { 'crap': self.pool_attrs['name'] })
# required keys and extra crap
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_pool_spec, { 'id': self.pool_attrs['id'], 'schema': self.schema_attrs['id'], 'crap': 'crap' })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_pool_spec, { 'id': '3', 'schema': self.schema_attrs['id'] })
# proper key but incorrect value (int vs string)
self.assertRaises(NipapValueError, self.nipap._expand_pool_spec, { 'name': 3, 'schema': self.schema_attrs['id'] })
# both id and name
self.assertRaises(NipapExtraneousInputError, self.nipap._expand_pool_spec, { 'id': 3, 'name': '3', 'schema': self.schema_attrs['id'] })
# proper key - id
where, params = self.nipap._expand_pool_spec({ 'id': 3, 'schema': self.schema_attrs['id'] })
self.assertEqual(where, 'po.id = %(spec_id)s AND po.schema = %(spec_schema)s', "Improperly expanded WHERE clause")
self.assertEqual(params, {'spec_id': 3, 'spec_schema': self.schema_attrs['id']}, "Improperly expanded params dict")
# proper spec - name
where, params = self.nipap._expand_pool_spec({ 'name': 'test', 'schema': self.schema_attrs['id'] })
self.assertEqual(where, 'po.name = %(spec_name)s AND po.schema = %(spec_schema)s', "Improperly expanded WHERE clause")
self.assertEqual(params, {'spec_name': 'test', 'spec_schema': self.schema_attrs['id'] }, "Improperly expanded params dict")
def test_pool_add1(self):
""" Add a pool and check it's there using list functions
Refer to schema by id
"""
attrs = {
'name': 'test-pool-wrong',
'description': 'A simple test pool with incorrect name!',
'default_type': 'reservation',
'ipv4_default_prefix_length': 30,
'ipv6_default_prefix_length': 112
}
schema = {'id': self.schema_attrs['id']}
pool_id = self.nipap.add_pool(self.auth, schema, attrs)
pool = self.nipap.list_pool(self.auth, schema, { 'id': pool_id })
for a in attrs:
self.assertEqual(pool[0][a], attrs[a], 'Added object differ from listed on attribute: %s %s!=%s' % (a, attrs[a], pool[0][a]))
def test_pool_add2(self):
""" Add a pool and check it's there using list functions
Refer to schema by name
"""
schema = {'id': self.schema_attrs['id']}
attrs = {
'name': 'test-pool-wrong',
'default_type': 'reservation',
'description': 'A simple test pool with incorrect name!'
}
pool_id = self.nipap.add_pool(self.auth, schema, attrs)
pool = self.nipap.list_pool(self.auth, schema, { 'id': pool_id })
for a in attrs:
self.assertEqual(pool[0][a], attrs[a], 'Added object differ from listed on attribute: ' + a)
def test_edit_pool_by_name(self):
""" Try to rename a pool using edit_pool() function
Pool is not uniquely identified (empty spec) so this should raise an error
"""
schema = {'id': self.schema_attrs['id']}
spec = { }
attrs = {
'name': self.pool_attrs['name'],
'default_type': 'assignment',
'description': 'A simple test pool with correct name!'
}
self.assertRaises(NipapInputError, self.nipap.edit_pool, self.auth, schema, spec, attrs)
def test_edit_pool(self):
""" Rename a pool using edit_pool() function
"""
schema = {'id': self.schema_attrs['id']}
spec = { 'id': self.pool_attrs['id'] }
attrs = {
'name': 'test-pool',
'default_type': 'assignment',
'description': 'A simple test pool with correct name!',
'ipv4_default_prefix_length': 32,
'ipv6_default_prefix_length': 128
}
self.nipap.edit_pool(self.auth, schema, spec, attrs)
# check that search for old record doesn't return anything
pool = self.nipap.list_pool(self.auth, schema, { 'name': self.pool_attrs['name'] })
self.assertEqual(pool, [], 'Old entry still exists')
pool = self.nipap.list_pool(self.auth, schema, { 'name': attrs['name'] })
for a in attrs:
self.assertEqual(pool[0][a], attrs[a], 'Added object differ from listed on attribute: ' + a)
def test_remove_pool_by_id(self):
""" Remove a pool by id
"""
schema = {'id': self.schema_attrs['id']}
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Record must exist before we can delete it')
for a in self.pool_attrs:
self.assertEqual(pool[0][a], self.pool_attrs[a], 'Listed attribute differ from original')
# remove the pool
self.nipap.remove_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# check that search for old record doesn't return anything
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
self.assertEqual(pool, [], 'Old entry still exists')
def test_prefix_in_a_pool(self):
""" Add prefixes to a poll and list!
"""
schema = {'id': self.schema_attrs['id']}
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Pool must exist!')
pfxs = [
'1.2.2.0/32',
'1.2.2.1/32',
'1.2.2.2/32',
'1.2.2.3/32',
'1.2.2.4/32',
'1.2.2.5/32'
]
for p in pfxs:
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': p,
'type': 'host',
'description': 'test prefix',
'pool_id': self.pool_attrs['id'],
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# list again
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
self.assertNotEqual(pool[0], [], 'Pool must exist!')
self.assertEqual(set(pfxs), set(pool[0]['prefixes']), 'Returned prefixes do not match added ones')
def test_prefix_basic(self):
""" Test basic prefix functions
"""
schema = {'id': self.schema_attrs['id']}
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '1.3.3.7/32',
'type': 'host',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
prefix = self.nipap.list_prefix(self.auth, schema, { 'prefix': prefix_attrs['prefix'] })
for a in prefix_attrs:
self.assertEqual(prefix[0][a], prefix_attrs[a], 'Added object differ from listed on attribute: ' + a)
# fetch many prefixes - all in a schema
prefix = self.nipap.list_prefix(self.auth, schema, {})
self.assertNotEqual(len(prefix), 0, 'Found 0 prefixes in schema ' + self.schema_attrs['name'])
def test_add_prefix(self):
""" Test add_prefix in a bit more detail
"""
schema = {'id': self.schema_attrs['id']}
# we need a bloody pool first!
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Pool must exist!')
pfxs = [
'10.0.0.0/24',
'10.0.1.0/24',
'10.0.2.0/24',
'10.0.3.0/24',
'10.0.4.0/24'
]
for p in pfxs:
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': p,
'type': 'reservation',
'description': 'test prefix',
'pool_id': self.pool_attrs['id'],
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# get an address based on from-prefix
prefix_attrs = {
'type': 'assignment',
'authoritative_source': 'nipap-test',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
res = self.nipap.add_prefix(self.auth, schema, prefix_attrs, { 'from-prefix': ['10.0.0.0/24'], 'prefix_length': 30 })
p = self.nipap.list_prefix(self.auth, schema, { 'id': res })
self.assertEqual(p[0]['prefix'], '10.0.0.0/30', "New prefix differ from what it should be!")
self.nipap.add_schema(self.auth, { 'name': 'testtest', 'description': 'another test schema!' })
# pass different schemas in attr and args
# TODO: Find something similar?
#self.assertRaises(NipapInputError, self.nipap.add_prefix, schema, { 'authoritative_source': 'nipap-test', 'description': 'tjong' }, { 'from-prefix': ['10.0.0.0/24'], 'prefix_length': 30 })
def test_prefix_search_simple(self):
""" Test the simple prefix search function.
"""
schema = {'id': self.schema_attrs['id']}
# First, perform e few tests to verify search string expansion.
query_keys = dict()
query_keys['testing testing'] = "description"
query_keys['1.2.3.4'] = "prefix"
# build query string
query_str = ""
for key, val in query_keys.items():
if val == "description":
query_str += "\"%s\" " % key
else:
query_str += "%s " % key
res = self.nipap.smart_search_prefix(self.auth, schema, query_str)
for interp in res['interpretation']:
self.assertEqual(interp['string'] in query_keys, True, "Function returned unknown interpreted string %s" % interp['string'])
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '1.3.3.77/32',
'type': 'host',
'description': 'test-ish prefix',
'comment': 'Test prefix #77! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
res = self.nipap.smart_search_prefix(self.auth, schema, r"""1.3.3.77 "-ish" """)
self.assertEqual(res['result'][-1]['prefix'], '1.3.3.77/32', 'Prefix not found')
def test_prefix_search_smart(self):
""" Test the smart prefix search function.
"""
schema = {'id': self.schema_attrs['id']}
# test full ipv4 address
res = self.nipap.smart_search_prefix(self.auth, schema, '1.3.3.7')
self.assertEqual(res['interpretation'][0]['interpretation'], 'IPv4 address')
res = self.nipap.smart_search_prefix(self.auth, schema, '1.1')
self.assertEqual(res['interpretation'][0]['interpretation'], 'text', "Incorrectly interpreted '1.1' as : " + res['interpretation'][0]['interpretation'])
res = self.nipap.smart_search_prefix(self.auth, schema, '10/8')
self.assertEqual(res['interpretation'][0]['interpretation'], 'IPv4 prefix')
res = self.nipap.smart_search_prefix(self.auth, schema, '2000:0::01')
self.assertEqual(res['interpretation'][0]['interpretation'], 'IPv6 address')
def test_prefix_remove(self):
""" Remove a prefix
"""
schema = {'id': self.schema_attrs['id']}
prefix = self.nipap.list_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] })
# first make sure our prefix exists
self.assertEqual(prefix[0]['id'], self.prefix_attrs['id'], 'Record must exist before we can delete it')
# remove the prefix, by id
self.nipap.remove_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] })
# check that search for old record doesn't return anything
prefix = self.nipap.list_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] })
self.assertEqual(prefix, [], 'Old entry still exists')
def test_prefix_indent_ipv4(self):
""" Check that our indentation calculation is working for IPv4
Prefixes gets an indent value automatically assigned to help in
displaying prefix information. The indent value is written on
updates to the table and this test is to make sure it is correctly
calculated.
"""
schema = {'id': self.schema_attrs['id']}
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.3.1/32' })[0]
p2 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.3.0/24' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.0.0/16' })[0]
self.assertEqual(p1['indent'], 4, "Indent calc on add failed")
self.assertEqual(p2['indent'], 3, "Indent calc on add failed")
self.assertEqual(p3['indent'], 0, "Indent calc on add failed")
# remove middle prefix
self.nipap.remove_prefix(self.auth, schema, { 'id': self.prefix_attrs2['id'] })
# check that child prefix indent level has decreased
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.3.1/32' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '1.3.0.0/16' })[0]
self.assertEqual(p1['indent'], 3, "Indent calc on remove failed")
self.assertEqual(p3['indent'], 0, "Indent calc on remove failed")
def test_prefix_indent_ipv6(self):
""" Check that our indentation calculation is working for IPv6
Prefixes gets an indent value automatically assigned to help in
displaying prefix information. The indent value is written on
updates to the table and this test is to make sure it is correctly
calculated.
"""
schema = {'id': self.schema_attrs['id']}
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:3::1/128' })[0]
p2 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:3::/64' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:0::/48' })[0]
self.assertEqual(p1['indent'], 4, "Indent calc on add failed")
self.assertEqual(p2['indent'], 2, "Indent calc on add failed")
self.assertEqual(p3['indent'], 0, "Indent calc on add failed")
# remove middle prefix
self.nipap.remove_prefix(self.auth, schema, { 'id': self.prefix6_attrs2['id'] })
# check that child prefix indent level has decreased
p1 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:3::1/128' })[0]
p3 = self.nipap.list_prefix(self.auth, schema, { 'prefix': '2001:0db8:3:0::/48' })[0]
self.assertEqual(p1['indent'], 3, "Indent calc on remove failed for " + p1['prefix'] + " indent: " + str(p1['indent']))
self.assertEqual(p3['indent'], 0, "Indent calc on remove failed for " + p3['prefix'] + " indent: " + str(p3['indent']))
def test_find_free_prefix_input(self):
""" Mostly input testing of find_free_prefix
Try to stress find_free_prefix and send a lot of junk..
"""
schema = {'id': self.schema_attrs['id']}
# set up a prefix not used elsewhere so we have a known good state
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '100.0.0.0/16',
'type': 'reservation',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# no schema, should raise error!
self.assertRaises(NipapInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': ['100.0.0.0/16'] })
# incorrect from-prefix type, string instead of list of strings (looking like an IP address)
self.assertRaises(NipapInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': '100.0.0.0/16' })
# missing prefix_length
self.assertRaises(NipapMissingInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16' ], 'count': 1 })
# try giving both IPv4 and IPv6 in from-prefix which shouldn't work
self.assertRaises(NipapInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16', '2a00:800::0/25' ], 'prefix_length': 24, 'count': 1 })
# try giving non-integer as wanted prefix length
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16'], 'prefix_length': '24', 'count': 1 })
# try giving to high a number as wanted prefix length for IPv4
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16'], 'prefix_length': 35, 'count': 1 })
# try giving to high a number as wanted prefix length for IPv6
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '2a00:800::1/25'], 'prefix_length': 150, 'count': 1 })
# try giving a high number for result count (max is 1000)
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-prefix': [ '100.0.0.0/16'], 'prefix_length': 30, 'count': 55555 })
# don't pass 'family', which is required when specifying 'from-pool'
self.assertRaises(NipapMissingInputError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'prefix_length': 24, 'count': 1 })
# pass crap as family, wrong type even
self.assertRaises(ValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'prefix_length': 24, 'count': 1, 'family': 'crap' })
# pass 7 as family
self.assertRaises(NipapValueError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'prefix_length': 24, 'count': 1, 'family': 7 })
# pass non existent pool
self.assertRaises(NipapNonExistentError, self.nipap.find_free_prefix, self.auth, schema, { 'from-pool': { 'name': 'crap' }, 'prefix_length': 24, 'count': 1, 'family': 4 })
def test_find_free_prefix1(self):
""" Functionality testing of find_free_prefix
Mostly based on 'from-prefix'
"""
schema = { 'id': self.schema_attrs['id'] }
# set up a prefix not used elsewhere so we have a known good state
prefix_attrs = {
'authoritative_source': 'nipap-test',
'prefix': '100.0.0.0/16',
'type': 'assignment',
'description': 'test prefix',
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# simple test
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-prefix': [ '100.0.0.0/16', '1.3.3.0/24' ], 'prefix_length': 24, 'count': 1 })
self.assertEqual(res, ['100.0.0.0/24'], "Incorrect prefix set returned")
# simple test - only one input prefix (which did cause a bug, thus keeping it)
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-prefix': [ '100.0.0.0/16' ], 'prefix_length': 24, 'count': 1 })
self.assertEqual(res, ['100.0.0.0/24'], "Incorrect prefix set returned")
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-prefix': [ '100.0.0.0/16', '1.3.3.0/24' ], 'prefix_length': 24, 'count': 999 })
self.assertEqual(len(res), 256, "Incorrect prefix set returned")
def test_find_free_prefix2(self):
""" Functionality testing of find_free_prefix
Mostly based on 'from-pool'
"""
schema = { 'id': self.schema_attrs['id'] }
# we need a bloody pool first!
pool = self.nipap.list_pool(self.auth, schema, { 'id': self.pool_attrs['id'] })
# first make sure our pool exists
self.assertNotEqual(pool[0], [], 'Pool must exist!')
pfxs = [
'10.0.0.0/24',
'10.0.1.0/24',
'10.0.2.0/24',
'10.0.3.0/24',
'10.0.4.0/24'
]
for p in pfxs:
prefix_attrs = {
'type': 'reservation',
'authoritative_source': 'nipap-test',
'prefix': p,
'description': 'test prefix',
'pool_id': self.pool_attrs['id'],
'comment': 'test comment, please remove! ;)'
}
self.nipap.add_prefix(self.auth, schema, prefix_attrs)
# from-pool test
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'count': 1, 'family': 4})
self.assertEqual(res, ['10.0.1.0/30'], "Incorrect prefix set returned when requesting default prefix-length")
# from-pool test, specify wanted prefix length
res = self.nipap.find_free_prefix(self.auth, schema, { 'from-pool': { 'name': self.pool_attrs['name'] }, 'count': 1, 'family': 4, 'prefix_length': 31})
self.assertEqual(res, ['10.0.1.0/31'], "Incorrect prefix set returned with explicit prefix-length")
def test_edit_prefix(self):
""" Functionality testing of edit_prefix.
"""
schema = { 'id': self.schema_attrs['id'] }
data = {
'prefix': '192.0.2.0/24',
'description': 'foo',
'comment': 'bar',
'order_id': '0xBEEF',
'customer_id': 'CUST-EEF-DERP',
'alarm_priority': 'low',
'type': 'assignment',
'node': 'TOK-CORE-1',
'country': 'EE',
'authoritative_source': 'unittest',
'pool': self.pool_attrs['id']
}
# basic edit
self.nipap.edit_prefix(self.auth, schema, { 'id': self.prefix_attrs['id'] }, data)
p = self.nipap.list_prefix(self.auth, schema, {'id': self.prefix_attrs['id']})[0]
        # keep only the keys we touched so the two dicts can be compared
        for k in list(p.keys()):
            if k not in data:
                del p[k]
self.assertEqual(data, p, "Prefix data incorrect after edit.")
# create a collision
self.assertRaises(NipapError, self.nipap.edit_prefix, self.auth, schema, {'id': self.prefix_attrs2['id']}, {'prefix': data['prefix']})
# try to change schema - disallowed
        self.assertRaises(NipapExtraneousInputError, self.nipap.edit_prefix, self.auth, schema, {'id': self.prefix_attrs2['id']}, {'schema': self.schema_attrs2['id']})
def test_add_asn(self):
""" Test adding ASNs to NIPAP.
"""
data = {
'asn': 1,
'name': 'Test ASN #1'
}
self.assertEqual(self.nipap.add_asn(self.auth, data), 1, "add_asn did not return correct ASN.")
asn = self.nipap.list_asn(self.auth, { 'asn': 1 })[0]
self.assertEquals(data, asn, "ASN in database not equal to what was added.")
self.assertRaises(NipapDuplicateError, self.nipap.add_asn, self.auth, data)
def test_remove_asn(self):
""" Test removing ASNs from NIPAP.
"""
data = {
'asn': 2,
'name': 'Test ASN #2'
}
asn = self.nipap.add_asn(self.auth, data)
self.nipap.remove_asn(self.auth, asn)
self.assertEquals(0, len(self.nipap.list_asn(self.auth, { 'asn': 2 })), "Removed ASN still in database")
def test_edit_asn(self):
""" Test editing ASNs.
"""
data = {
'asn': 3,
'name': 'Test ASN #3'
}
asn = self.nipap.add_asn(self.auth, data)
self.nipap.edit_asn(self.auth, data['asn'], { 'name': 'b0rk' })
        self.assertEquals(self.nipap.list_asn(self.auth, { 'asn': 3 })[0]['name'], 'b0rk', "Edited ASN still has its old name.")
self.assertRaises(NipapExtraneousInputError, self.nipap.edit_asn, self.auth, {'asn': 3}, {'asn': 4, 'name': 'Test ASN #4'})
def test_search_asn(self):
""" Test searching ASNs.
"""
data = {
'asn': 4,
'name': 'This is AS number 4'
}
asn = self.nipap.add_asn(self.auth, data)
q = {
'operator': 'equals',
'val1': 'asn',
'val2': data['asn']
}
res = self.nipap.search_asn(self.auth, q)
self.assertEquals(len(res['result']), 1, "equal search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['name'], data['name'], "search hit got wrong name")
q = {
'operator': 'regex_match',
'val1': 'name',
'val2': 'number'
}
res = self.nipap.search_asn(self.auth, q)
self.assertEquals(len(res['result']), 1, "regex search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['asn'], data['asn'], "search hit got wrong asn")
def test_smart_search_asn(self):
""" Test smart_search_asn function.
"""
data = {
'asn': 5,
'name': 'Autonomous System Number 5'
}
asn = self.nipap.add_asn(self.auth, data)
res = self.nipap.smart_search_asn(self.auth, "Autonomous")
self.assertEquals(len(res['result']), 1, "search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['asn'], data['asn'], "search hit got wrong asn")
        self.assertEquals(res['interpretation'][0]['attribute'], 'name', "search term interpreted as wrong type")
res = self.nipap.smart_search_asn(self.auth, "5")
self.assertEquals(len(res['result']), 1, "search resulted in wrong number of hits")
self.assertEquals(res['result'][0]['asn'], data['asn'], "search hit got wrong asn")
        self.assertEquals(res['interpretation'][0]['attribute'], 'asn', "search term interpreted as wrong type")
def main():
if sys.version_info >= (2,7):
unittest.main(verbosity=2)
else:
unittest.main()
if __name__ == '__main__':
log_format = "%(levelname)-8s %(message)s"
logging.basicConfig(format=log_format)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
main()
|
mit
| -3,149,467,238,557,047,300 | 7,624,449,988,792,192,000 | 42.715884 | 197 | 0.566527 | false |
torchingloom/django-registration
|
registration/forms.py
|
2
|
3191
|
"""
Forms and validation code for user registration.
Note that all of these forms assume Django's bundled default ``User``
model; since it's not possible for a form to anticipate in advance the
needs of custom user models, you will need to write your own forms if
you're using a custom model.
"""
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
from registration.users import UserModel
class RegistrationForm(UserCreationForm):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
email = forms.EmailField(label=_("E-mail"))
class Meta:
model = UserModel()
fields = ("username", "email")
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput,
label=_('I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if UserModel().objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
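# Usage sketch (illustrative, not part of the original module): wiring one of
# these forms into a plain Django view; the view name and template path are
# hypothetical placeholders.
#
# from django.shortcuts import redirect, render
#
# def register(request):
#     form = RegistrationFormUniqueEmail(request.POST or None)
#     if request.method == 'POST' and form.is_valid():
#         # saving is normally delegated to the active registration backend
#         return redirect('registration_complete')
#     return render(request, 'registration/registration_form.html',
#                   {'form': form})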
|
bsd-3-clause
| 2,445,654,338,224,964,600 | -3,790,401,990,922,716,700 | 33.311828 | 141 | 0.67189 | false |
codebox/algorithms
|
graph_data_structures.py
|
1
|
1452
|
class DirectedAdjacencyMatrix:
def __init__(self, n):
self.n = n
self.data = [[0] * n for i in range(n)]
def connect(self, i_from, i_to):
self.data[i_from][i_to] += 1
def disconnect(self, i_from, i_to):
self.data[i_from][i_to] = max(0, self.data[i_from][i_to] - 1)
def are_connected(self, n1, n2):
return self.data[n1][n2] > 0 or self.data[n2][n1] > 0
def get_outgoing(self, i_from):
return [i for i in range(self.n) if self.data[i_from][i]]
def get_incoming(self, i_to):
return [i for i in range(self.n) if self.data[i][i_to]]
class DirectedAdjacencyList:
def __init__(self, n):
self.n = n
self.vertices = range(n)
self.edges = []
def connect(self, i_from, i_to):
self.edges.append([i_from, i_to])
def disconnect(self, i_from, i_to):
for edge in self.edges:
if edge[0] == i_from and edge[1] == i_to:
self.edges.remove(edge)
break
def are_connected(self, n1, n2):
for edge in self.edges:
if (edge[0] == n1 and edge[1] == n2) or (edge[0] == n2 and edge[1] == n1):
return True
return False
def get_outgoing(self, i_from):
return list(set([edge[1] for edge in self.edges if edge[0] == i_from]))
def get_incoming(self, i_to):
return list(set([edge[0] for edge in self.edges if edge[1] == i_to]))
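# Minimal usage sketch (illustrative, not part of the original module); both
# structures expose the same interface, so they are interchangeable here.
if __name__ == '__main__':
    for graph_cls in (DirectedAdjacencyMatrix, DirectedAdjacencyList):
        g = graph_cls(3)
        g.connect(0, 1)
        g.connect(1, 2)
        assert g.are_connected(0, 1)
        assert g.get_outgoing(1) == [2]
        assert g.get_incoming(1) == [0]
        g.disconnect(0, 1)
        assert not g.are_connected(0, 1)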
|
mit
| 8,376,232,044,224,459,000 | -7,054,102,232,032,389,000 | 29.893617 | 86 | 0.544077 | false |
Elder-of-Ozone/i3pystatus
|
i3pystatus/network.py
|
6
|
14457
|
import netifaces
from i3pystatus import IntervalModule
from i3pystatus.core.color import ColorRangeModule
from i3pystatus.core.util import make_graph, round_dict, make_bar
def count_bits(integer):
bits = 0
while (integer):
integer &= integer - 1
bits += 1
return bits
def v6_to_int(v6):
return int(v6.replace(":", ""), 16)
def prefix6(mask):
return count_bits(v6_to_int(mask))
def cidr6(addr, mask):
return "{addr}/{bits}".format(addr=addr, bits=prefix6(mask))
def v4_to_int(v4):
sum = 0
mul = 1
for part in reversed(v4.split(".")):
sum += int(part) * mul
mul *= 2 ** 8
return sum
def prefix4(mask):
return count_bits(v4_to_int(mask))
def cidr4(addr, mask):
return "{addr}/{bits}".format(addr=addr, bits=prefix4(mask))
def get_bonded_slaves():
try:
with open("/sys/class/net/bonding_masters") as f:
masters = f.read().split()
except FileNotFoundError:
return {}
slaves = {}
for master in masters:
with open("/sys/class/net/{}/bonding/slaves".format(master)) as f:
for slave in f.read().split():
slaves[slave] = master
return slaves
def sysfs_interface_up(interface, unknown_up=False):
try:
with open("/sys/class/net/{}/operstate".format(interface)) as f:
status = f.read().strip()
except FileNotFoundError:
# Interface doesn't exist
return False
return status == "up" or unknown_up and status == "unknown"
class NetworkInfo():
"""
Retrieve network information.
"""
def __init__(self, interface, ignore_interfaces, detached_down, unknown_up, get_wifi_info=False):
if interface not in netifaces.interfaces() and not detached_down:
raise RuntimeError(
"Unknown interface {iface}!".format(iface=interface))
self.ignore_interfaces = ignore_interfaces
self.detached_down = detached_down
self.unknown_up = unknown_up
self.get_wifi_info = get_wifi_info
def get_info(self, interface):
format_dict = dict(v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="")
iface_up = sysfs_interface_up(interface, self.unknown_up)
if not iface_up:
return format_dict
network_info = netifaces.ifaddresses(interface)
slaves = get_bonded_slaves()
try:
master = slaves[interface]
except KeyError:
pass
else:
if sysfs_interface_up(interface, self.unknown_up):
master_info = netifaces.ifaddresses(master)
for af in (netifaces.AF_INET, netifaces.AF_INET6):
try:
network_info[af] = master_info[af]
except KeyError:
pass
try:
mac = network_info[netifaces.AF_PACKET][0]["addr"]
except KeyError:
mac = "NONE"
format_dict['mac'] = mac
if iface_up:
format_dict.update(self.extract_network_info(network_info))
format_dict.update(self.extract_wireless_info(interface))
return format_dict
@staticmethod
def extract_network_info(network_info):
info = dict()
if netifaces.AF_INET in network_info:
v4 = network_info[netifaces.AF_INET][0]
info["v4"] = v4["addr"]
info["v4mask"] = v4["netmask"]
info["v4cidr"] = cidr4(v4["addr"], v4["netmask"])
if netifaces.AF_INET6 in network_info:
for v6 in network_info[netifaces.AF_INET6]:
info["v6"] = v6["addr"]
info["v6mask"] = v6["netmask"]
info["v6cidr"] = cidr6(v6["addr"], v6["netmask"])
if not v6["addr"].startswith("fe80::"): # prefer non link-local addresses
break
return info
def extract_wireless_info(self, interface):
info = dict(essid="", freq="", quality=0.0, quality_bar="")
# Just return empty values if we're not using any Wifi functionality
if not self.get_wifi_info:
return info
import basiciw
try:
iwi = basiciw.iwinfo(interface)
except Exception:
# Not a wireless interface
return info
info["essid"] = iwi["essid"]
info["freq"] = iwi["freq"]
quality = iwi["quality"]
if quality["quality_max"] > 0:
info["quality"] = quality["quality"] / quality["quality_max"]
else:
info["quality"] = quality["quality"]
info["quality"] *= 100
info["quality_bar"] = make_bar(info["quality"])
info["quality"] = round(info["quality"])
return info
class NetworkTraffic():
"""
Retrieve network traffic information
"""
pnic = None
pnic_before = None
def __init__(self, unknown_up, divisor, round_size):
self.unknown_up = unknown_up
self.divisor = divisor
self.round_size = round_size
def update_counters(self, interface):
import psutil
self.pnic_before = self.pnic
counters = psutil.net_io_counters(pernic=True)
self.pnic = counters[interface] if interface in counters else None
def clear_counters(self):
self.pnic_before = None
self.pnic = None
def get_bytes_sent(self):
return (self.pnic.bytes_sent - self.pnic_before.bytes_sent) / self.divisor
def get_bytes_received(self):
return (self.pnic.bytes_recv - self.pnic_before.bytes_recv) / self.divisor
def get_packets_sent(self):
return self.pnic.packets_sent - self.pnic_before.packets_sent
def get_packets_received(self):
return self.pnic.packets_recv - self.pnic_before.packets_recv
def get_rx_tot_Mbytes(self, interface):
try:
with open("/sys/class/net/{}/statistics/rx_bytes".format(interface)) as f:
return int(f.readline().split('\n')[0]) / (1024 * 1024)
except FileNotFoundError:
return False
def get_tx_tot_Mbytes(self, interface):
try:
with open("/sys/class/net/{}/statistics/tx_bytes".format(interface)) as f:
return int(f.readline().split('\n')[0]) / (1024 * 1024)
except FileNotFoundError:
return False
def get_usage(self, interface):
self.update_counters(interface)
usage = dict(bytes_sent=0, bytes_recv=0, packets_sent=0, packets_recv=0)
if not sysfs_interface_up(interface, self.unknown_up) or not self.pnic_before:
return usage
else:
usage["bytes_sent"] = self.get_bytes_sent()
usage["bytes_recv"] = self.get_bytes_received()
usage["packets_sent"] = self.get_packets_sent()
usage["packets_recv"] = self.get_packets_received()
usage["rx_tot_Mbytes"] = self.get_rx_tot_Mbytes(interface)
usage["tx_tot_Mbytes"] = self.get_tx_tot_Mbytes(interface)
round_dict(usage, self.round_size)
return usage
class Network(IntervalModule, ColorRangeModule):
"""
Displays network information for an interface.
Requires the PyPI packages `colour`, `netifaces`, `psutil` (optional, see below)
and `basiciw` (optional, see below).
.. rubric:: Available formatters
Network Information Formatters:
* `{interface}` — same as setting
* `{v4}` — IPv4 address
* `{v4mask}` — subnet mask
* `{v4cidr}` — IPv4 address in cidr notation (i.e. 192.168.2.204/24)
* `{v6}` — IPv6 address
* `{v6mask}` — subnet mask
* `{v6cidr}` — IPv6 address in cidr notation
* `{mac}` — MAC of interface
Wireless Information Formatters (requires PyPI package `basiciw`):
* `{essid}` — ESSID of currently connected wifi
* `{freq}` — Current frequency
* `{quality}` — Link quality in percent
    * `{quality_bar}` — Bar graphically representing link quality
    Network Traffic Formatters (requires PyPI package `psutil`):
* `{interface}` — the configured network interface
    * `{kbs}` — Float representing kb/s
    * `{network_graph}` — Unicode graph representing network usage
* `{bytes_sent}` — bytes sent per second (divided by divisor)
* `{bytes_recv}` — bytes received per second (divided by divisor)
* `{packets_sent}` — bytes sent per second (divided by divisor)
* `{packets_recv}` — bytes received per second (divided by divisor)
* `{rx_tot_Mbytes}` — total Mbytes received
* `{tx_tot_Mbytes}` — total Mbytes sent
"""
settings = (
("format_up", "format string"),
("format_down", "format string"),
"color_up",
"color_down",
("interface", "Interface to watch, eg 'eth0'"),
("dynamic_color", "Set color dynamically based on network traffic. Note: this overrides color_up"),
("start_color", "Hex or English name for start of color range, eg '#00FF00' or 'green'"),
("end_color", "Hex or English name for end of color range, eg '#FF0000' or 'red'"),
("graph_width", "Width of the network traffic graph"),
("graph_style", "Graph style ('blocks', 'braille-fill', 'braille-peak', or 'braille-snake')"),
("upper_limit",
"Expected max kb/s. This value controls how the network traffic graph is drawn and in what color"),
("graph_type", "Whether to draw the network traffic graph for input or output. "
"Allowed values 'input' or 'output'"),
("divisor", "divide all byte values by this value"),
("ignore_interfaces", "Array of interfaces to ignore when cycling through "
"on click, eg, ['lo']"),
("round_size", "defines number of digits in round"),
("detached_down", "If the interface doesn't exist, display it as if it were down"),
("unknown_up", "If the interface is in unknown state, display it as if it were up"),
)
interval = 1
interface = 'eth0'
format_up = "{interface} {network_graph}{kbs}KB/s"
format_down = "{interface}: DOWN"
color_up = "#00FF00"
color_down = "#FF0000"
dynamic_color = True
graph_type = 'input'
graph_width = 15
graph_style = 'blocks'
upper_limit = 150.0
# Network traffic settings
divisor = 1024
round_size = None
# Network info settings
detached_down = True
unknown_up = False
ignore_interfaces = ["lo"]
on_leftclick = "nm-connection-editor"
on_rightclick = "cycle_interface"
on_upscroll = ['cycle_interface', 1]
on_downscroll = ['cycle_interface', -1]
def init(self):
# Don't require importing basiciw unless using the functionality it offers.
        if any(s in self.format_up or s in self.format_down for s in
['essid', 'freq', 'quality', 'quality_bar']):
get_wifi_info = True
else:
get_wifi_info = False
self.network_info = NetworkInfo(self.interface, self.ignore_interfaces, self.detached_down, self.unknown_up,
get_wifi_info)
# Don't require importing psutil unless using the functionality it offers.
if any(s in self.format_up or s in self.format_down for s in
['bytes_sent', 'bytes_recv', 'packets_sent', 'packets_recv', 'network_graph',
'rx_tot_Mbytes', 'tx_tot_Mbytes', 'kbs']):
self.network_traffic = NetworkTraffic(self.unknown_up, self.divisor, self.round_size)
else:
self.network_traffic = None
if not self.dynamic_color:
self.end_color = self.start_color
self.colors = self.get_hex_color_range(self.start_color, self.end_color, int(self.upper_limit))
self.kbs_arr = [0.0] * self.graph_width
def cycle_interface(self, increment=1):
"""Cycle through available interfaces in `increment` steps. Sign indicates direction."""
interfaces = [i for i in netifaces.interfaces() if i not in self.ignore_interfaces]
if self.interface in interfaces:
next_index = (interfaces.index(self.interface) + increment) % len(interfaces)
self.interface = interfaces[next_index]
elif len(interfaces) > 0:
self.interface = interfaces[0]
if self.network_traffic:
self.network_traffic.clear_counters()
self.kbs_arr = [0.0] * self.graph_width
def get_network_graph(self, kbs):
# Cycle array by inserting at the start and chopping off the last element
self.kbs_arr.insert(0, kbs)
self.kbs_arr = self.kbs_arr[:self.graph_width]
return make_graph(self.kbs_arr, 0.0, self.upper_limit, self.graph_style)
def run(self):
format_values = dict(kbs="", network_graph="", bytes_sent="", bytes_recv="", packets_sent="", packets_recv="",
rx_tot_Mbytes="", tx_tot_Mbytes="",
interface="", v4="", v4mask="", v4cidr="", v6="", v6mask="", v6cidr="", mac="",
essid="", freq="", quality="", quality_bar="")
if self.network_traffic:
network_usage = self.network_traffic.get_usage(self.interface)
format_values.update(network_usage)
if self.graph_type == 'input':
kbs = network_usage['bytes_recv']
elif self.graph_type == 'output':
kbs = network_usage['bytes_sent']
else:
raise Exception("graph_type must be either 'input' or 'output'!")
format_values['network_graph'] = self.get_network_graph(kbs)
format_values['kbs'] = "{0:.1f}".format(round(kbs, 2)).rjust(6)
color = self.get_gradient(kbs, self.colors, self.upper_limit)
else:
color = None
if sysfs_interface_up(self.interface, self.unknown_up):
if not color:
color = self.color_up
format_str = self.format_up
else:
color = self.color_down
format_str = self.format_down
network_info = self.network_info.get_info(self.interface)
format_values.update(network_info)
format_values['interface'] = self.interface
self.output = {
"full_text": format_str.format(**format_values),
'color': color,
}
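# Hypothetical i3pystatus configuration sketch showing how this module is
# typically registered; the interface name and format string are placeholders:
#
# from i3pystatus import Status
# status = Status()
# status.register("network",
#                 interface="eth0",
#                 format_up="{interface} {network_graph}{kbs}KB/s")
# status.run()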
|
mit
| 1,648,212,434,812,359,000 | -7,285,058,623,659,633,000 | 35.12782 | 118 | 0.589178 | false |
mahadeva604/ansible-modules-extras
|
windows/win_dotnet_ngen.py
|
5
|
1778
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Peter Mounce <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_dotnet_ngen
version_added: "2.0"
short_description: Runs ngen to recompile DLLs after .NET updates
description:
- After .NET framework is installed/updated, Windows will probably want to recompile things to optimise for the host.
- This happens via scheduled task, usually at some inopportune time.
- This module allows you to run this task on your own schedule, so you incur the CPU hit at some more convenient and controlled time.
- "http://blogs.msdn.com/b/dotnet/archive/2013/08/06/wondering-why-mscorsvw-exe-has-high-cpu-usage-you-can-speed-it-up.aspx"
notes:
- there are in fact two scheduled tasks for ngen but they have no triggers so aren't a problem
- there's no way to test if they've been completed (?)
- the stdout is quite likely to be several megabytes
options:
author: Peter Mounce
'''
EXAMPLES = '''
# Run ngen tasks
win_dotnet_ngen:
'''
|
gpl-3.0
| -5,163,036,195,424,216,000 | -1,984,889,467,518,672,400 | 38.511111 | 137 | 0.737908 | false |
ChinaMassClouds/copenstack-server
|
openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/overview/panel.py
|
1
|
1101
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Overview(horizon.Panel):
name = _("Overview")
slug = 'overview'
img = '/static/dashboard/img/nav/overview1.png'
dashboard.Project.register(Overview)
|
gpl-2.0
| -632,502,140,326,434,300 | -2,434,933,116,963,143,700 | 32.40625 | 78 | 0.723887 | false |
NickDaly/GemRB-FixConfig-Branch
|
gemrb/GUIScripts/iwd/GUIWORLD.py
|
2
|
12851
|
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUIW.py - scripts to control some windows from GUIWORLD winpack
# except of Actions, Portrait, Options and Dialog windows
#################################################################
import GemRB
from GUIDefines import *
from ie_restype import *
import GUICommon
import GUICommonWindows
import GUIClasses
FRAME_PC_SELECTED = 0
FRAME_PC_TARGET = 1
ContainerWindow = None
ContinueWindow = None
ReformPartyWindow = None
OldActionsWindow = None
OldMessageWindow = None
Container = None
def CloseContinueWindow ():
if ContinueWindow:
# don't close the actual window now to avoid flickering: we might still want it open
GemRB.SetVar ("DialogChoose", GemRB.GetVar ("DialogOption"))
def NextDialogState ():
global ContinueWindow, OldActionsWindow
if ContinueWindow == None:
return
hideflag = GemRB.HideGUI ()
if ContinueWindow:
ContinueWindow.Unload ()
GemRB.SetVar ("ActionsWindow", OldActionsWindow.ID)
ContinueWindow = None
OldActionsWindow = None
if hideflag:
GemRB.UnhideGUI ()
def OpenEndMessageWindow ():
global ContinueWindow, OldActionsWindow
hideflag = GemRB.HideGUI ()
if not ContinueWindow:
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ContinueWindow = Window = GemRB.LoadWindow (9)
OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
GemRB.SetVar ("ActionsWindow", Window.ID)
#end dialog
Button = ContinueWindow.GetControl (0)
Button.SetText (9371)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, CloseContinueWindow)
if GUICommonWindows.PortraitWindow:
GUICommonWindows.UpdatePortraitWindow ()
if hideflag:
GemRB.UnhideGUI ()
def OpenContinueMessageWindow ():
global ContinueWindow, OldActionsWindow
hideflag = GemRB.HideGUI ()
if not ContinueWindow:
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ContinueWindow = Window = GemRB.LoadWindow (9)
OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
GemRB.SetVar ("ActionsWindow", Window.ID)
#continue
Button = ContinueWindow.GetControl (0)
Button.SetText (9372)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, CloseContinueWindow)
if hideflag:
GemRB.UnhideGUI ()
def CloseContainerWindow ():
global OldActionsWindow, OldMessageWindow, ContainerWindow
if ContainerWindow == None:
return
hideflag = GemRB.HideGUI ()
if ContainerWindow:
ContainerWindow.Unload ()
ContainerWindow = None
GemRB.SetVar ("ActionsWindow", OldActionsWindow.ID)
GemRB.SetVar ("MessageWindow", OldMessageWindow.ID)
Table = GemRB.LoadTable ("containr")
row = Container['Type']
tmp = Table.GetValue (row, 2)
#play closing sound if applicable
if tmp!='*':
GemRB.PlaySound (tmp)
#it is enough to close here
if hideflag:
GemRB.UnhideGUI ()
def UpdateContainerWindow ():
global Container
Window = ContainerWindow
pc = GemRB.GameGetFirstSelectedPC ()
GUICommon.SetEncumbranceLabels( Window, 0x10000043, 0x10000044, pc)
party_gold = GemRB.GameGetPartyGold ()
Text = Window.GetControl (0x10000036)
Text.SetText (str (party_gold))
Container = GemRB.GetContainer(0) #will use first selected pc anyway
LeftCount = Container['ItemCount']
ScrollBar = Window.GetControl (52)
Count = LeftCount/3
if Count<1:
Count=1
ScrollBar.SetVarAssoc ("LeftTopIndex", Count)
inventory_slots = GemRB.GetSlots (pc, 0x8000)
RightCount = len(inventory_slots)
ScrollBar = Window.GetControl (53)
Count = RightCount/2
if Count<1:
Count=1
ScrollBar.SetVarAssoc ("RightTopIndex", Count)
RedrawContainerWindow ()
def RedrawContainerWindow ():
Window = ContainerWindow
LeftTopIndex = GemRB.GetVar ("LeftTopIndex") * 3
LeftIndex = GemRB.GetVar ("LeftIndex")
RightTopIndex = GemRB.GetVar ("RightTopIndex") * 2
RightIndex = GemRB.GetVar ("RightIndex")
LeftCount = Container['ItemCount']
pc = GemRB.GameGetFirstSelectedPC ()
inventory_slots = GemRB.GetSlots (pc, 0x8000)
RightCount = len(inventory_slots)
for i in range (6):
#this is an autoselected container, but we could use PC too
Slot = GemRB.GetContainerItem (0, i+LeftTopIndex)
Button = Window.GetControl (i)
if Slot != None:
Button.SetVarAssoc ("LeftIndex", LeftTopIndex+i)
else:
Button.SetVarAssoc ("LeftIndex", -1)
GUICommon.UpdateInventorySlot (pc, Button, Slot, "container")
for i in range (4):
if i+RightTopIndex<RightCount:
Slot = GemRB.GetSlotItem (pc, inventory_slots[i+RightTopIndex])
else:
Slot = None
Button = Window.GetControl (i+10)
if Slot!=None:
Button.SetVarAssoc ("RightIndex", RightTopIndex+i)
else:
Button.SetVarAssoc ("RightIndex", -1)
GUICommon.UpdateInventorySlot (pc, Button, Slot, "inventory")
def OpenContainerWindow ():
global OldActionsWindow, OldMessageWindow
global ContainerWindow, Container
if ContainerWindow:
return
hideflag = GemRB.HideGUI ()
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ContainerWindow = Window = GemRB.LoadWindow (8)
OldActionsWindow = GUIClasses.GWindow( GemRB.GetVar ("ActionsWindow") )
OldMessageWindow = GUIClasses.GWindow( GemRB.GetVar ("MessageWindow") )
GemRB.SetVar ("ActionsWindow", Window.ID)
GemRB.SetVar ("MessageWindow", -1)
Container = GemRB.GetContainer(0)
# 0 - 5 - Ground Item
# 10 - 13 - Personal Item
# 50 hand
# 52, 53 scroller ground, scroller personal
# 54 - encumbrance
for i in range (6):
Button = Window.GetControl (i)
Button.SetVarAssoc ("LeftIndex", i)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, TakeItemContainer)
for i in range (4):
Button = Window.GetControl (i+10)
Button.SetVarAssoc ("RightIndex", i)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, DropItemContainer)
# left scrollbar
ScrollBar = Window.GetControl (52)
ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RedrawContainerWindow)
# right scrollbar
ScrollBar = Window.GetControl (53)
ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RedrawContainerWindow)
Button = Window.GetControl (54)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.CreateLabelOnButton (0x10000043, "NUMBER", IE_FONT_ALIGN_LEFT|IE_FONT_ALIGN_TOP)
Button.CreateLabelOnButton (0x10000044, "NUMBER", IE_FONT_ALIGN_RIGHT|IE_FONT_ALIGN_BOTTOM)
Button = Window.GetControl (50)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Table = GemRB.LoadTable ("containr")
row = Container['Type']
tmp = Table.GetValue (row, 0)
if tmp!='*':
GemRB.PlaySound (tmp)
tmp = Table.GetValue (row, 1)
if tmp!='*':
Button.SetSprites (tmp, 0, 0, 0, 0, 0 )
# Done
Button = Window.GetControl (51)
#no text
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, LeaveContainer)
GemRB.SetVar ("LeftTopIndex", 0)
GemRB.SetVar ("RightTopIndex", 0)
UpdateContainerWindow ()
if hideflag:
GemRB.UnhideGUI ()
#doing this way it will inform the core system too, which in turn will call
#CloseContainerWindow ()
def LeaveContainer ():
GemRB.LeaveContainer()
def DropItemContainer ():
RightIndex = GemRB.GetVar ("RightIndex")
if RightIndex<0:
return
#we need to get the right slot number
pc = GemRB.GameGetFirstSelectedPC ()
inventory_slots = GemRB.GetSlots (pc, 0x8000)
if RightIndex >= len(inventory_slots):
return
GemRB.ChangeContainerItem (0, inventory_slots[RightIndex], 0)
UpdateContainerWindow ()
def TakeItemContainer ():
LeftIndex = GemRB.GetVar ("LeftIndex")
if LeftIndex<0:
return
if LeftIndex >= Container['ItemCount']:
return
GemRB.ChangeContainerItem (0, LeftIndex, 1)
UpdateContainerWindow ()
def UpdateReformWindow ():
Window = ReformPartyWindow
select = GemRB.GetVar ("Selected")
need_to_drop = GemRB.GetPartySize ()-PARTY_SIZE
if need_to_drop<0:
need_to_drop = 0
#excess player number
Label = Window.GetControl (0x1000000f)
Label.SetText (str(need_to_drop) )
#done
Button = Window.GetControl (8)
if need_to_drop:
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
Button.SetState (IE_GUI_BUTTON_ENABLED)
#remove
Button = Window.GetControl (15)
if select:
Button.SetState (IE_GUI_BUTTON_ENABLED)
else:
Button.SetState (IE_GUI_BUTTON_DISABLED)
for i in range (PARTY_SIZE+1):
Button = Window.GetControl (i)
Button.EnableBorder (FRAME_PC_SELECTED, select == i+2 )
#+2 because protagonist is skipped
pic = GemRB.GetPlayerPortrait (i+2,1)
if not pic:
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetState (IE_GUI_BUTTON_LOCKED)
continue
Button.SetState (IE_GUI_BUTTON_ENABLED)
Button.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_ALIGN_BOTTOM|IE_GUI_BUTTON_ALIGN_LEFT, OP_SET)
Button.SetPicture (pic, "NOPORTSM")
GUICommonWindows.UpdatePortraitWindow ()
return
def RemovePlayer ():
global ReformPartyWindow
hideflag = GemRB.HideGUI ()
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
if ReformPartyWindow:
ReformPartyWindow.Unload ()
ReformPartyWindow = Window = GemRB.LoadWindow (25)
GemRB.SetVar ("OtherWindow", Window.ID)
#are you sure
Label = Window.GetControl (0x0fffffff)
Label.SetText (17518)
#confirm
Button = Window.GetControl (1)
Button.SetText (17507)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, RemovePlayerConfirm)
Button.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
#cancel
Button = Window.GetControl (2)
Button.SetText (13727)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, RemovePlayerCancel)
Button.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
def RemovePlayerConfirm ():
global ReformPartyWindow
hideflag = GemRB.HideGUI ()
if ReformPartyWindow:
ReformPartyWindow.Unload ()
GemRB.SetVar ("OtherWindow", -1)
#removing selected player
ReformPartyWindow = None
if hideflag:
GemRB.UnhideGUI ()
GemRB.LeaveParty (GemRB.GetVar("Selected") )
OpenReformPartyWindow ()
return
def RemovePlayerCancel ():
global ReformPartyWindow
hideflag = GemRB.HideGUI ()
if ReformPartyWindow:
ReformPartyWindow.Unload ()
GemRB.SetVar ("OtherWindow", -1)
ReformPartyWindow = None
if hideflag:
GemRB.UnhideGUI ()
OpenReformPartyWindow ()
return
def OpenReformPartyWindow ():
global ReformPartyWindow
GemRB.SetVar ("Selected", 0)
hideflag = GemRB.HideGUI ()
if ReformPartyWindow:
if ReformPartyWindow:
ReformPartyWindow.Unload ()
ReformPartyWindow = None
GemRB.SetVar ("OtherWindow", -1)
#GemRB.LoadWindowPack ("GUIREC")
if hideflag:
GemRB.UnhideGUI ()
#re-enabling party size control
GemRB.GameSetPartySize (PARTY_SIZE)
return
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
ReformPartyWindow = Window = GemRB.LoadWindow (24)
GemRB.SetVar ("OtherWindow", Window.ID)
#PC portraits
for j in range (PARTY_SIZE+1):
Button = Window.GetControl (j)
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button.SetFlags (IE_GUI_BUTTON_RADIOBUTTON|IE_GUI_BUTTON_NO_IMAGE|IE_GUI_BUTTON_PICTURE,OP_SET)
Button.SetBorder (FRAME_PC_SELECTED, 1, 1, 2, 2, 0, 255, 0, 255)
#protagonist is skipped
index = j + 2
Button.SetVarAssoc ("Selected", index)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, UpdateReformWindow)
# Remove
Button = Window.GetControl (15)
Button.SetText (17507)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, RemovePlayer)
# Done
Button = Window.GetControl (8)
Button.SetText (11973)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, OpenReformPartyWindow)
GemRB.SetVar ("ActionsWindow", -1)
UpdateReformWindow ()
if hideflag:
GemRB.UnhideGUI ()
Window.ShowModal (MODAL_SHADOW_GRAY)
return
def DeathWindow ():
#no death movie, but music is changed
GemRB.LoadMusicPL ("Theme.mus",1)
GemRB.HideGUI ()
GemRB.SetTimedEvent (DeathWindowEnd, 10)
return
def DeathWindowEnd ():
GemRB.GamePause (1,1)
GemRB.LoadWindowPack (GUICommon.GetWindowPack())
Window = GemRB.LoadWindow (17)
#reason for death
Label = Window.GetControl (0x0fffffff)
Label.SetText (16498)
#load
Button = Window.GetControl (1)
Button.SetText (15590)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, LoadPress)
#quit
Button = Window.GetControl (2)
Button.SetText (15417)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, QuitPress)
GemRB.HideGUI ()
GemRB.SetVar ("MessageWindow", -1)
GemRB.UnhideGUI ()
Window.ShowModal (MODAL_SHADOW_GRAY)
return
def QuitPress():
GemRB.QuitGame ()
GemRB.SetNextScript ("Start")
return
def LoadPress():
GemRB.QuitGame ()
GemRB.SetNextScript ("GUILOAD")
return
|
gpl-2.0
| -589,713,383,680,021,100 | -371,030,213,047,794,000 | 25.496907 | 101 | 0.740331 | false |
jjo31/ATHAM-Fluidity
|
tests/turbine_flux_dg_2d/mesh/scripts/triangle_add_edgeowner.py
|
20
|
4796
|
#!/usr/bin/env python
import sys
import triangle
import copy
import numpy
from sets import Set
#input: filename
# 5.5.2010: this script adds a new attribute to the .edge file which holds the "owner" element number of this edge
# Here is an examle geo file for this script:
# Point(1) = {0, 0, 0, 2};
# Point(2) = {1, 0, 0, 2};
# Point(3) = {1, 1, 0, 2};
# Point(4) = {0, 1, 0, 2};
# Point(5) = {0.5, 0, 0, 2};
# Point(6) = {0.5, 1, 0, 2};
# Point(7) = {0.500001, 0, 0, 2};
# Point(8) = {0.500001, 1, 0, 2};
# Point(9) = {0.4, -0.1, 0, 2};
# Point(10) = {0.4, 1.1, 0, 2};
#
#
# Line(1) = {4, 1};
# Line(2) = {1, 9};
# Line(3) = {9, 5};
# Line(4) = {5, 6};
# Line(9) = {6, 10};
# Line(10) = {10, 4};
#
# Line(5) = {8, 7};
# Line(6) = {7, 2};
# Line(7) = {2, 3};
# Line(8) = {3, 8};
#
# Physical Line(20) = {1};
# Physical Line(21) = {2};
# Physical Line(22) = {3};
# Physical Line(23) = {4};
# Physical Line(28) = {9};
# Physical Line(29) = {10};
#
# Physical Line(24) = {5};
# Physical Line(25) = {6};
# Physical Line(26) = {7};
# Physical Line(27) = {8};
#
# Line Loop(10) = {4, 9, 10, 1, 2, 3};
# Line Loop(11) = {8, 5, 6, 7};
#
# Plane Surface(11) = {10};
# Plane Surface(12) = {11};
# Physical Surface(12) = {11, 12};
########################################################################################################
def nodedupl_recursion(elein, edgein, nodeid, boundary_id):
global copy_eles, copy_edges, copy_nodes, debug, copy_surface_ids, copy_surface_id, copy_surfaceowner_ids, copy_region_ids
next_edgein=triangle.get_partner_edge(edgein, nodeid, boundary_id)
if next_edgein==None:
print "Reached one end of the surface boundary."
return
if debug>1:
print "Lets loop around nodeid", nodeid, " starting with ele", elein+1, " with boundary edge ", edgein+1, " until we reach the next surface edge with id ", next_edgein+1
next_elein_list=triangle.get_eles_on_ele_side(elein, nodeid, edgein, boundary_id)
if debug>1:
print "Duplicate edge ", next_edgein +1
copy_edges.append(triangle.edges[next_edgein])
copy_surface_ids.append(new_surface_id)
copy_surfaceowner_ids.append(next_elein_list[len(next_elein_list)-1]+1) # update copy_surfaceowner_ids for the new edge
# update copy_surfaceowner_ids for the old edge
if triangle.ele_with_edgeids(next_edgein)[0]==next_elein_list[len(next_elein_list)-1]:
copy_surfaceowner_ids[next_edgein]=triangle.ele_with_edgeids(next_edgein)[1]+1
else:
copy_surfaceowner_ids[next_edgein]=triangle.ele_with_edgeids(next_edgein)[0]+1
if (triangle.edges[next_edgein][0]==nodeid):
next_nodeid=triangle.edges[next_edgein][1]
else:
next_nodeid=triangle.edges[next_edgein][0]
nodedupl_recursion(next_elein_list[len(next_elein_list)-1], next_edgein, next_nodeid, boundary_id)
########################################################################################################
if not len(sys.argv)==2:
print "Usage: seperate_internal_boundary.py file"
print ""
print "output fixed .edge, .ele and .node file with new edge attribute holding the element owner of the edge. "
print ""
print "The outout files will be have the suffix edgow"
exit()
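# Example invocation (illustrative): `python triangle_add_edgeowner.py mesh`
# reads mesh.node/.ele/.edge and writes mesh_edgow.node/.ele/.edge.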
filename=sys.argv[1]
debug=2
triangle.read_nodefile(filename+'.node')
if triangle.dim!=2:
print "Only 2 dim meshes supported so far"
triangle.read_edgefile(filename+'.edge')
triangle.read_elefile(filename+'.ele')
copy_eles=copy.deepcopy(triangle.eles)
copy_region_ids=copy.deepcopy(triangle.region_ids)
copy_edges=copy.deepcopy(triangle.edges)
copy_surface_ids=copy.deepcopy(triangle.surface_ids)
copy_surfaceowner_ids=[-1 for i in range(0,len(triangle.surface_ids))] # Will store the element id for each surface edge
copy_nodes=copy.deepcopy(triangle.nodes)
# Now assign the surfaceowner_id to the external boundaries
for e in range(0,len(copy_surfaceowner_ids)):
if copy_surfaceowner_ids[e]>=0:
print "Internal Error. Ask [email protected]!"
exit()
if len(triangle.ele_with_edgeids(e))!=1:
print "Error Found internal boundary!"
exit()
copy_surfaceowner_ids[e]=triangle.ele_with_edgeids(e)[0]+1
if debug>0:
print "save node file as ", filename, "_edgow.node"
triangle.save_nodefile(copy_nodes, 2, filename+"_edgow.node")
if debug>0:
print "save ele file as ", filename, "_edgow.ele"
triangle.save_elefile(copy_eles, copy_region_ids, filename+"_edgow.ele")
if debug>0:
print "save edge file as ", filename, "_edgow.edge"
triangle.save_edgefile2(copy_edges, copy_surface_ids, copy_surfaceowner_ids, filename+"_edgow.edge")
|
lgpl-2.1
| 7,233,324,505,819,977,000 | 8,823,722,722,197,512,000 | 34.264706 | 177 | 0.609049 | false |
jsma/django-cms
|
menus/utils.py
|
11
|
4500
|
# -*- coding: utf-8 -*-
from cms.models.titlemodels import Title
from cms.utils import get_language_from_request
from cms.utils.i18n import force_language, hide_untranslated
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch, reverse, resolve
def mark_descendants(nodes):
for node in nodes:
node.descendant = True
mark_descendants(node.children)
def cut_levels(nodes, level):
"""
For cutting the nav_extender levels if you have a from_level in the navigation.
"""
if nodes:
if nodes[0].level == level:
return nodes
return sum((cut_levels(node.children, level) for node in nodes), [])
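# For example, with roots at level 0, cut_levels(nodes, 1) flattens the forest
# into the list of all level-1 nodes (an illustration of the recursion above).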
def find_selected(nodes):
"""
Finds a selected nav_extender node
"""
for node in nodes:
if hasattr(node, "selected"):
return node
elif hasattr(node, "ancestor"):
result = find_selected(node.children)
if result:
return result
def set_language_changer(request, func):
"""
Sets a language chooser function that accepts one parameter: language
The function should return a url in the supplied language
normally you would want to give it the get_absolute_url function with an optional language parameter
example:
def get_absolute_url(self, language=None):
reverse('product_view', args=[self.get_slug(language=language)])
Use this function in your nav extender views that have i18n slugs.
"""
request._language_changer = func
def language_changer_decorator(language_changer):
"""
A decorator wrapper for set_language_changer.
from menus.utils import language_changer_decorator
@language_changer_decorator(function_get_language_changer_url)
def my_view_function(request, somearg):
pass
"""
def _decorator(func):
def _wrapped(request, *args, **kwargs):
set_language_changer(request, language_changer)
return func(request, *args, **kwargs)
_wrapped.__name__ = func.__name__
_wrapped.__doc__ = func.__doc__
return _wrapped
return _decorator
class DefaultLanguageChanger(object):
def __init__(self, request):
self.request = request
self._app_path = None
@property
def app_path(self):
if self._app_path is None:
if settings.USE_I18N:
page_path = self.get_page_path(get_language_from_request(self.request))
else:
page_path = self.get_page_path(settings.LANGUAGE_CODE)
if page_path:
self._app_path = self.request.path_info[len(page_path):]
else:
self._app_path = self.request.path_info
return self._app_path
def get_page_path(self, lang):
page = getattr(self.request, 'current_page', None)
if page:
with force_language(lang):
try:
return page.get_absolute_url(language=lang, fallback=False)
except (Title.DoesNotExist, NoReverseMatch):
if hide_untranslated(lang) and settings.USE_I18N:
return '/%s/' % lang
else:
return page.get_absolute_url(language=lang, fallback=True)
else:
return '/%s/' % lang if settings.USE_I18N else '/'
def __call__(self, lang):
page_language = get_language_from_request(self.request)
with force_language(page_language):
try:
view = resolve(self.request.path_info)
except:
view = None
if hasattr(self.request, 'toolbar') and self.request.toolbar.obj:
with force_language(lang):
try:
return self.request.toolbar.obj.get_absolute_url()
except:
pass
elif view and not view.url_name in ('pages-details-by-slug', 'pages-root'):
view_name = view.url_name
if view.namespace:
view_name = "%s:%s" % (view.namespace, view_name)
url = None
with force_language(lang):
try:
url = reverse(view_name, args=view.args, kwargs=view.kwargs, current_app=view.app_name)
except NoReverseMatch:
pass
if url:
return url
return '%s%s' % (self.get_page_path(lang), self.app_path)
|
bsd-3-clause
| -3,180,294,065,992,053,000 | -7,333,183,402,738,910,000 | 33.351145 | 107 | 0.586222 | false |
Y3K/django
|
tests/multiple_database/routers.py
|
379
|
1927
|
from __future__ import unicode_literals
from django.db import DEFAULT_DB_ALIAS
class TestRouter(object):
"""
Vaguely behave like primary/replica, but the databases aren't assumed to
propagate changes.
"""
def db_for_read(self, model, instance=None, **hints):
if instance:
return instance._state.db or 'other'
return 'other'
def db_for_write(self, model, **hints):
return DEFAULT_DB_ALIAS
def allow_relation(self, obj1, obj2, **hints):
return obj1._state.db in ('default', 'other') and obj2._state.db in ('default', 'other')
def allow_migrate(self, db, app_label, **hints):
return True
class AuthRouter(object):
"""
Control all database operations on models in the contrib.auth application.
"""
def db_for_read(self, model, **hints):
"Point all read operations on auth models to 'default'"
if model._meta.app_label == 'auth':
# We use default here to ensure we can tell the difference
# between a read request and a write request for Auth objects
return 'default'
return None
def db_for_write(self, model, **hints):
"Point all operations on auth models to 'other'"
if model._meta.app_label == 'auth':
return 'other'
return None
def allow_relation(self, obj1, obj2, **hints):
"Allow any relation if a model in Auth is involved"
if obj1._meta.app_label == 'auth' or obj2._meta.app_label == 'auth':
return True
return None
def allow_migrate(self, db, app_label, **hints):
"Make sure the auth app only appears on the 'other' db"
if app_label == 'auth':
return db == 'other'
return None
class WriteRouter(object):
# A router that only expresses an opinion on writes
def db_for_write(self, model, **hints):
return 'writer'
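# Example settings wiring (illustrative): Django consults routers in order,
# so AuthRouter gets first say on auth models before TestRouter's defaults.
#
# DATABASE_ROUTERS = ['multiple_database.routers.AuthRouter',
#                     'multiple_database.routers.TestRouter']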
|
bsd-3-clause
| -6,174,300,085,464,104,000 | 991,773,956,319,962,800 | 30.080645 | 96 | 0.613908 | false |
trustedanalytics/spark-tk
|
regression-tests/sparktkregtests/testcases/frames/boxcox_test.py
|
12
|
5074
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test frame.box_cox() and frame.reverse_box_cox()"""
import unittest
from sparktkregtests.lib import sparktk_test
class BoxCoxTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(BoxCoxTest, self).setUp()
dataset =\
[[5.8813080107727425], [8.9771372790941797], [8.9153072947470804],
[8.1583747730768401], [0.35889585616853292]]
schema = [("y", float)]
self.frame = self.context.frame.create(dataset, schema=schema)
def test_wt_default(self):
""" Test behaviour for default params, lambda = 0 """
self.frame.box_cox("y")
actual = self.frame.to_pandas()["y_lambda_0.0"].tolist()
expected =\
[1.7717791879837133, 2.1946810429706676,
2.1877697201262163, 2.0990449791729704, -1.0247230268174008]
self.assertItemsEqual(actual, expected)
def test_lambda(self):
""" Test wt for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
actual = self.frame.to_pandas()["y_lambda_0.3"].tolist()
expected =\
[2.3384668540844573, 3.1056915770236082,
3.0923547540771801, 2.9235756971904037, -0.88218677941017198]
self.assertItemsEqual(actual, expected)
def test_reverse_default(self):
""" Test reverse transform for default lambda = 0 """
self.frame.box_cox("y")
self.frame.reverse_box_cox("y_lambda_0.0",
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727425, 8.9771372790941815,
8.9153072947470804, 8.1583747730768401, 0.35889585616853298]
self.assertItemsEqual(actual, expected)
def test_reverse_lambda(self):
""" Test reverse transform for lambda = 0.3 """
self.frame.box_cox("y", 0.3)
self.frame.reverse_box_cox("y_lambda_0.3", 0.3,
reverse_box_cox_column_name="reverse")
actual = self.frame.to_pandas()["reverse"].tolist()
expected =\
[5.8813080107727442, 8.9771372790941797,
8.9153072947470822, 8.1583747730768419,
0.35889585616853298]
self.assertItemsEqual(actual, expected)
@unittest.skip("req not clear")
def test_lambda_negative(self):
""" Test box cox for lambda -1 """
self.frame.box_cox("y", -1)
actual = self.frame.to_pandas()["y_lambda_-1.0"].tolist()
expected =\
[0.82996979614597488, 0.88860591423406388,
0.88783336715839256, 0.87742656744575354,
-1.7863236167608822]
self.assertItemsEqual(actual, expected)
def test_existing_boxcox_column(self):
""" Test behavior for existing boxcox column """
self.frame.box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.box_cox("y", 0.3)
def test_existing_reverse_column(self):
""" Test behavior for existing reverse boxcox column """
self.frame.reverse_box_cox("y", 0.3)
with self.assertRaisesRegexp(
Exception, "duplicate column name"):
self.frame.reverse_box_cox("y", 0.3)
@unittest.skip("Req not clear")
def test_negative_col_positive_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
frame.box_cox("y", 1)
actual = frame.to_pandas()["y_lambda_1.0"].tolist()
expected = [-2.0, -3.0, 0]
self.assertItemsEqual(actual, expected)
@unittest.skip("Req not clear")
def test_negative_col_frational_lambda(self):
"""Test behaviour for negative input column and negative lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y", 0.1)
@unittest.skip("Req not clear")
def test_negative_col_zero_lambda(self):
"""Test behaviour for negative input column and positive lambda"""
frame = self.context.frame.create([[-1], [-2], [1]], [("y", float)])
with self.assertRaises(Exception):
frame.box_cox("y")
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| -6,267,059,840,769,244,000 | 1,274,083,004,297,052,200 | 34.432624 | 78 | 0.617094 | false |
vmagamedov/kinko
|
kinko/compile/incremental_dom.py
|
1
|
9185
|
from json.encoder import encode_basestring
from slimit import ast as js
from ..types import NamedArgMeta, VarArgsMeta, VarNamedArgsMeta
from ..utils import split_args, normalize_args
from ..nodes import Tuple, Symbol, Placeholder, String, Number
from ..utils import Environ
from ..compat import text_type
from ..checker import HTML_TAG_TYPE, GET_TYPE, IF1_TYPE, IF2_TYPE, JOIN1_TYPE
from ..checker import JOIN2_TYPE, get_type, DEF_TYPE, EACH_TYPE
from ..checker import returns_markup, contains_markup
from ..constant import SELF_CLOSING_ELEMENTS
def _str(value):
return js.String(encode_basestring(value))
def _text(value):
return js.ExprStatement(js.FunctionCall(js.Identifier('text'), [value]))
def _ctx_var(value):
return js.BracketAccessor(js.Identifier('ctx'), _str(value))
def _yield_writes(env, node):
if returns_markup(node):
for item in compile_stmt(env, node):
yield item
else:
yield _text(compile_expr(env, node))
def _el_open(tag, key=None, attrs=None, self_close=False):
fn = 'elementVoid' if self_close else 'elementOpen'
return js.ExprStatement(js.FunctionCall(js.Identifier(fn), [
_str(tag),
_str(key or ''),
js.Array([]),
js.Array(attrs or []),
]))
def _el_close(tag):
return js.ExprStatement(js.FunctionCall(js.Identifier('elementClose'),
[_str(tag)]))
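# Illustrative sketch: a markup node such as (div "hi") compiles, via the
# helpers above, to roughly these Incremental DOM calls:
#   elementOpen("div", "", [], []);
#   text("hi");
#   elementClose("div");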
def compile_if1_expr(env, node, test, then_):
test_expr = compile_expr(env, test)
then_expr = compile_expr(env, then_)
else_expr = js.Null(None)
return js.Conditional(test_expr, then_expr, else_expr)
def compile_if2_expr(env, node, test, then_, else_):
test_expr = compile_expr(env, test)
then_expr = compile_expr(env, then_)
else_expr = compile_expr(env, else_)
return js.Conditional(test_expr, then_expr, else_expr)
def compile_get_expr(env, node, obj, attr):
obj_expr = compile_expr(env, obj)
return js.BracketAccessor(obj_expr, _str(attr.name))
def compile_func_expr(env, node, *norm_args):
sym, args = node.values[0], node.values[1:]
pos_args, kw_args = split_args(args)
name_expr = js.DotAccessor(js.Identifier('builtins'),
js.Identifier(sym.name))
compiled_args = [compile_expr(env, value)
for value in pos_args]
compiled_args.append(js.Object([
js.Label(_str(text_type(key)), compile_expr(env, value))
for key, value in kw_args.items()
]))
return js.FunctionCall(name_expr, compiled_args)
EXPR_TYPES = {
IF1_TYPE: compile_if1_expr,
IF2_TYPE: compile_if2_expr,
GET_TYPE: compile_get_expr,
}
def compile_expr(env, node):
if isinstance(node, Tuple):
sym, args = node.values[0], node.values[1:]
assert sym.__type__
pos_args, kw_args = split_args(args)
norm_args = normalize_args(sym.__type__, pos_args, kw_args)
proc = EXPR_TYPES.get(sym.__type__, compile_func_expr)
return proc(env, node, *norm_args)
elif isinstance(node, Symbol):
if node.name in env:
return js.Identifier(env[node.name])
else:
return _ctx_var(node.name)
elif isinstance(node, Placeholder):
return js.Identifier(env[node.name])
elif isinstance(node, String):
return _str(text_type(node.value))
elif isinstance(node, Number):
return js.Number(text_type(node.value))
else:
raise TypeError('Unable to compile {!r} of type {!r} as expression'
.format(node, type(node)))
def compile_def_stmt(env, node, name_sym, body):
args = [a.__arg_name__ for a in get_type(node).__args__]
with env.push(args):
yield js.FuncDecl(js.Identifier(name_sym.name),
[js.Identifier(env[arg]) for arg in args],
list(compile_stmt(env, body)))
def compile_html_tag_stmt(env, node, attrs, body):
tag_name = node.values[0].name
self_closing = tag_name in SELF_CLOSING_ELEMENTS
compiled_attrs = []
for key, value in attrs.items():
compiled_attrs.append(_str(text_type(key)))
compiled_attrs.append(compile_expr(env, value))
yield _el_open(tag_name, None, compiled_attrs,
self_close=self_closing)
if self_closing:
assert not body, ('Positional args are not expected in the '
'self-closing elements')
return
for arg in body:
for item in _yield_writes(env, arg):
yield item
yield _el_close(tag_name)
def compile_if1_stmt(env, node, test, then_):
test_expr = compile_expr(env, test)
yield js.If(test_expr, js.Block(list(_yield_writes(env, then_))), None)
def compile_if2_stmt(env, node, test, then_, else_):
test_expr = compile_expr(env, test)
yield js.If(test_expr, js.Block(list(_yield_writes(env, then_))),
js.Block(list(_yield_writes(env, else_))))
def compile_each_stmt(env, node, var, col, body):
col_expr = compile_expr(env, col)
with env.push(['_i']):
i_expr = js.Identifier(env['_i'])
with env.push([var.name]):
var_stmt = js.VarStatement([
js.Assign('=', js.Identifier(env[var.name]),
js.BracketAccessor(col_expr, i_expr)),
])
yield js.For(
js.VarStatement([js.VarDecl(i_expr, js.Number('0'))]),
js.BinOp('<', i_expr,
js.DotAccessor(col_expr,
js.Identifier('length'))),
js.UnaryOp('++', i_expr, postfix=True),
js.Block([var_stmt] + list(compile_stmt(env, body))),
)
def compile_join1_stmt(env, node, col):
for value in col.values:
for item in _yield_writes(env, value):
yield item
def compile_join2_stmt(env, node, sep, col):
for i, value in enumerate(col.values):
if i:
yield _text(_str(sep.value))
for item in _yield_writes(env, value):
yield item
STMT_TYPES = {
DEF_TYPE: compile_def_stmt,
HTML_TAG_TYPE: compile_html_tag_stmt,
IF1_TYPE: compile_if1_stmt,
IF2_TYPE: compile_if2_stmt,
EACH_TYPE: compile_each_stmt,
JOIN1_TYPE: compile_join1_stmt,
JOIN2_TYPE: compile_join2_stmt,
}
def compile_func_arg(env, type_, value):
if contains_markup(type_):
return js.FuncExpr(None, [], list(compile_stmt(env, value)))
else:
return compile_expr(env, value)
def compile_func_stmt(env, node, *norm_args):
sym = node.values[0]
arg_exprs = []
for arg_type, arg_value in zip(sym.__type__.__args__, norm_args):
if isinstance(arg_type, NamedArgMeta):
type_ = arg_type.__arg_type__
arg = compile_func_arg(env, type_, arg_value)
elif isinstance(arg_type, VarArgsMeta):
type_ = arg_type.__arg_type__
arg = js.Array([compile_func_arg(env, type_, v)
for v in arg_value])
elif isinstance(arg_type, VarNamedArgsMeta):
type_ = arg_type.__arg_type__
arg = js.Object([js.Label(_str(k), compile_func_arg(env, type_, v))
for k, v in arg_value.items()])
else:
arg = compile_func_arg(env, arg_type, arg_value)
arg_exprs.append(arg)
if sym.ns:
if sym.ns == '.':
name_expr = js.Identifier(sym.rel)
else:
name_expr = js.DotAccessor(js.Identifier(sym.ns),
js.Identifier(sym.rel))
else:
name_expr = js.DotAccessor(js.Identifier('builtins'),
js.Identifier(sym.name))
yield js.ExprStatement(js.FunctionCall(name_expr, arg_exprs))
def compile_stmt(env, node):
if isinstance(node, Tuple):
sym, args = node.values[0], node.values[1:]
assert sym.__type__
pos_args, kw_args = split_args(args)
norm_args = normalize_args(sym.__type__, pos_args, kw_args)
proc = STMT_TYPES.get(sym.__type__, compile_func_stmt)
for item in proc(env, node, *norm_args):
yield item
elif isinstance(node, Symbol):
if node.name in env:
yield _text(js.Identifier(env[node.name]))
else:
yield _text(_ctx_var(node.name))
elif isinstance(node, Placeholder):
yield js.ExprStatement(js.FunctionCall(js.Identifier(env[node.name]),
[]))
elif isinstance(node, String):
yield _text(js.String(node.value))
elif isinstance(node, Number):
yield _text(js.Number(node.value))
else:
raise TypeError('Unable to compile {!r} of type {!r} as statement'
.format(node, type(node)))
def compile_stmts(env, nodes):
for node in nodes:
for item in compile_stmt(env, node):
yield item
def compile_module(body):
env = Environ()
mod = js.Program(list(compile_stmts(env, body.values)))
return mod
def dumps(node):
return node.to_ecma() + '\n'
|
bsd-3-clause
| 2,259,233,165,926,284,500 | 1,367,898,165,341,492,200 | 30.135593 | 79 | 0.587371 | false |
mottosso/pyblish-magenta
|
pyblish_magenta/vendor/capture.py
|
1
|
15009
|
"""Maya Capture
Playblasting with independent viewport, camera and display options
"""
import re
import sys
import contextlib
from maya import cmds
version_info = (1, 1, 0)
__version__ = "%s.%s.%s" % version_info
__license__ = "MIT"
def capture(camera=None,
width=None,
height=None,
filename=None,
start_frame=None,
end_frame=None,
frame=None,
format='qt',
compression='h264',
off_screen=False,
viewer=True,
isolate=None,
maintain_aspect_ratio=True,
overwrite=False,
raw_frame_numbers=False,
camera_options=None,
viewport_options=None,
display_options=None,
complete_filename=None):
"""Playblast in an independent panel
Arguments:
camera (str, optional): Name of camera, defaults to "persp"
width (int, optional): Width of output in pixels
height (int, optional): Height of output in pixels
filename (str, optional): Name of output file. If
none is specified, no files are saved.
start_frame (float, optional): Defaults to current start frame.
end_frame (float, optional): Defaults to current end frame.
frame (float or tuple, optional): A single frame or list of frames.
Use this to capture a single frame or an arbitrary sequence of
frames.
format (str, optional): Name of format, defaults to "qt".
compression (str, optional): Name of compression, defaults to "h264"
off_screen (bool, optional): Whether or not to playblast off screen
viewer (bool, optional): Display results in native player
isolate (list): List of nodes to isolate upon capturing
maintain_aspect_ratio (bool, optional): Modify height in order to
maintain aspect ratio.
overwrite (bool, optional): Whether or not to overwrite if file
            already exists. If disabled and the file exists, an error will
            be raised.
raw_frame_numbers (bool, optional): Whether or not to use the exact
frame numbers from the scene or capture to a sequence starting at
zero. Defaults to False. When set to True `viewer` can't be used
and will be forced to False.
camera_options (CameraOptions, optional): Supplied camera options,
using :class:`CameraOptions`
viewport_options (ViewportOptions, optional): Supplied viewport
options, using :class:`ViewportOptions`
display_options (DisplayOptions, optional): Supplied display
options, using :class:`DisplayOptions`
complete_filename (str, optional): Exact name of output file. Use this
to override the output of `filename` so it excludes frame padding.
Example:
>>> # Launch default capture
>>> capture()
>>> # Launch capture with custom viewport settings
>>> view_opts = ViewportOptions()
>>> view_opts.grid = False
>>> view_opts.polymeshes = True
>>> view_opts.displayAppearance = "wireframe"
>>> cam_opts = CameraOptions()
>>> cam_opts.displayResolution = True
>>> capture('myCamera', 800, 600,
... viewport_options=view_opts,
... camera_options=cam_opts)
"""
camera = camera or "persp"
# Ensure camera exists
if not cmds.objExists(camera):
raise RuntimeError("Camera does not exist: {0}".format(camera))
width = width or cmds.getAttr("defaultResolution.width")
height = height or cmds.getAttr("defaultResolution.height")
if maintain_aspect_ratio:
ratio = cmds.getAttr("defaultResolution.deviceAspectRatio")
height = width / ratio
start_frame = start_frame or cmds.playbackOptions(minTime=True, query=True)
end_frame = end_frame or cmds.playbackOptions(maxTime=True, query=True)
# We need to wrap `completeFilename`, otherwise even when None is provided
    # it will use filename as the exact name. Only when the argument is
    # omitted entirely does playblast behave correctly.
playblast_kwargs = dict()
if complete_filename:
playblast_kwargs['completeFilename'] = complete_filename
if frame:
playblast_kwargs['frame'] = frame
# (#21) Bugfix: `maya.cmds.playblast` suffers from undo bug where it
# always sets the currentTime to frame 1. By setting currentTime before
# the playblast call it'll undo correctly.
cmds.currentTime(cmds.currentTime(q=1))
    padding = 10  # Extend panel to accommodate the OS window manager
with _independent_panel(width=width + padding,
height=height + padding) as panel:
cmds.setFocus(panel)
with contextlib.nested(
_maintain_camera(panel, camera),
_applied_viewport_options(viewport_options, panel),
_applied_camera_options(camera_options, panel, camera),
_applied_display_options(display_options),
_isolated_nodes(isolate, panel),
_maintained_time()):
output = cmds.playblast(
compression=compression,
format=format,
percent=100,
quality=100,
viewer=viewer,
startTime=start_frame,
endTime=end_frame,
offScreen=off_screen,
forceOverwrite=overwrite,
filename=filename,
widthHeight=[width, height],
rawFrameNumbers=raw_frame_numbers,
**playblast_kwargs)
return output
def snap(*args, **kwargs):
"""Single frame playblast in an independent panel.
The arguments of `capture` are all valid here as well, except for
`start_frame` and `end_frame`.
Arguments:
frame (float, optional): The frame to snap. If not provided current
frame is used.
clipboard (bool, optional): Whether to add the output image to the
global clipboard. This allows to easily paste the snapped image
into another application, eg. into Photoshop.
Keywords:
See `capture`.
"""
# capture single frame
frame = kwargs.pop('frame', cmds.currentTime(q=1))
kwargs['start_frame'] = frame
kwargs['end_frame'] = frame
kwargs['frame'] = frame
if not isinstance(frame, (int, float)):
raise TypeError("frame must be a single frame (integer or float). "
"Use `capture()` for sequences.")
# override capture defaults
format = kwargs.pop('format', "image")
compression = kwargs.pop('compression', "png")
viewer = kwargs.pop('viewer', False)
raw_frame_numbers = kwargs.pop('raw_frame_numbers', True)
kwargs['compression'] = compression
kwargs['format'] = format
kwargs['viewer'] = viewer
kwargs['raw_frame_numbers'] = raw_frame_numbers
# pop snap only keyword arguments
clipboard = kwargs.pop('clipboard', False)
# perform capture
output = capture(*args, **kwargs)
def replace(m):
"""Substitute # with frame number"""
return str(int(frame)).zfill(len(m.group()))
output = re.sub("#+", replace, output)
# add image to clipboard
if clipboard:
_image_to_clipboard(output)
return output
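# Example (assumes a live Maya session; the file name is illustrative):
#
#   # Snap the current frame to disk and copy the image to the clipboard
#   snap(camera="persp", filename="/tmp/preview", clipboard=True)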
class ViewportOptions:
"""Viewport options for :func:`capture`"""
useDefaultMaterial = False
wireframeOnShaded = False
displayAppearance = 'smoothShaded'
selectionHiliteDisplay = False
headsUpDisplay = True
# Visibility flags
nurbsCurves = False
nurbsSurfaces = False
polymeshes = True
subdivSurfaces = False
cameras = False
lights = False
grid = False
joints = False
ikHandles = False
deformers = False
dynamics = False
fluids = False
hairSystems = False
follicles = False
nCloths = False
nParticles = False
nRigids = False
dynamicConstraints = False
locators = False
manipulators = False
dimensions = False
handles = False
pivots = False
textures = False
strokes = False
class CameraOptions:
"""Camera settings for :func:`capture`
Camera options are applied to the specified camera and
then reverted once the capture is complete.
"""
displayGateMask = False
displayResolution = False
displayFilmGate = False
displayFieldChart = False
displaySafeAction = False
displaySafeTitle = False
displayFilmPivot = False
displayFilmOrigin = False
overscan = 1.0
class DisplayOptions:
"""Display options for :func:`capture`
Use this struct for background color, anti-alias and other
display-related options.
"""
displayGradient = True
background = (0.631, 0.631, 0.631)
backgroundTop = (0.535, 0.617, 0.702)
backgroundBottom = (0.052, 0.052, 0.052)
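# Example (illustrative values): any of the attributes above can be
# overridden per instance before handing the object to capture():
#
#   display = DisplayOptions()
#   display.displayGradient = False
#   display.background = (0.1, 0.1, 0.1)
#   capture("persp", 1280, 720, display_options=display)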
def _parse_options(options):
"""Return dictionary of properties from option-objects"""
opts = dict()
for attr in dir(options):
if attr.startswith("__"):
continue
opts[attr] = getattr(options, attr)
return opts
@contextlib.contextmanager
def _independent_panel(width, height):
"""Create capture-window context without decorations
Arguments:
width (int): Width of panel
height (int): Height of panel
Example:
>>> with _independent_panel(800, 600):
... cmds.capture()
"""
# center panel on screen
screen_width, screen_height = _get_screen_size()
topLeft = [int((screen_height-height)/2.0),
int((screen_width-width)/2.0)]
window = cmds.window(width=width,
height=height,
topLeftCorner=topLeft,
menuBarVisible=False,
titleBar=False)
cmds.paneLayout()
panel = cmds.modelPanel(menuBarVisible=False,
label='CapturePanel')
# Hide icons under panel menus
bar_layout = cmds.modelPanel(panel, q=True, barLayout=True)
cmds.frameLayout(bar_layout, e=True, collapse=True)
cmds.showWindow(window)
# Set the modelEditor of the modelPanel as the active view so it takes
    # the playback focus. This may be redundant with the `refresh` call below.
editor = cmds.modelPanel(panel, query=True, modelEditor=True)
cmds.modelEditor(editor, e=1, activeView=True)
# Force a draw refresh of Maya so it keeps focus on the new panel
# This focus is required to force preview playback in the independent panel
cmds.refresh(force=True)
try:
yield panel
finally:
# Delete the panel to fix memory leak (about 5 mb per capture)
cmds.deleteUI(panel, panel=True)
cmds.deleteUI(window)
@contextlib.contextmanager
def _applied_viewport_options(options, panel):
"""Context manager for applying `options` to `panel`"""
options = options or ViewportOptions()
options = _parse_options(options)
cmds.modelEditor(panel,
edit=True,
allObjects=False,
grid=False,
manipulators=False)
cmds.modelEditor(panel, edit=True, **options)
yield
@contextlib.contextmanager
def _applied_camera_options(options, panel, camera):
"""Context manager for applying `options` to `camera`"""
options = options or CameraOptions()
options = _parse_options(options)
old_options = dict()
    # Iterate over a copy so that unqueryable attributes can be dropped
    for opt in list(options):
        try:
            old_options[opt] = cmds.getAttr(camera + "." + opt)
        except Exception:
            sys.stderr.write("Could not get camera attribute "
                             "for capture: %s\n" % opt)
            # `options` is a plain dict at this point (see _parse_options),
            # so remove the key with pop(); delattr() would raise TypeError
            options.pop(opt)
for opt, value in options.iteritems():
cmds.setAttr(camera + "." + opt, value)
try:
yield
finally:
if old_options:
for opt, value in old_options.iteritems():
cmds.setAttr(camera + "." + opt, value)
@contextlib.contextmanager
def _applied_display_options(options):
"""Context manager for setting background color display options."""
options = options or DisplayOptions()
colors = ['background', 'backgroundTop', 'backgroundBottom']
preferences = ['displayGradient']
# Store current settings
original = {}
for color in colors:
original[color] = cmds.displayRGBColor(color, query=True) or []
for preference in preferences:
original[preference] = cmds.displayPref(query=True, **{preference: True})
# Apply settings
for color in colors:
value = getattr(options, color)
cmds.displayRGBColor(color, *value)
for preference in preferences:
value = getattr(options, preference)
cmds.displayPref(**{preference: value})
try:
yield
finally:
# Restore original settings
for color in colors:
cmds.displayRGBColor(color, *original[color])
for preference in preferences:
cmds.displayPref(**{preference: original[preference]})
@contextlib.contextmanager
def _isolated_nodes(nodes, panel):
"""Context manager for isolating `nodes` in `panel`"""
if nodes is not None:
cmds.isolateSelect(panel, state=True)
for obj in nodes:
cmds.isolateSelect(panel, addDagObject=obj)
yield
@contextlib.contextmanager
def _maintained_time():
"""Context manager for preserving (resetting) the time after the context"""
current_time = cmds.currentTime(query=1)
try:
yield
finally:
cmds.currentTime(current_time)
@contextlib.contextmanager
def _maintain_camera(panel, camera):
state = {}
if not _in_standalone():
cmds.lookThru(panel, camera)
else:
state = dict((camera, cmds.getAttr(camera + ".rnd"))
for camera in cmds.ls(type="camera"))
cmds.setAttr(camera + ".rnd", True)
try:
yield
finally:
for camera, renderable in state.iteritems():
cmds.setAttr(camera + ".rnd", renderable)
def _image_to_clipboard(path):
"""Copies the image at path to the system's global clipboard."""
if _in_standalone():
raise Exception("Cannot copy to clipboard from Maya Standalone")
import PySide.QtGui
image = PySide.QtGui.QImage(path)
clipboard = PySide.QtGui.QApplication.clipboard()
clipboard.setImage(image, mode=PySide.QtGui.QClipboard.Clipboard)
def _get_screen_size():
"""Return available screen size without space occupied by taskbar"""
if _in_standalone():
return [0, 0]
import PySide.QtGui
rect = PySide.QtGui.QDesktopWidget().screenGeometry(-1)
return [rect.width(), rect.height()]
def _in_standalone():
return not hasattr(cmds, "about") or cmds.about(batch=True)
|
lgpl-3.0
| 5,175,430,323,990,665,000 | 7,350,294,231,026,901,000 | 30.399582 | 81 | 0.625225 | false |
embecosm/bachmann-gdb
|
gdb/python/lib/gdb/prompt.py
|
137
|
4210
|
# Extended prompt utilities.
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwdu()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return '\033'
def _prompt_bs(attr):
"A backslash."
return '\\'
def _prompt_n(attr):
"A newline."
return '\n'
def _prompt_r(attr):
"A carriage return."
return '\r'
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return '\001'
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return '\002'
prompt_substitutions = {
'e': _prompt_esc,
'\\': _prompt_bs,
'n': _prompt_n,
'r': _prompt_r,
'v': _prompt_version,
'w': _prompt_pwd,
'f': _prompt_frame,
't': _prompt_thread,
'p': _prompt_param,
'[': _prompt_noprint_begin,
']': _prompt_noprint_end
}
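# A new substitution is just another callable in this table; the key "x"
# below is a hypothetical example, not a built-in:
#
#   prompt_substitutions['x'] = lambda attr: 'custom-text'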
def prompt_help():
"""Generate help dynamically from the __doc__ strings of attribute
functions."""
result = ''
    keys = sorted(prompt_substitutions.keys())
for key in keys:
result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
return result
def substitute_prompt(prompt):
"Perform substitutions on PROMPT."
result = ''
plen = len(prompt)
i = 0
while i < plen:
if prompt[i] == '\\':
i = i + 1
if i >= plen:
break
cmdch = prompt[i]
if cmdch in prompt_substitutions:
cmd = prompt_substitutions[cmdch]
if i + 1 < plen and prompt[i + 1] == '{':
j = i + 1
while j < plen and prompt[j] != '}':
j = j + 1
# Just ignore formatting errors.
if j >= plen or prompt[j] != '}':
arg = None
else:
arg = prompt[i + 2 : j]
i = j
else:
arg = None
result += str(cmd(arg))
else:
# Unrecognized escapes are turned into the escaped
# character itself.
result += prompt[i]
else:
result += prompt[i]
i = i + 1
return result
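# Example: "\v" expands to the GDB version and "\w" to the working
# directory, so (output shown is illustrative only):
#
#   substitute_prompt("(\\v) \\w> ")
#   # -> "(7.6) /home/user> "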
|
gpl-2.0
| 4,497,989,687,946,363,000 | 6,685,148,393,077,300,000 | 27.445946 | 75 | 0.57791 | false |
dfalt974/SickRage
|
lib/unidecode/x096.py
|
252
|
4610
|
data = (
'Fa ', # 0x00
'Ge ', # 0x01
'He ', # 0x02
'Kun ', # 0x03
'Jiu ', # 0x04
'Yue ', # 0x05
'Lang ', # 0x06
'Du ', # 0x07
'Yu ', # 0x08
'Yan ', # 0x09
'Chang ', # 0x0a
'Xi ', # 0x0b
'Wen ', # 0x0c
'Hun ', # 0x0d
'Yan ', # 0x0e
'E ', # 0x0f
'Chan ', # 0x10
'Lan ', # 0x11
'Qu ', # 0x12
'Hui ', # 0x13
'Kuo ', # 0x14
'Que ', # 0x15
'Ge ', # 0x16
'Tian ', # 0x17
'Ta ', # 0x18
'Que ', # 0x19
'Kan ', # 0x1a
'Huan ', # 0x1b
'Fu ', # 0x1c
'Fu ', # 0x1d
'Le ', # 0x1e
'Dui ', # 0x1f
'Xin ', # 0x20
'Qian ', # 0x21
'Wu ', # 0x22
'Yi ', # 0x23
'Tuo ', # 0x24
'Yin ', # 0x25
'Yang ', # 0x26
'Dou ', # 0x27
'E ', # 0x28
'Sheng ', # 0x29
'Ban ', # 0x2a
'Pei ', # 0x2b
'Keng ', # 0x2c
'Yun ', # 0x2d
'Ruan ', # 0x2e
'Zhi ', # 0x2f
'Pi ', # 0x30
'Jing ', # 0x31
'Fang ', # 0x32
'Yang ', # 0x33
'Yin ', # 0x34
'Zhen ', # 0x35
'Jie ', # 0x36
'Cheng ', # 0x37
'E ', # 0x38
'Qu ', # 0x39
'Di ', # 0x3a
'Zu ', # 0x3b
'Zuo ', # 0x3c
'Dian ', # 0x3d
'Ling ', # 0x3e
'A ', # 0x3f
'Tuo ', # 0x40
'Tuo ', # 0x41
'Po ', # 0x42
'Bing ', # 0x43
'Fu ', # 0x44
'Ji ', # 0x45
'Lu ', # 0x46
'Long ', # 0x47
'Chen ', # 0x48
'Xing ', # 0x49
'Duo ', # 0x4a
'Lou ', # 0x4b
'Mo ', # 0x4c
'Jiang ', # 0x4d
'Shu ', # 0x4e
'Duo ', # 0x4f
'Xian ', # 0x50
'Er ', # 0x51
'Gui ', # 0x52
'Yu ', # 0x53
'Gai ', # 0x54
'Shan ', # 0x55
'Xun ', # 0x56
'Qiao ', # 0x57
'Xing ', # 0x58
'Chun ', # 0x59
'Fu ', # 0x5a
'Bi ', # 0x5b
'Xia ', # 0x5c
'Shan ', # 0x5d
'Sheng ', # 0x5e
'Zhi ', # 0x5f
'Pu ', # 0x60
'Dou ', # 0x61
'Yuan ', # 0x62
'Zhen ', # 0x63
'Chu ', # 0x64
'Xian ', # 0x65
'Tou ', # 0x66
'Nie ', # 0x67
'Yun ', # 0x68
'Xian ', # 0x69
'Pei ', # 0x6a
'Pei ', # 0x6b
'Zou ', # 0x6c
'Yi ', # 0x6d
'Dui ', # 0x6e
'Lun ', # 0x6f
'Yin ', # 0x70
'Ju ', # 0x71
'Chui ', # 0x72
'Chen ', # 0x73
'Pi ', # 0x74
'Ling ', # 0x75
'Tao ', # 0x76
'Xian ', # 0x77
'Lu ', # 0x78
'Sheng ', # 0x79
'Xian ', # 0x7a
'Yin ', # 0x7b
'Zhu ', # 0x7c
'Yang ', # 0x7d
'Reng ', # 0x7e
'Shan ', # 0x7f
'Chong ', # 0x80
'Yan ', # 0x81
'Yin ', # 0x82
'Yu ', # 0x83
'Ti ', # 0x84
'Yu ', # 0x85
'Long ', # 0x86
'Wei ', # 0x87
'Wei ', # 0x88
'Nie ', # 0x89
'Dui ', # 0x8a
'Sui ', # 0x8b
'An ', # 0x8c
'Huang ', # 0x8d
'Jie ', # 0x8e
'Sui ', # 0x8f
'Yin ', # 0x90
'Gai ', # 0x91
'Yan ', # 0x92
'Hui ', # 0x93
'Ge ', # 0x94
'Yun ', # 0x95
'Wu ', # 0x96
'Wei ', # 0x97
'Ai ', # 0x98
'Xi ', # 0x99
'Tang ', # 0x9a
'Ji ', # 0x9b
'Zhang ', # 0x9c
'Dao ', # 0x9d
'Ao ', # 0x9e
'Xi ', # 0x9f
'Yin ', # 0xa0
'[?] ', # 0xa1
'Rao ', # 0xa2
'Lin ', # 0xa3
'Tui ', # 0xa4
'Deng ', # 0xa5
'Pi ', # 0xa6
'Sui ', # 0xa7
'Sui ', # 0xa8
'Yu ', # 0xa9
'Xian ', # 0xaa
'Fen ', # 0xab
'Ni ', # 0xac
'Er ', # 0xad
'Ji ', # 0xae
'Dao ', # 0xaf
'Xi ', # 0xb0
'Yin ', # 0xb1
'E ', # 0xb2
'Hui ', # 0xb3
'Long ', # 0xb4
'Xi ', # 0xb5
'Li ', # 0xb6
'Li ', # 0xb7
'Li ', # 0xb8
'Zhui ', # 0xb9
'He ', # 0xba
'Zhi ', # 0xbb
'Zhun ', # 0xbc
'Jun ', # 0xbd
'Nan ', # 0xbe
'Yi ', # 0xbf
'Que ', # 0xc0
'Yan ', # 0xc1
'Qian ', # 0xc2
'Ya ', # 0xc3
'Xiong ', # 0xc4
'Ya ', # 0xc5
'Ji ', # 0xc6
'Gu ', # 0xc7
'Huan ', # 0xc8
'Zhi ', # 0xc9
'Gou ', # 0xca
'Jun ', # 0xcb
'Ci ', # 0xcc
'Yong ', # 0xcd
'Ju ', # 0xce
'Chu ', # 0xcf
'Hu ', # 0xd0
'Za ', # 0xd1
'Luo ', # 0xd2
'Yu ', # 0xd3
'Chou ', # 0xd4
'Diao ', # 0xd5
'Sui ', # 0xd6
'Han ', # 0xd7
'Huo ', # 0xd8
'Shuang ', # 0xd9
'Guan ', # 0xda
'Chu ', # 0xdb
'Za ', # 0xdc
'Yong ', # 0xdd
'Ji ', # 0xde
'Xi ', # 0xdf
'Chou ', # 0xe0
'Liu ', # 0xe1
'Li ', # 0xe2
'Nan ', # 0xe3
'Xue ', # 0xe4
'Za ', # 0xe5
'Ji ', # 0xe6
'Ji ', # 0xe7
'Yu ', # 0xe8
'Yu ', # 0xe9
'Xue ', # 0xea
'Na ', # 0xeb
'Fou ', # 0xec
'Se ', # 0xed
'Mu ', # 0xee
'Wen ', # 0xef
'Fen ', # 0xf0
'Pang ', # 0xf1
'Yun ', # 0xf2
'Li ', # 0xf3
'Li ', # 0xf4
'Ang ', # 0xf5
'Ling ', # 0xf6
'Lei ', # 0xf7
'An ', # 0xf8
'Bao ', # 0xf9
'Meng ', # 0xfa
'Dian ', # 0xfb
'Dang ', # 0xfc
'Xing ', # 0xfd
'Wu ', # 0xfe
'Zhao ', # 0xff
)
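# Usage sketch: unidecode indexes this per-block table with the low byte
# of the code point, e.g. for U+9648 ("Chen"):
#
#   data[0x9648 & 0xff]  # -> 'Chen '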
|
gpl-3.0
| -4,134,706,003,999,951,000 | -4,282,919,073,919,835,000 | 16.868217 | 20 | 0.386985 | false |
xzYue/odoo
|
openerp/tools/image.py
|
172
|
10660
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from PIL import Image
from PIL import ImageEnhance
from random import randint
# ----------------------------------------
# Image resizing
# ----------------------------------------
def image_resize_image(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=False):
""" Function to resize an image. The image will be resized to the given
size, while keeping the aspect ratios, and holes in the image will be
filled with transparent background. The image will not be stretched if
smaller than the expected size.
Steps of the resizing:
- Compute width and height if not specified.
- if avoid_if_small: if both image sizes are smaller than the requested
sizes, the original image is returned. This is used to avoid adding
transparent content around images that we do not want to alter but
just resize if too big. This is used for example when storing images
in the 'image' field: we keep the original image, resized to a maximal
size, without adding transparent content around it if smaller.
- create a thumbnail of the source image through using the thumbnail
function. Aspect ratios are preserved when using it. Note that if the
source image is smaller than the expected size, it will not be
extended, but filled to match the size.
- create a transparent background that will hold the final image.
- paste the thumbnail on the transparent background and center it.
:param base64_source: base64-encoded version of the source
image; if False, returns False
:param size: 2-tuple(width, height). A None value for any of width or
            height means an automatically computed value, based respectively
            on the height or width of the source image.
:param encoding: the output encoding
:param filetype: the output filetype, by default the source image's
:type filetype: str, any PIL image format (supported for creation)
:param avoid_if_small: do not resize if image height and width
are smaller than the expected size.
"""
if not base64_source:
return False
if size == (None, None):
return base64_source
image_stream = StringIO.StringIO(base64_source.decode(encoding))
image = Image.open(image_stream)
# store filetype here, as Image.new below will lose image.format
filetype = (filetype or image.format).upper()
filetype = {
'BMP': 'PNG',
}.get(filetype, filetype)
asked_width, asked_height = size
if asked_width is None:
asked_width = int(image.size[0] * (float(asked_height) / image.size[1]))
if asked_height is None:
asked_height = int(image.size[1] * (float(asked_width) / image.size[0]))
size = asked_width, asked_height
# check image size: do not create a thumbnail if avoiding smaller images
if avoid_if_small and image.size[0] <= size[0] and image.size[1] <= size[1]:
return base64_source
if image.size != size:
image = image_resize_and_sharpen(image, size)
if image.mode not in ["1", "L", "P", "RGB", "RGBA"]:
image = image.convert("RGB")
background_stream = StringIO.StringIO()
image.save(background_stream, filetype)
return background_stream.getvalue().encode(encoding)
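# Example (assumes `src_b64` holds base64-encoded image data): force a
# 256x256 PNG thumbnail while keeping the aspect ratio:
#
#   thumb_b64 = image_resize_image(src_b64, size=(256, 256), filetype='PNG')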
def image_resize_and_sharpen(image, size, preserve_aspect_ratio=False, factor=2.0):
"""
Create a thumbnail by resizing while keeping ratio.
A sharpen filter is applied for a better looking result.
:param image: PIL.Image.Image()
:param size: 2-tuple(width, height)
:param preserve_aspect_ratio: boolean (default: False)
:param factor: Sharpen factor (default: 2.0)
"""
if image.mode != 'RGBA':
image = image.convert('RGBA')
image.thumbnail(size, Image.ANTIALIAS)
if preserve_aspect_ratio:
size = image.size
sharpener = ImageEnhance.Sharpness(image)
resized_image = sharpener.enhance(factor)
# create a transparent image for background and paste the image on it
image = Image.new('RGBA', size, (255, 255, 255, 0))
image.paste(resized_image, ((size[0] - resized_image.size[0]) / 2, (size[1] - resized_image.size[1]) / 2))
return image
def image_save_for_web(image, fp=None, format=None):
"""
Save image optimized for web usage.
:param image: PIL.Image.Image()
:param fp: File name or file object. If not specified, a bytestring is returned.
:param format: File format if could not be deduced from image.
"""
opt = dict(format=image.format or format)
if image.format == 'PNG':
opt.update(optimize=True)
alpha = False
if image.mode in ('RGBA', 'LA') or (image.mode == 'P' and 'transparency' in image.info):
alpha = image.convert('RGBA').split()[-1]
if image.mode != 'P':
# Floyd Steinberg dithering by default
image = image.convert('RGBA').convert('P', palette=Image.WEB, colors=256)
if alpha:
image.putalpha(alpha)
elif image.format == 'JPEG':
opt.update(optimize=True, quality=80)
if fp:
image.save(fp, **opt)
else:
img = StringIO.StringIO()
image.save(img, **opt)
return img.getvalue()
def image_resize_image_big(base64_source, size=(1024, 1024), encoding='base64', filetype=None, avoid_if_small=True):
""" Wrapper on image_resize_image, to resize images larger than the standard
'big' image size: 1024x1024px.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_medium(base64_source, size=(128, 128), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'medium'
        image size: 128x128.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
def image_resize_image_small(base64_source, size=(64, 64), encoding='base64', filetype=None, avoid_if_small=False):
""" Wrapper on image_resize_image, to resize to the standard 'small' image
        size: 64x64.
:param size, encoding, filetype, avoid_if_small: refer to image_resize_image
"""
return image_resize_image(base64_source, size, encoding, filetype, avoid_if_small)
# ----------------------------------------
# Colors
# ---------------------------------------
def image_colorize(original, randomize=True, color=(255, 255, 255)):
""" Add a color to the transparent background of an image.
:param original: file object on the original image file
:param randomize: randomize the background color
:param color: background-color, if not randomize
"""
# create a new image, based on the original one
original = Image.open(StringIO.StringIO(original))
image = Image.new('RGB', original.size)
    # generate the background color, paste it as the background
if randomize:
color = (randint(32, 224), randint(32, 224), randint(32, 224))
image.paste(color)
image.paste(original, mask=original)
# return the new image
buffer = StringIO.StringIO()
image.save(buffer, 'PNG')
return buffer.getvalue()
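# Example (file name is illustrative): replace transparency with a fixed
# color instead of a random one:
#
#   raw = open('avatar.png', 'rb').read()
#   png_bytes = image_colorize(raw, randomize=False, color=(200, 200, 200))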
# ----------------------------------------
# Misc image tools
# ---------------------------------------
def image_get_resized_images(base64_source, return_big=False, return_medium=True, return_small=True,
big_name='image', medium_name='image_medium', small_name='image_small',
avoid_resize_big=True, avoid_resize_medium=False, avoid_resize_small=False):
""" Standard tool function that returns a dictionary containing the
big, medium and small versions of the source image. This function
is meant to be used for the methods of functional fields for
models using images.
Default parameters are given to be used for the getter of functional
image fields, for example with res.users or res.partner. It returns
only image_medium and image_small values, to update those fields.
:param base64_source: base64-encoded version of the source
            image; if False, all returned values will be False
:param return_{..}: if set, computes and return the related resizing
of the image
:param {..}_name: key of the resized image in the return dictionary;
'image', 'image_medium' and 'image_small' by default.
:param avoid_resize_[..]: see avoid_if_small parameter
:return return_dict: dictionary with resized images, depending on
previous parameters.
"""
return_dict = dict()
if return_big:
return_dict[big_name] = image_resize_image_big(base64_source, avoid_if_small=avoid_resize_big)
if return_medium:
return_dict[medium_name] = image_resize_image_medium(base64_source, avoid_if_small=avoid_resize_medium)
if return_small:
return_dict[small_name] = image_resize_image_small(base64_source, avoid_if_small=avoid_resize_small)
return return_dict
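# Typical use in a functional field getter (sketch; `record.image` is a
# placeholder for whatever base64 column the model actually stores):
#
#   vals = image_get_resized_images(record.image)
#   # -> {'image_medium': ..., 'image_small': ...}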
if __name__=="__main__":
import sys
assert len(sys.argv)==3, 'Usage to Test: image.py SRC.png DEST.png'
img = file(sys.argv[1],'rb').read().encode('base64')
new = image_resize_image(img, (128,100))
file(sys.argv[2], 'wb').write(new.decode('base64'))
|
agpl-3.0
| -3,303,518,001,994,972,700 | 346,061,229,121,050 | 43.978903 | 118 | 0.64531 | false |
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py
|
101
|
3046
|
"""
Unit tests for degree centrality.
"""
from nose.tools import *
import networkx as nx
class TestDegreeCentrality:
def __init__(self):
self.K = nx.krackhardt_kite_graph()
self.P3 = nx.path_graph(3)
self.K5 = nx.complete_graph(5)
F = nx.Graph() # Florentine families
F.add_edge('Acciaiuoli','Medici')
F.add_edge('Castellani','Peruzzi')
F.add_edge('Castellani','Strozzi')
F.add_edge('Castellani','Barbadori')
F.add_edge('Medici','Barbadori')
F.add_edge('Medici','Ridolfi')
F.add_edge('Medici','Tornabuoni')
F.add_edge('Medici','Albizzi')
F.add_edge('Medici','Salviati')
F.add_edge('Salviati','Pazzi')
F.add_edge('Peruzzi','Strozzi')
F.add_edge('Peruzzi','Bischeri')
F.add_edge('Strozzi','Ridolfi')
F.add_edge('Strozzi','Bischeri')
F.add_edge('Ridolfi','Tornabuoni')
F.add_edge('Tornabuoni','Guadagni')
F.add_edge('Albizzi','Ginori')
F.add_edge('Albizzi','Guadagni')
F.add_edge('Bischeri','Guadagni')
F.add_edge('Guadagni','Lamberteschi')
self.F = F
G = nx.DiGraph()
G.add_edge(0,5)
G.add_edge(1,5)
G.add_edge(2,5)
G.add_edge(3,5)
G.add_edge(4,5)
G.add_edge(5,6)
G.add_edge(5,7)
G.add_edge(5,8)
self.G = G
def test_degree_centrality_1(self):
d = nx.degree_centrality(self.K5)
exact = dict(zip(range(5), [1]*5))
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
def test_degree_centrality_2(self):
d = nx.degree_centrality(self.P3)
exact = {0:0.5, 1:1, 2:0.5}
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
def test_degree_centrality_3(self):
d = nx.degree_centrality(self.K)
exact = {0:.444, 1:.444, 2:.333, 3:.667, 4:.333,
5:.556, 6:.556, 7:.333, 8:.222, 9:.111}
for n,dc in d.items():
assert_almost_equal(exact[n], float("%5.3f" % dc))
def test_degree_centrality_4(self):
d = nx.degree_centrality(self.F)
names = sorted(self.F.nodes())
dcs = [0.071, 0.214, 0.143, 0.214, 0.214, 0.071, 0.286,
0.071, 0.429, 0.071, 0.214, 0.214, 0.143, 0.286, 0.214]
exact = dict(zip(names, dcs))
for n,dc in d.items():
assert_almost_equal(exact[n], float("%5.3f" % dc))
def test_indegree_centrality(self):
d = nx.in_degree_centrality(self.G)
exact = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0,
5: 0.625, 6: 0.125, 7: 0.125, 8: 0.125}
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
def test_outdegree_centrality(self):
d = nx.out_degree_centrality(self.G)
exact = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125,
4: 0.125, 5: 0.375, 6: 0.0, 7: 0.0, 8: 0.0}
for n,dc in d.items():
assert_almost_equal(exact[n], dc)
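# For reference, degree centrality is degree / (n - 1); a quick manual
# cross-check against the library (illustrative, not part of the suite):
#
#   G = nx.path_graph(3)
#   manual = dict((n, d / 2.0) for n, d in dict(G.degree()).items())
#   assert manual == nx.degree_centrality(G)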
|
agpl-3.0
| -2,233,474,000,209,062,700 | 4,636,055,260,160,759,000 | 32.108696 | 70 | 0.526264 | false |
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_virtual_address.py
|
16
|
16948
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_virtual_address
short_description: Manage LTM virtual addresses on a BIG-IP.
description:
- Manage LTM virtual addresses on a BIG-IP.
version_added: "2.4"
options:
address:
description:
- Virtual address. This value cannot be modified after it is set.
required: True
aliases:
- name
netmask:
description:
- Netmask of the provided virtual address. This value cannot be
modified after it is set.
default: 255.255.255.255
connection_limit:
description:
- Specifies the number of concurrent connections that the system
allows on this virtual address.
arp_state:
description:
      - Specifies whether the system accepts ARP requests. When C(disabled),
        specifies that the system does not accept ARP requests. Note that
        both ARP and ICMP Echo must be disabled in order for forwarding
        virtual servers using that virtual address to forward ICMP packets.
        If C(enabled), then the packets are dropped.
choices:
- enabled
- disabled
auto_delete:
description:
- Specifies whether the system automatically deletes the virtual
address with the deletion of the last associated virtual server.
When C(disabled), specifies that the system leaves the virtual
address even when all associated virtual servers have been deleted.
When creating the virtual address, the default value is C(enabled).
choices:
- enabled
- disabled
icmp_echo:
description:
      - Specifies how the system sends responses to ICMP echo requests
        on a per-virtual address basis for enabling route advertisement.
        When C(enabled), the BIG-IP system intercepts ICMP echo request
        packets and responds to them directly. When C(disabled), the BIG-IP
        system passes ICMP echo requests through to the backend servers.
        When C(selective), the BIG-IP system internally enables or disables
        responses based on virtual server state; C(when_any_available),
        C(when_all_available), or C(always), regardless of the state of any
        virtual servers.
choices:
- enabled
- disabled
- selective
state:
description:
- The virtual address state. If C(absent), an attempt to delete the
virtual address will be made. This will only succeed if this
virtual address is not in use by a virtual server. C(present) creates
the virtual address and enables it. If C(enabled), enable the virtual
address if it exists. If C(disabled), create the virtual address if
needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
advertise_route:
description:
- Specifies what routes of the virtual address the system advertises.
When C(when_any_available), advertises the route when any virtual
server is available. When C(when_all_available), advertises the
        route when all virtual servers are available. When C(always), always
advertises the route regardless of the virtual servers available.
choices:
- always
- when_all_available
- when_any_available
use_route_advertisement:
description:
- Specifies whether the system uses route advertisement for this
virtual address. When disabled, the system does not advertise
routes for this virtual address.
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host. This is as easy as pip
install netaddr.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
address: "10.10.10.10"
delegate_to: localhost
- name: Enable route advertisement on the virtual address
bigip_virtual_address:
server: "lb.mydomain.net"
user: "admin"
password: "secret"
state: "present"
address: "10.10.10.10"
use_route_advertisement: yes
delegate_to: localhost
'''
RETURN = '''
use_route_advertisement:
description: The new setting for whether to use route advertising or not.
returned: changed
type: bool
sample: true
auto_delete:
description: New setting for auto deleting virtual address.
returned: changed
type: string
sample: enabled
icmp_echo:
description: New ICMP echo setting applied to virtual address.
returned: changed
type: string
sample: disabled
connection_limit:
description: The new connection limit of the virtual address.
returned: changed
type: int
sample: 1000
netmask:
description: The netmask of the virtual address.
returned: created
type: int
sample: 2345
arp_state:
description: The new way the virtual address handles ARP requests.
returned: changed
type: string
sample: disabled
address:
description: The address of the virtual address.
returned: created
type: int
sample: 2345
state:
description: The new state of the virtual address.
returned: changed
type: string
sample: disabled
'''
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
class Parameters(AnsibleF5Parameters):
api_map = {
'routeAdvertisement': 'use_route_advertisement',
'autoDelete': 'auto_delete',
'icmpEcho': 'icmp_echo',
'connectionLimit': 'connection_limit',
'serverScope': 'advertise_route',
'mask': 'netmask',
'arp': 'arp_state'
}
updatables = [
'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
'arp_state', 'enabled', 'advertise_route'
]
returnables = [
'use_route_advertisement', 'auto_delete', 'icmp_echo', 'connection_limit',
'netmask', 'arp_state', 'address', 'state'
]
api_attributes = [
'routeAdvertisement', 'autoDelete', 'icmpEcho', 'connectionLimit',
'advertiseRoute', 'arp', 'mask', 'enabled', 'serverScope'
]
@property
def advertise_route(self):
if self._values['advertise_route'] is None:
return None
elif self._values['advertise_route'] in ['any', 'when_any_available']:
return 'any'
elif self._values['advertise_route'] in ['all', 'when_all_available']:
return 'all'
elif self._values['advertise_route'] in ['none', 'always']:
return 'none'
@property
def connection_limit(self):
if self._values['connection_limit'] is None:
return None
return int(self._values['connection_limit'])
@property
def use_route_advertisement(self):
if self._values['use_route_advertisement'] is None:
return None
elif self._values['use_route_advertisement'] in BOOLEANS_TRUE:
return 'enabled'
elif self._values['use_route_advertisement'] == 'enabled':
return 'enabled'
else:
return 'disabled'
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return 'yes'
elif self._values['enabled'] in BOOLEANS_TRUE:
return 'yes'
elif self._values['state'] == 'disabled':
return 'no'
elif self._values['enabled'] in BOOLEANS_FALSE:
return 'no'
else:
return None
@property
def address(self):
if self._values['address'] is None:
return None
try:
ip = netaddr.IPAddress(self._values['address'])
return str(ip)
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'address' is not a valid IP address"
)
@property
def netmask(self):
if self._values['netmask'] is None:
return None
try:
ip = netaddr.IPAddress(self._values['netmask'])
return str(ip)
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'netmask' is not a valid IP address"
)
@property
def auto_delete(self):
if self._values['auto_delete'] is None:
return None
elif self._values['auto_delete'] in BOOLEANS_TRUE:
return True
elif self._values['auto_delete'] == 'enabled':
return True
else:
return False
@property
def state(self):
if self.enabled == 'yes' and self._values['state'] != 'present':
return 'enabled'
elif self.enabled == 'no':
return 'disabled'
else:
return self._values['state']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def read_current_from_device(self):
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
result = resource.attrs
return Parameters(result)
def exists(self):
result = self.client.api.tm.ltm.virtual_address_s.virtual_address.exists(
name=self.want.address,
partition=self.want.partition
)
return result
def update(self):
self.have = self.read_current_from_device()
if self.want.netmask is not None:
if self.have.netmask != self.want.netmask:
raise F5ModuleError(
"The netmask cannot be changed. Delete and recreate"
"the virtual address if you need to do this."
)
if self.want.address is not None:
if self.have.address != self.want.address:
raise F5ModuleError(
"The address cannot be changed. Delete and recreate"
"the virtual address if you need to do this."
)
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
resource.modify(**params)
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the virtual address")
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.virtual_address_s.virtual_address.create(
name=self.want.address,
partition=self.want.partition,
address=self.want.address,
**params
)
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the virtual address")
return True
def remove_from_device(self):
resource = self.client.api.tm.ltm.virtual_address_s.virtual_address.load(
name=self.want.address,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
address=dict(
type='str',
required=True,
aliases=['name']
),
netmask=dict(
type='str',
default='255.255.255.255',
),
connection_limit=dict(
type='int'
),
arp_state=dict(
choices=['enabled', 'disabled'],
),
auto_delete=dict(
choices=['enabled', 'disabled'],
),
icmp_echo=dict(
choices=['enabled', 'disabled', 'selective'],
),
advertise_route=dict(
choices=['always', 'when_all_available', 'when_any_available'],
),
use_route_advertisement=dict(
type='bool'
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
bsd-3-clause
| 8,362,831,872,174,630,000 | 6,518,781,043,633,936,000 | 30.385185 | 83 | 0.600071 | false |
spektom/incubator-airflow
|
airflow/contrib/operators/sagemaker_endpoint_operator.py
|
5
|
1207
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker_endpoint`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.sagemaker_endpoint import SageMakerEndpointOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.sagemaker_endpoint`.",
DeprecationWarning, stacklevel=2
)
|
apache-2.0
| -7,547,651,863,822,364,000 | -415,111,582,759,171,100 | 42.107143 | 105 | 0.779619 | false |
glenntanner3/devedeng
|
src/devedeng/interface_manager.py
|
4
|
25492
|
# Copyright 2014 (C) Raster Software Vigo (Sergio Costas)
#
# This file is part of DeVeDe-NG
#
# DeVeDe-NG is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DeVeDe-NG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from gi.repository import GObject,Gdk
class interface_manager(GObject.GObject):
""" This class allows to automatically generate variables for a GLADE interface,
set the widgets in the interface to their values, and copy the current values
in the widgets to the variables """
def __init__(self):
GObject.GObject.__init__(self)
self.interface_groups = {}
self.interface_toggles = []
self.interface_dualtoggles = []
self.interface_labels = []
self.interface_text = []
self.interface_show_hide = []
self.interface_enable_disable = []
self.interface_float_adjustments = []
self.interface_integer_adjustments = []
self.interface_lists = []
self.interface_colorbuttons = []
self.interface_fontbuttons = []
self.interface_filebuttons = []
self.interface_comboboxes = []
def add_group(self,group_name,radiobutton_list,default_value,callback = None):
""" Adds a group of radiobuttons and creates an internal variable with
the name group_name, setting it to default_value. The
value for the variable will be the name of the active
radiobutton """
if (default_value != None):
exec('self.'+group_name+' = "'+str(default_value)+'"')
else:
exec('self.'+group_name+' = None')
self.interface_groups[group_name] = ( radiobutton_list, callback )
def add_toggle(self,toggle_name,default_value,callback = None):
""" Adds an internal variable with the name toggle_name, linked to a widget
        element with the same name (must be or inherit from Gtk.ToggleButton).
        The default value can be True or False """
exec('self.'+toggle_name+' = '+str(default_value))
self.interface_toggles.append( (toggle_name, callback) )
def add_dualtoggle(self,toggle_name,toggle2,default_value,callback = None):
""" Adds an internal variable with the name toggle_name, linked to widget
        elements with names toggle_name and toggle2 (must be or inherit from Gtk.ToggleButton).
        The default value can be True or False, with True meaning toggle_name is active, and
        False meaning toggle2 is active """
exec('self.'+toggle_name+' = '+str(default_value))
self.interface_dualtoggles.append( (toggle_name, toggle2, callback) )
def add_text(self,text_name,default_value,callback = None):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.TextEntry or a Gtk.Label).
The default value can be a text or None """
if (default_value != None):
exec('self.'+text_name+' = "'+str(default_value).replace('\"','\\"')+'"')
else:
exec('self.'+text_name+' = None')
self.interface_text.append( (text_name, callback) )
def add_label(self,text_name,default_value):
""" Adds an internal variable with the name text_name, linked to an
        element with the same name (must be a Gtk.Entry or a Gtk.Label).
The default value can be a text or None. This element is copied to the UI,
but is never updated from the UI if the user changes it """
exec('self.'+text_name+' = default_value')
self.interface_labels.append(text_name)
def add_integer_adjustment(self,adjustment_name,default_value,callback = None):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.Adjustment).
The default value must be an integer """
exec('self.'+adjustment_name+' = '+str(default_value))
self.interface_integer_adjustments.append( (adjustment_name, callback) )
def add_float_adjustment(self,adjustment_name,default_value,callback = None):
""" Adds an internal variable with the name text_name, linked to an
element with the same name (must be a Gtk.Adjustment).
        The default value must be a float """
exec('self.'+adjustment_name+' = '+str(default_value))
self.interface_float_adjustments.append( (adjustment_name, callback))
def add_list(self,liststore_name,callback = None):
""" Adds an internal variable with the name liststore_name, linked to
an element with the same name (must be a Gtk.ListStore). """
exec('self.'+liststore_name+' = []')
self.interface_lists.append( (liststore_name, callback ))
def add_colorbutton(self,colorbutton_name, default_value,callback = None):
""" Adds an internal variable with the name colorbutton_name, linked to an
element with the same name (must be a Gtk.ColorButton).
        The default value must be a tuple with RGBA values """
exec('self.'+colorbutton_name+' = default_value')
self.interface_colorbuttons.append( (colorbutton_name, callback ))
def add_fontbutton(self,fontbutton_name, default_value, callback = None):
""" Adds an internal variable with the name fontbutton_name, linked to an
element with the same name (must be a Gtk.FontButton).
The default value must be a string with the font values """
exec('self.'+fontbutton_name+' = default_value')
self.interface_fontbuttons.append( (fontbutton_name, callback ))
def add_filebutton(self,filebutton_name, default_value, callback = None):
""" Adds an internal variable with the name filebutton_name, linked to an
        element with the same name (must be a Gtk.FileChooserButton).
        The default value must be a string with the file path """
exec('self.'+filebutton_name+' = default_value')
self.interface_filebuttons.append( (filebutton_name, callback ) )
def add_combobox(self,combobox_name,values,default_value,callback = None):
""" Adds an internal variable with the name combobox_name, linked to an
        element with the same name (must be a Gtk.ComboBox).
The default value must be an integer with the entry selected """
exec('self.'+combobox_name+' = default_value')
self.interface_comboboxes.append ( (combobox_name, values, callback) )
def add_show_hide(self,element_name,to_show,to_hide):
""" Adds an element that can be active or inactive, and two lists of elements.
The first one contains elements that will be visible when the element is
active, and invisible when it is inactive, and the second one contains
elements that will be visible when the element is inactive, and
invisible when the element is active """
self.interface_show_hide.append([element_name, to_show, to_hide])
def add_enable_disable(self,element_name,to_enable,to_disable):
""" Adds an element that can be active or inactive, and two lists of elements.
The first one contains elements that will be enabled when the element is
active, and disabled when it is inactive, and the second one contains
elements that will be enabled when the element is inactive, and
disabled when the element is active """
self.interface_enable_disable.append([element_name, to_enable, to_disable])
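    # Usage sketch (hypothetical subclass; assumes a Gtk.Builder `builder`
    # whose .ui file defines widgets named after the variables declared below):
    #
    #   class my_settings(interface_manager):
    #       def __init__(self):
    #           interface_manager.__init__(self)
    #           self.add_toggle('use_subtitles', False)
    #           self.add_text('output_path', None)
    #
    #   settings = my_settings()
    #   settings.update_ui(builder)  # push the defaults into the widgets
    #   # ...user edits the dialog...
    #   settings.store_ui(builder)   # read the widget state back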
def update_ui(self,builder):
""" Sets the value of the widgets in base of the internal variables """
for key in self.interface_groups:
obj = eval('self.'+key)
builder.get_object(obj).set_active(True)
callback = self.interface_groups[key][1]
if (callback != None):
for element in self.interface_groups[key][0]:
obj = builder.get_object(element)
obj.connect("toggled",callback)
for element in self.interface_toggles:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
obj.set_active(value)
callback = element[1]
if (callback != None):
obj.connect("toggled",callback)
for element in self.interface_dualtoggles:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
obj2 = builder.get_object(element[1])
if value:
obj.set_active(True)
else:
obj2.set_active(True)
callback = element[2]
if (callback != None):
obj.connect("toggled",callback)
for element in self.interface_text:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
if (value != None):
obj.set_text(value)
else:
obj.set_text("")
callback = element[1]
if (callback != None):
obj.connect("changed",callback)
for element in self.interface_labels:
value = eval('self.'+element)
obj = builder.get_object(element)
if obj != None:
if (value != None):
obj.set_text(str(value))
else:
obj.set_text("")
for element in self.interface_integer_adjustments:
obj = builder.get_object(element[0])
if obj != None:
value = eval('self.'+element[0])
obj.set_value(float(value))
callback = element[1]
if (callback != None):
obj.connect("value_changed",callback)
for element in self.interface_float_adjustments:
obj = builder.get_object(element[0])
if obj != None:
value = eval('self.'+element[0])
obj.set_value(value)
callback = element[1]
if (callback != None):
obj.connect("value_changed",callback)
for element in self.interface_lists:
obj = eval('self.'+element[0])
the_liststore = builder.get_object(element[0])
the_liststore.clear()
for item in obj:
the_liststore.append(item)
callback = element[1]
if (callback != None):
the_liststore.connect("row_changed",callback)
the_liststore.connect("row_deleted",callback)
the_liststore.connect("row_inserted",callback)
the_liststore.connect("row_reordered",callback)
for element in self.interface_colorbuttons:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
objcolor = Gdk.Color(int(value[0]*65535.0),int(value[1]*65535.0),int(value[2]*65535.0))
obj.set_color(objcolor)
obj.set_alpha(int(value[3]*65535.0))
callback = element[1]
if (callback != None):
obj.connect("color_set",callback)
for element in self.interface_fontbuttons:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
if (value != None):
obj.set_font(value)
callback = element[1]
if (callback != None):
obj.connect("font_set",callback)
for element in self.interface_filebuttons:
value = eval('self.'+element[0])
obj = builder.get_object(element[0])
if (value != None):
obj.set_filename(value)
callback = element[1]
if (callback != None):
obj.connect("file_set",callback)
for element in self.interface_comboboxes:
obj = eval('self.'+element[0])
the_combo = builder.get_object(element[0])
the_list = the_combo.get_model()
the_list.clear()
counter = 0
dv = 0
for item in element[1]:
the_list.append([item])
if (item == obj):
dv = counter
counter += 1
the_combo.set_active(dv)
callback = element[2]
if (callback != None):
the_combo.connect("changed",callback)
self.interface_show_hide_obj = {}
for element in self.interface_show_hide:
obj = builder.get_object(element[0])
to_show = []
for e2 in element[1]:
to_show.append(builder.get_object(e2))
to_hide = []
for e3 in element[2]:
to_hide.append(builder.get_object(e3))
self.interface_show_hide_obj[obj] = [to_show, to_hide]
obj.connect('toggled',self.toggled_element)
self.toggled_element(obj)
self.interface_enable_disable_obj = {}
for element in self.interface_enable_disable:
obj = builder.get_object(element[0])
to_enable = []
for e2 in element[1]:
to_enable.append(builder.get_object(e2))
to_disable = []
for e3 in element[2]:
to_disable.append(builder.get_object(e3))
self.interface_enable_disable_obj[obj] = [to_enable, to_disable]
obj.connect('toggled',self.toggled_element2)
self.toggled_element2(obj)
def toggled_element(self,element):
""" Wenever an element with 'hide' or 'show' needs is toggled, this callback is called """
# First, show all items for each possible element
for key in self.interface_show_hide_obj:
to_show = self.interface_show_hide_obj[key][0]
to_hide = self.interface_show_hide_obj[key][1]
active = key.get_active()
for item in to_show:
if active:
item.show()
for item in to_hide:
if not active:
item.show()
        # And now, hide all items that must be hidden.
        # This is done this way because it allows an item to be hidden by one
        # widget and shown by another: in that case, it will always be hidden.
for key in self.interface_show_hide_obj:
to_show = self.interface_show_hide_obj[key][0]
to_hide = self.interface_show_hide_obj[key][1]
active = key.get_active()
for item in to_show:
if not active:
item.hide()
for item in to_hide:
if active:
item.hide()
def toggled_element2(self,element):
""" Wenever an element with 'enable' or 'disable' needs is toggled, this callback is called """
# First enable all items that must be enabled
for key in self.interface_enable_disable_obj:
to_enable = self.interface_enable_disable_obj[key][0]
to_disable = self.interface_enable_disable_obj[key][1]
active = key.get_active()
if (active):
for item in to_enable:
item.set_sensitive(True)
else:
for item in to_disable:
item.set_sensitive(True)
# And now, disable all items that must be disabled
        # This is done this way because it allows an item to be disabled by one
        # widget and enabled by another: in that case, it will always be disabled.
for key in self.interface_enable_disable_obj:
to_enable = self.interface_enable_disable_obj[key][0]
to_disable = self.interface_enable_disable_obj[key][1]
active = key.get_active()
if (not active):
for item in to_enable:
item.set_sensitive(False)
else:
for item in to_disable:
item.set_sensitive(False)
def store_ui(self,builder):
""" Takes the values of the widgets and stores them in the internal variables """
for key in self.interface_groups:
for element in self.interface_groups[key][0]:
obj = builder.get_object(element)
if obj.get_active():
exec('self.'+key+' = "'+element+'"')
break
for element in self.interface_toggles:
obj = builder.get_object(element[0])
if obj.get_active():
exec('self.'+element[0]+' = True')
else:
exec('self.'+element[0]+' = False')
for element in self.interface_dualtoggles:
obj = builder.get_object(element[0])
if obj.get_active():
exec('self.'+element[0]+' = True')
else:
exec('self.'+element[0]+' = False')
for element in self.interface_text:
obj = builder.get_object(element[0])
exec('self.'+element[0]+' = obj.get_text()')
for element in self.interface_integer_adjustments:
obj = builder.get_object(element[0])
if obj != None:
exec('self.'+element[0]+' = int(obj.get_value())')
for element in self.interface_float_adjustments:
obj = builder.get_object(element[0])
if obj != None:
exec('self.'+element[0]+' = obj.get_value()')
for element in self.interface_colorbuttons:
obj = builder.get_object(element[0])
objcolor = obj.get_color()
alpha = obj.get_alpha()
exec('self.'+element[0]+' = ((float(objcolor.red))/65535.0, (float(objcolor.green))/65535.0, (float(objcolor.blue))/65535.0, (float(alpha))/65535.0)')
for element in self.interface_fontbuttons:
obj = builder.get_object(element[0])
exec('self.'+element[0]+' = obj.get_font()')
for element in self.interface_filebuttons:
obj = builder.get_object(element[0])
exec('self.'+element[0]+' = obj.get_filename()')
for element in self.interface_lists:
exec('self.'+element[0]+' = []')
the_liststore = builder.get_object(element[0])
ncolumns = the_liststore.get_n_columns()
for row in the_liststore:
final_row = []
for c in range(0,ncolumns):
final_row.append(row.model[row.iter][c])
exec('self.'+element[0]+'.append(final_row)')
for element in self.interface_comboboxes:
obj = builder.get_object(element[0])
exec('self.'+element[0]+' = element[1][obj.get_active()]')
def save_ui(self):
""" Makes a copy of all the UI variables """
for element in self.interface_groups:
exec('self.'+element+'_backup = self.'+element)
for element in self.interface_toggles:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_dualtoggles:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_text:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_integer_adjustments:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_float_adjustments:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_colorbuttons:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_fontbuttons:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_filebuttons:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_lists:
exec('self.'+element[0]+'_backup = self.'+element[0])
for element in self.interface_comboboxes:
exec('self.'+element[0]+'_backup = self.'+element[0])
def restore_ui(self):
""" Restores a copy of all the UI variables """
for element in self.interface_groups:
exec('self.'+element+' = self.'+element+'_backup')
for element in self.interface_toggles:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_dualtoggles:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_text:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_integer_adjustments:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_float_adjustments:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_colorbuttons:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_fontbuttons:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_filebuttons:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_lists:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
for element in self.interface_comboboxes:
exec('self.'+element[0]+' = self.'+element[0]+'_backup')
def serialize(self):
""" Returns a dictionary with both the variables of the interface and its values,
which can be restored with unserialize
"""
output = {}
for element in self.interface_groups:
output[element] = eval('self.'+element)
for element in self.interface_toggles:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_dualtoggles:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_text:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_integer_adjustments:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_float_adjustments:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_colorbuttons:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_fontbuttons:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_filebuttons:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_lists:
output[element[0]] = eval('self.'+element[0])
for element in self.interface_comboboxes:
output[element[0]] = eval('self.'+element[0])
return output
def unserialize(self,data_list):
""" Takes a dictionary with the variables of the interface and its values,
and restores them into their variables
"""
for element in self.interface_groups:
if element in data_list:
exec('self.'+element+' = data_list["'+element+'"]')
for element in self.interface_toggles:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_dualtoggles:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_text:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_integer_adjustments:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_float_adjustments:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_colorbuttons:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_fontbuttons:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_filebuttons:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_lists:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
for element in self.interface_comboboxes:
if element[0] in data_list:
exec('self.'+element[0]+' = data_list["'+element[0]+'"]')
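# --- usage sketch (editor addition, not part of the original file) ---
# The class above is only an excerpt, so its name and its __init__ (which
# creates the interface_* registries) are assumptions here; the subclass
# name and widget ids below are hypothetical. A typical round trip with a
# Gtk.Builder instance would look like:
#
#     settings = SomeSettingsSubclass()
#     settings.add_show_hide('use_custom_font',
#                            ['font_selector_box'],   # shown when active
#                            ['default_font_label'])  # shown when inactive
#     settings.update_ui(builder)    # push internal values into widgets
#     ...                            # user edits the dialog
#     settings.store_ui(builder)     # pull widget values back
#     state = settings.serialize()   # plain dict, e.g. for json.dump()
#     settings.unserialize(state)    # restore the saved values later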
|
gpl-3.0
| 5,512,632,086,876,698,000 | -4,912,947,315,566,944,000 | 43.256944 | 162 | 0.579241 | false |
jgeskens/django
|
django/db/models/loading.py
|
8
|
10624
|
"Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
from django.utils import six
import imp
import sys
import os
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
'load_app', 'app_cache_ready')
class AppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
# Keys of app_store are the model modules for each application.
app_store=SortedDict(),
# Mapping of installed app_labels to model modules for that app.
app_labels={},
# Mapping of app_labels to a dictionary of model names to model code.
# May contain apps that are not installed.
app_models=SortedDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors={},
# -- Everything below here is only used when populating the cache --
loaded=False,
handled={},
postponed=[],
nesting_level=0,
_get_models_cache={},
)
def __init__(self):
self.__dict__ = self.__shared_state
def _populate(self):
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
cache is already initialised, it does no work.
"""
if self.loaded:
return
# Note that we want to use the import lock here - the app loading is
# in many cases initiated implicitly by importing, and thus it is
# possible to end up in deadlock when one thread initiates loading
# without holding the importer lock and another thread then tries to
# import something which also launches the app loading. For details of
# this situation see #18251.
imp.acquire_lock()
try:
if self.loaded:
return
for app_name in settings.INSTALLED_APPS:
if app_name in self.handled:
continue
self.load_app(app_name, True)
if not self.nesting_level:
for app_name in self.postponed:
self.load_app(app_name)
self.loaded = True
finally:
imp.release_lock()
def _label_for(self, app_mod):
"""
Return app_label for given models module.
"""
return app_mod.__name__.split('.')[-2]
def load_app(self, app_name, can_postpone=False):
"""
Loads the app with the provided fully qualified name, and returns the
model module.
"""
self.handled[app_name] = None
self.nesting_level += 1
app_module = import_module(app_name)
try:
models = import_module('.models', app_name)
except ImportError:
self.nesting_level -= 1
# If the app doesn't have a models module, we can just ignore the
# ImportError and return no models for it.
if not module_has_submodule(app_module, 'models'):
return None
# But if the app does have a models module, we need to figure out
# whether to suppress or propagate the error. If can_postpone is
# True then it may be that the package is still being imported by
# Python and the models module isn't available yet. So we add the
# app to the postponed list and we'll try it again after all the
# recursion has finished (in populate). If can_postpone is False
# then it's time to raise the ImportError.
else:
if can_postpone:
self.postponed.append(app_name)
return None
else:
raise
self.nesting_level -= 1
if models not in self.app_store:
self.app_store[models] = len(self.app_store)
self.app_labels[self._label_for(models)] = models
return models
def app_cache_ready(self):
"""
Returns true if the model cache is fully populated.
Useful for code that wants to cache the results of get_models() for
themselves once it is safe to do so.
"""
return self.loaded
def get_apps(self):
"Returns a list of all installed modules that contain models."
self._populate()
# Ensure the returned list is always in the same order (with new apps
# added at the end). This avoids unstable ordering on the admin app
# list page, for example.
apps = [(v, k) for k, v in self.app_store.items()]
apps.sort()
return [elt[1] for elt in apps]
def get_app(self, app_label, emptyOK=False):
"""
Returns the module containing the models for the given app_label. If
the app has no models in it and 'emptyOK' is True, returns None.
"""
self._populate()
imp.acquire_lock()
try:
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = self.load_app(app_name, False)
if mod is None:
if emptyOK:
return None
raise ImproperlyConfigured("App with label %s is missing a models.py module." % app_label)
else:
return mod
raise ImproperlyConfigured("App with label %s could not be found" % app_label)
finally:
imp.release_lock()
def get_app_errors(self):
"Returns the map of known problems with the INSTALLED_APPS."
self._populate()
return self.app_errors
def get_models(self, app_mod=None,
include_auto_created=False, include_deferred=False,
only_installed=True, include_swapped=False):
"""
Given a module containing models, returns a list of the models.
Otherwise returns a list of all installed models.
By default, auto-created models (i.e., m2m models without an
explicit intermediate table) are not included. However, if you
specify include_auto_created=True, they will be.
By default, models created to satisfy deferred attribute
queries are *not* included in the list of models. However, if
you specify include_deferred, they will be.
By default, models that aren't part of installed apps will *not*
be included in the list of models. However, if you specify
only_installed=False, they will be.
By default, models that have been swapped out will *not* be
included in the list of models. However, if you specify
include_swapped, they will be.
"""
cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped)
try:
return self._get_models_cache[cache_key]
except KeyError:
pass
self._populate()
if app_mod:
if app_mod in self.app_store:
app_list = [self.app_models.get(self._label_for(app_mod),
SortedDict())]
else:
app_list = []
else:
if only_installed:
app_list = [self.app_models.get(app_label, SortedDict())
for app_label in six.iterkeys(self.app_labels)]
else:
app_list = six.itervalues(self.app_models)
model_list = []
for app in app_list:
model_list.extend(
model for model in app.values()
if ((not model._deferred or include_deferred) and
(not model._meta.auto_created or include_auto_created) and
(not model._meta.swapped or include_swapped))
)
self._get_models_cache[cache_key] = model_list
return model_list
def get_model(self, app_label, model_name,
seed_cache=True, only_installed=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
self._populate()
if only_installed and app_label not in self.app_labels:
return None
return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
def register_models(self, app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the app_models dictionary
model_name = model._meta.model_name
model_dict = self.app_models.setdefault(app_label, SortedDict())
if model_name in model_dict:
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(upath(sys.modules[model.__module__].__file__))
fname2 = os.path.abspath(upath(sys.modules[model_dict[model_name].__module__].__file__))
# Since the filename extension could be .py the first time and
# .pyc or .pyo the second time, ignore the extension when
# comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
self._get_models_cache.clear()
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps
get_app = cache.get_app
get_app_errors = cache.get_app_errors
get_models = cache.get_models
get_model = cache.get_model
register_models = cache.register_models
load_app = cache.load_app
app_cache_ready = cache.app_cache_ready
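# --- usage sketch (editor addition, not part of the original file) ---
# The module-level aliases above are the historical public API (deprecated
# in Django 1.7 and removed in 1.9 in favour of django.apps). With a
# configured settings module they can be used like this:
#
#     from django.db.models.loading import get_model, get_models
#
#     User = get_model('auth', 'user')   # case-insensitive model name,
#                                        # returns None if not found
#     models = get_models(include_auto_created=True)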
|
bsd-3-clause
| 7,951,813,445,685,938,000 | 7,086,568,614,851,821,000 | 38.494424 | 114 | 0.589985 | false |
OptimusGitEtna/RestSymf
|
Python-3.4.2/Lib/_collections_abc.py
|
68
|
19967
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation-specific types that
# are not included in this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
'Return the next item from the iterator. When exhausted, raise StopIteration'
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
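# --- usage sketch (editor addition, not part of the stdlib module) ---
# The __subclasshook__ methods above make isinstance() checks structural:
# any class that defines __len__ passes the Sized check without explicit
# registration or inheritance. _Box is a hypothetical name used only for
# illustration.
class _Box:
    def __len__(self):
        return 1
assert isinstance(_Box(), Sized)        # structural match via __len__
assert not isinstance(_Box(), Container)  # no __contains__, so it fails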
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), redefine __le__ and __ge__,
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) > len(other) and self.__ge__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) < len(other):
return False
for elem in other:
if elem not in self:
return False
return True
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
__rand__ = __and__
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
__ror__ = __or__
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __rsub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in other
if value not in self)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
__rxor__ = __xor__
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
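# --- usage sketch (editor addition, not part of the stdlib module) ---
# A Set built on a list, adapted from the collections.abc documentation:
# only __contains__, __iter__ and __len__ are supplied, and the mixin
# methods above provide the comparisons and algebra (__le__, __and__,
# __or__, isdisjoint, ...). _ListBasedSet is an illustrative name.
class _ListBasedSet(Set):
    def __init__(self, iterable):
        # Keep first occurrences only, preserving insertion order.
        self._elements = elements = []
        for value in iterable:
            if value not in elements:
                elements.append(value)
    def __iter__(self):
        return iter(self._elements)
    def __contains__(self, value):
        return value in self._elements
    def __len__(self):
        return len(self._elements)
# Example: _ListBasedSet('abcdef') & _ListBasedSet('defghi') contains
# exactly the elements 'd', 'e' and 'f'.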
class MutableSet(Set):
"""A mutable set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__, __len__,
add(), and discard().
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
__slots__ = ()
"""A Mapping is a generic container for associating key/value
pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __iter__, and __len__.
"""
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return KeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return ItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
Mapping.register(mappingproxy)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
yield from self._mapping
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
"""A MutableMapping is a generic container for associating
key/value pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __setitem__, __delitem__,
__iter__, and __len__.
"""
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
'''D.popitem() -> (k, v), remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
'''
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
'D.clear() -> None. Remove all items from D.'
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
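# --- usage sketch (editor addition, not part of the stdlib module) ---
# A minimal mapping storing its data in a plain dict: once the five
# abstract methods exist, get, pop, popitem, clear, update, setdefault,
# keys, items and values are all inherited from the mixins above.
# _DictBackedMapping is an illustrative name.
class _DictBackedMapping(MutableMapping):
    def __init__(self, *args, **kwds):
        self._data = {}
        self.update(*args, **kwds)   # mixin update() drives __setitem__
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)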
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
'''S.index(value) -> integer -- return first index of value.
Raises ValueError if the value is not present.
'''
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
"""All the operations on a read-write sequence.
Concrete subclasses must provide __new__ or __init__,
__getitem__, __setitem__, __delitem__, __len__, and insert().
"""
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
'S.insert(index, value) -- insert value before index'
raise IndexError
def append(self, value):
'S.append(value) -- append value to the end of the sequence'
self.insert(len(self), value)
def clear(self):
'S.clear() -> None -- remove all items from S'
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
'S.extend(iterable) -- extend sequence by appending elements from the iterable'
for v in values:
self.append(v)
def pop(self, index=-1):
'''S.pop([index]) -> item -- remove and return item at index (default last).
Raise IndexError if list is empty or index is out of range.
'''
v = self[index]
del self[index]
return v
def remove(self, value):
'''S.remove(value) -- remove first occurrence of value.
Raise ValueError if the value is not present.
'''
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
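# --- usage sketch (editor addition, not part of the stdlib module) ---
# Wrapping a list yields a full mutable sequence: append, extend, pop,
# remove, reverse, clear, += and the read-only mixins (index, count,
# __contains__, __reversed__) all derive from the five methods below.
# _ListWrapper is an illustrative name.
class _ListWrapper(MutableSequence):
    def __init__(self, iterable=()):
        self._items = list(iterable)
    def __getitem__(self, index):
        return self._items[index]
    def __setitem__(self, index, value):
        self._items[index] = value
    def __delitem__(self, index):
        del self._items[index]
    def __len__(self):
        return len(self._items)
    def insert(self, index, value):
        # insert() is the one mutation hook that append()/extend() build on
        self._items.insert(index, value)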
|
mit
| -7,757,585,064,638,722,000 | -2,494,608,976,385,399,300 | 25.5166 | 87 | 0.561126 | false |