repo_name (string, length 5-100) | path (string, length 4-299) | copies (990 classes) | size (string, length 4-7) | content (string, length 666-1.03M) | license (15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
StackStorm/st2 | st2reactor/st2reactor/rules/matcher.py | 3 | 3079 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2common import log as logging
from st2common.constants.rules import RULE_TYPE_BACKSTOP
from st2reactor.rules.filter import RuleFilter, SecondPassRuleFilter
LOG = logging.getLogger("st2reactor.rules.RulesMatcher")
class RulesMatcher(object):
def __init__(self, trigger_instance, trigger, rules, extra_info=False):
self.trigger_instance = trigger_instance
self.trigger = trigger
self.rules = rules
self.extra_info = extra_info
def get_matching_rules(self):
first_pass, second_pass = self._split_rules_into_passes()
# first pass
rule_filters = [
RuleFilter(
trigger_instance=self.trigger_instance,
trigger=self.trigger,
rule=rule,
extra_info=self.extra_info,
)
for rule in first_pass
]
matched_rules = [
rule_filter.rule for rule_filter in rule_filters if rule_filter.filter()
]
LOG.debug(
"[1st_pass] %d rule(s) found to enforce for %s.",
len(matched_rules),
self.trigger["name"],
)
# second pass
rule_filters = [
SecondPassRuleFilter(
self.trigger_instance, self.trigger, rule, matched_rules
)
for rule in second_pass
]
matched_in_second_pass = [
rule_filter.rule for rule_filter in rule_filters if rule_filter.filter()
]
LOG.debug(
"[2nd_pass] %d rule(s) found to enforce for %s.",
len(matched_in_second_pass),
self.trigger["name"],
)
matched_rules.extend(matched_in_second_pass)
LOG.info(
"%d rule(s) found to enforce for %s.",
len(matched_rules),
self.trigger["name"],
)
return matched_rules
def _split_rules_into_passes(self):
"""
Splits the rules in the Matcher into first_pass and second_pass collections.
Backstop rules are only evaluated after all other rules, so they are collected separately for the second pass.
"""
first_pass = []
second_pass = []
for rule in self.rules:
if self._is_first_pass_rule(rule):
first_pass.append(rule)
else:
second_pass.append(rule)
return first_pass, second_pass
def _is_first_pass_rule(self, rule):
return rule.type["ref"] != RULE_TYPE_BACKSTOP
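# Illustrative usage sketch (not part of the original module). It only uses the
# API defined above; the trigger_instance, trigger and rules objects are assumed
# to come from the usual st2 persistence layer, and the variable names are
# hypothetical placeholders.
#
#     matcher = RulesMatcher(trigger_instance=instance, trigger=trigger,
#                            rules=rules, extra_info=True)
#     rules_to_enforce = matcher.get_matching_rules()
#     # Rules of type RULE_TYPE_BACKSTOP are only filtered in the second pass,
#     # after every regular rule has been evaluated.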
| apache-2.0 | -3,724,466,836,075,677,000 | 33.211111 | 84 | 0.603118 | false |
ranjinidas/Axelrod | axelrod/tests/unit/test_ecosystem.py | 4 | 3370 | """Tests for the Ecosystem class"""
import unittest
import axelrod
class TestEcosystem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cooperators = axelrod.Tournament(players=[
axelrod.Cooperator(),
axelrod.Cooperator(),
axelrod.Cooperator(),
axelrod.Cooperator(),
])
defector_wins = axelrod.Tournament(players=[
axelrod.Cooperator(),
axelrod.Cooperator(),
axelrod.Cooperator(),
axelrod.Defector(),
])
cls.res_cooperators = cooperators.play()
cls.res_defector_wins = defector_wins.play()
def test_init(self):
"""Are the populations created correctly?"""
# By default create populations of equal size
eco = axelrod.Ecosystem(self.res_cooperators)
pops = eco.population_sizes
self.assertEqual(eco.nplayers, 4)
self.assertEqual(len(pops), 1)
self.assertEqual(len(pops[0]), 4)
self.assertAlmostEqual(sum(pops[0]), 1.0)
self.assertEqual(list(set(pops[0])), [0.25])
# Can pass list of initial population distributions
eco = axelrod.Ecosystem(self.res_cooperators, population=[.7, .25, .03, .02])
pops = eco.population_sizes
self.assertEqual(eco.nplayers, 4)
self.assertEqual(len(pops), 1)
self.assertEqual(len(pops[0]), 4)
self.assertAlmostEqual(sum(pops[0]), 1.0)
self.assertEqual(pops[0], [.7, .25, .03, .02])
# Distribution will automatically normalise
eco = axelrod.Ecosystem(self.res_cooperators, population=[70, 25, 3, 2])
pops = eco.population_sizes
self.assertEqual(eco.nplayers, 4)
self.assertEqual(len(pops), 1)
self.assertEqual(len(pops[0]), 4)
self.assertAlmostEqual(sum(pops[0]), 1.0)
self.assertEqual(pops[0], [.7, .25, .03, .02])
# If passed list is of incorrect size get error
self.assertRaises(TypeError, axelrod.Ecosystem, self.res_cooperators, population=[.7, .2, .03, .1, .1])
# If passed list has negative values
self.assertRaises(TypeError, axelrod.Ecosystem, self.res_cooperators, population=[.7, -.2, .03, .2])
def test_fitness(self):
fitness = lambda p: 2 * p
eco = axelrod.Ecosystem(self.res_cooperators, fitness=fitness)
self.assertEqual(eco.fitness(10), 20)
def test_cooperators(self):
"""Are cooperators stable over time?"""
eco = axelrod.Ecosystem(self.res_cooperators)
eco.reproduce(100)
pops = eco.population_sizes
self.assertEqual(len(pops), 101)
for p in pops:
self.assertEqual(len(p), 4)
self.assertEqual(sum(p), 1.0)
self.assertEqual(list(set(p)), [0.25])
def test_defector_wins(self):
"""Does one defector win over time?"""
eco = axelrod.Ecosystem(self.res_defector_wins)
eco.reproduce(1000)
pops = eco.population_sizes
self.assertEqual(len(pops), 1001)
for p in pops:
self.assertEqual(len(p), 4)
self.assertAlmostEqual(sum(p), 1.0)
last = pops[-1]
self.assertAlmostEqual(last[0], 0.0)
self.assertAlmostEqual(last[1], 0.0)
self.assertAlmostEqual(last[2], 0.0)
self.assertAlmostEqual(last[3], 1.0)
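# Illustrative usage sketch (not part of the original test module), assuming only
# the axelrod API already exercised above (Tournament, Ecosystem, reproduce,
# population_sizes); the choice of strategies is arbitrary.
#
#     players = [axelrod.Cooperator(), axelrod.Defector(), axelrod.TitForTat()]
#     results = axelrod.Tournament(players=players).play()
#     eco = axelrod.Ecosystem(results)
#     eco.reproduce(100)                        # run 100 generations
#     final_shares = eco.population_sizes[-1]   # normalised, sums to 1.0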
| mit | 3,322,875,338,283,443,700 | 34.851064 | 111 | 0.605935 | false |
revoer/keystone-8.0.0 | swift/container/sync.py | 1 | 29912 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import os
import uuid
from swift import gettext_ as _
from time import ctime, time
from random import choice, random
from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.common.db import DatabaseConnectionError
from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, head_object,
InternalClient, UnexpectedResponse)
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.utils import (
clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to,
whataremyips, Timestamp, decode_timestamps)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
from swift.common.wsgi import ConfigString
# The default internal client config body is to support upgrades without
# requiring deployment of the new /etc/swift/internal-client.conf
ic_conf_body = """
[DEFAULT]
# swift_dir = /etc/swift
# user = swift
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options
[filter:cache]
use = egg:swift#memcache
# See proxy-server.conf-sample for options
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options
""".lstrip()
class ContainerSync(Daemon):
"""
Daemon to sync syncable containers.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to
be down.
Two sync points are kept per container database. All rows between the two
sync points trigger updates. Any rows newer than both sync points cause
updates depending on the node's position for the container (primary nodes
do one third, etc. depending on the replica count of course). After a sync
run, the first sync point is set to the newest ROWID known and the second
sync point is set to newest ROWID for which all updates have been sent.
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
First sync run, database has 6 rows:
* SyncPoint1 starts as -1.
* SyncPoint2 starts as -1.
* No rows between points, so no "all updates" rows.
* Six rows newer than SyncPoint1, so a third of the rows are sent
by node 1, another third by node 2, remaining third by node 3.
* SyncPoint1 is set as 6 (the newest ROWID known).
* SyncPoint2 is left as -1 since no "all updates" rows were synced.
Next sync run, database has 12 rows:
* SyncPoint1 starts as 6.
* SyncPoint2 starts as -1.
* The rows between -1 and 6 all trigger updates (most of which
should short-circuit on the remote end as having already been
done).
* Six more rows newer than SyncPoint1, so a third of the rows are
sent by node 1, another third by node 2, remaining third by node
3.
* SyncPoint1 is set as 12 (the newest ROWID known).
* SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
was missed.
:param conf: The dict of configuration values from the [container-sync]
section of the container-server.conf
:param container_ring: If None, the <swift_dir>/container.ring.gz will be
loaded. This is overridden by unit tests.
"""
def __init__(self, conf, container_ring=None, logger=None):
#: The dict of configuration values from the [container-sync] section
#: of the container-server.conf.
self.conf = conf
#: Logger to use for container-sync log lines.
self.logger = logger or get_logger(conf, log_route='container-sync')
#: Path to the local device mount points.
self.devices = conf.get('devices', '/srv/node')
#: Indicates whether mount points should be verified as actual mount
#: points (normally true, false for tests and SAIO).
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
#: Minimum time between full scans. This is to keep the daemon from
#: running wild on near empty systems.
self.interval = int(conf.get('interval', 300))
#: Maximum amount of time to spend syncing a container before moving on
#: to the next one. If a container sync hasn't finished in this time,
#: it'll just be resumed next scan.
self.container_time = int(conf.get('container_time', 60))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.http_proxies = [
a.strip()
for a in conf.get('sync_proxy', '').split(',')
if a.strip()]
#: ContainerSyncStore instance for iterating over synced containers
self.sync_store = ContainerSyncStore(self.devices,
self.logger,
self.mount_check)
#: Number of containers with sync turned on that were successfully
#: synced.
self.container_syncs = 0
#: Number of successful DELETEs triggered.
self.container_deletes = 0
#: Number of successful PUTs triggered.
self.container_puts = 0
#: Number of containers whose sync has been turned off, but
#: are not yet cleared from the sync store.
self.container_skips = 0
#: Number of containers that had a failure of some type.
self.container_failures = 0
#: Per container stats. These are collected per container.
#: puts - the number of puts that were done for the container
#: deletes - the number of deletes that were done for the container
#: bytes - the total number of bytes transferred per the container
self.container_stats = collections.defaultdict(int)
self.container_stats.clear()
#: Time of last stats report.
self.reported = time()
self.swift_dir = conf.get('swift_dir', '/etc/swift')
#: swift.common.ring.Ring for locating containers.
self.container_ring = container_ring or Ring(self.swift_dir,
ring_name='container')
bind_ip = conf.get('bind_ip', '0.0.0.0')
self._myips = whataremyips(bind_ip)
self._myport = int(conf.get('bind_port', 6201))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.conn_timeout = float(conf.get('conn_timeout', 5))
request_tries = int(conf.get('request_tries') or 3)
internal_client_conf_path = conf.get('internal_client_conf_path')
if not internal_client_conf_path:
self.logger.warning(
_('Configuration option internal_client_conf_path not '
'defined. Using default configuration, See '
'internal-client.conf-sample for options'))
internal_client_conf = ConfigString(ic_conf_body)
else:
internal_client_conf = internal_client_conf_path
try:
self.swift = InternalClient(
internal_client_conf, 'Swift Container Sync', request_tries)
except IOError as err:
if err.errno != errno.ENOENT:
raise
raise SystemExit(
_('Unable to load internal client from config: %r (%s)') %
(internal_client_conf_path, err))
def run_forever(self, *args, **kwargs):
"""
Runs container sync scans until stopped.
"""
sleep(random() * self.interval)
while True:
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_stats.clear()
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Runs a single container sync scan.
"""
self.logger.info(_('Begin container sync "once" mode'))
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
self.report()
elapsed = time() - begin
self.logger.info(
_('Container sync "once" mode completed: %.02fs'), elapsed)
def report(self):
"""
Writes a report of the stats to the logger and resets the stats for the
next report.
"""
self.logger.info(
_('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
'puts], %(skip)s skipped, %(fail)s failed'),
{'time': ctime(self.reported),
'sync': self.container_syncs,
'delete': self.container_deletes,
'put': self.container_puts,
'skip': self.container_skips,
'fail': self.container_failures})
self.reported = time()
self.container_syncs = 0
self.container_deletes = 0
self.container_puts = 0
self.container_skips = 0
self.container_failures = 0
def container_report(self, start, end, sync_point1, sync_point2, info,
max_row):
self.logger.info(_('Container sync report: %(container)s, '
'time window start: %(start)s, '
'time window end: %(end)s, '
'puts: %(puts)s, '
'posts: %(posts)s, '
'deletes: %(deletes)s, '
'bytes: %(bytes)s, '
'sync_point1: %(point1)s, '
'sync_point2: %(point2)s, '
'total_rows: %(total)s'),
{'container': '%s/%s' % (info['account'],
info['container']),
'start': start,
'end': end,
'puts': self.container_stats['puts'],
'posts': 0,
'deletes': self.container_stats['deletes'],
'bytes': self.container_stats['bytes'],
'point1': sync_point1,
'point2': sync_point2,
'total': max_row})
def container_sync(self, path):
"""
Checks the given path for a container database, determines if syncing
is turned on for that database and, if so, sends any updates to the
other container.
:param path: the path to a container db
"""
broker = None
try:
broker = ContainerBroker(path)
# The path we pass to the ContainerBroker is a real path of
# a container DB. If we get here, however, it means that this
# path is linked from the sync_containers dir. In rare cases
# of race or processes failures the link can be stale and
# the get_info below will raise a DB doesn't exist exception
# In this case we remove the stale link and raise an error
# since in most cases the db should be there.
try:
info = broker.get_info()
except DatabaseConnectionError as db_err:
if str(db_err).endswith("DB doesn't exist"):
self.sync_store.remove_synced_container(broker)
raise
x, nodes = self.container_ring.get_nodes(info['account'],
info['container'])
for ordinal, node in enumerate(nodes):
if is_local_device(self._myips, self._myport,
node['ip'], node['port']):
break
else:
return
if not broker.is_deleted():
sync_to = None
user_key = None
sync_point1 = info['x_container_sync_point1']
sync_point2 = info['x_container_sync_point2']
for key, (value, timestamp) in broker.metadata.items():
if key.lower() == 'x-container-sync-to':
sync_to = value
elif key.lower() == 'x-container-sync-key':
user_key = value
if not sync_to or not user_key:
self.container_skips += 1
self.logger.increment('skips')
return
err, sync_to, realm, realm_key = validate_sync_to(
sync_to, self.allowed_sync_hosts, self.realms_conf)
if err:
self.logger.info(
_('ERROR %(db_file)s: %(validate_sync_to_err)s'),
{'db_file': str(broker),
'validate_sync_to_err': err})
self.container_failures += 1
self.logger.increment('failures')
return
start_at = time()
stop_at = start_at + self.container_time
next_sync_point = None
sync_stage_time = start_at
try:
while time() < stop_at and sync_point2 < sync_point1:
rows = broker.get_items_since(sync_point2, 1)
if not rows:
break
row = rows[0]
if row['ROWID'] > sync_point1:
break
# This node will only initially sync out one third
# of the objects (if 3 replicas, 1/4 if 4, etc.)
# and will skip problematic rows as needed in case of
# faults.
# This section will attempt to sync previously skipped
# rows in case the previous attempts by any of the
# nodes didn't succeed.
if not self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key):
if not next_sync_point:
next_sync_point = sync_point2
sync_point2 = row['ROWID']
broker.set_x_container_sync_points(None, sync_point2)
if next_sync_point:
broker.set_x_container_sync_points(None,
next_sync_point)
else:
next_sync_point = sync_point2
sync_stage_time = time()
while sync_stage_time < stop_at:
rows = broker.get_items_since(sync_point1, 1)
if not rows:
break
row = rows[0]
key = hash_path(info['account'], info['container'],
row['name'], raw_digest=True)
# This node will only initially sync out one third of
# the objects (if 3 replicas, 1/4 if 4, etc.).
# It'll come back around to the section above
# and attempt to sync previously skipped rows in case
# the other nodes didn't succeed or in case it failed
# to do so the first time.
if unpack_from('>I', key)[0] % \
len(nodes) == ordinal:
self.container_sync_row(
row, sync_to, user_key, broker, info, realm,
realm_key)
sync_point1 = row['ROWID']
broker.set_x_container_sync_points(sync_point1, None)
sync_stage_time = time()
self.container_syncs += 1
self.logger.increment('syncs')
except Exception as ex:
raise ex
finally:
self.container_report(start_at, sync_stage_time,
sync_point1,
next_sync_point,
info, broker.get_max_row())
except (Exception, Timeout):
self.container_failures += 1
self.logger.increment('failures')
self.logger.exception(_('ERROR Syncing %s'),
broker if broker else path)
def _update_sync_to_headers(self, name, sync_to, user_key,
realm, realm_key, method, headers):
"""
Updates container sync headers
:param name: The name of the object
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param method: HTTP method to create sig with
:param headers: headers to update with container sync headers
"""
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(name)
sig = self.realms_conf.get_sig(method, path,
headers.get('x-timestamp', 0),
nonce, realm_key,
user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (realm,
nonce,
sig)
else:
headers['x-container-sync-key'] = user_key
def _object_in_remote_container(self, name, sync_to, user_key,
realm, realm_key, timestamp):
"""
Performs head object on remote to eliminate extra remote put and
local get object calls
:param name: The name of the object in the updated row in the local
database triggering the sync update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param timestamp: last modified date of local object
:returns: True if object already exists in remote
"""
headers = {'x-timestamp': timestamp.internal}
self._update_sync_to_headers(name, sync_to, user_key, realm,
realm_key, 'HEAD', headers)
try:
metadata, _ = head_object(sync_to, name=name,
headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
retries=0)
remote_ts = Timestamp(metadata.get('x-timestamp', 0))
self.logger.debug("remote obj timestamp %s local obj %s" %
(timestamp.internal, remote_ts.internal))
if timestamp <= remote_ts:
return True
# Object in remote should be updated
return False
except ClientException as http_err:
# Object not in remote
if http_err.http_status == 404:
return False
raise http_err
def container_sync_row(self, row, sync_to, user_key, broker, info,
realm, realm_key):
"""
Sends the update the row indicates to the sync_to container.
Update can be either delete or put.
:param row: The updated row in the local database triggering the sync
update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param broker: The local container database broker.
:param info: The get_info result from the local container database
broker.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:returns: True on success
"""
try:
start_time = time()
# extract last modified time from the created_at value
ts_data, ts_ctype, ts_meta = decode_timestamps(
row['created_at'])
if row['deleted']:
# when sync'ing a deleted object, use ts_data - this is the
# timestamp of the source tombstone
try:
headers = {'x-timestamp': ts_data.internal}
self._update_sync_to_headers(row['name'], sync_to,
user_key, realm, realm_key,
'DELETE', headers)
delete_object(sync_to, name=row['name'], headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
timeout=self.conn_timeout)
except ClientException as err:
if err.http_status != HTTP_NOT_FOUND:
raise
self.container_deletes += 1
self.container_stats['deletes'] += 1
self.logger.increment('deletes')
self.logger.timing_since('deletes.timing', start_time)
else:
# when sync'ing a live object, use ts_meta - this is the time
# at which the source object was last modified by a PUT or POST
if self._object_in_remote_container(row['name'],
sync_to, user_key, realm,
realm_key, ts_meta):
return True
exc = None
# look up for the newest one
headers_out = {'X-Newest': True,
'X-Backend-Storage-Policy-Index':
str(info['storage_policy_index'])}
try:
source_obj_status, headers, body = \
self.swift.get_object(info['account'],
info['container'], row['name'],
headers=headers_out,
acceptable_statuses=(2, 4))
except (Exception, UnexpectedResponse, Timeout) as err:
headers = {}
body = None
exc = err
timestamp = Timestamp(headers.get('x-timestamp', 0))
if timestamp < ts_meta:
if exc:
raise exc
raise Exception(
_('Unknown exception trying to GET: '
'%(account)r %(container)r %(object)r'),
{'account': info['account'],
'container': info['container'],
'object': row['name']})
for key in ('date', 'last-modified'):
if key in headers:
del headers[key]
if 'etag' in headers:
headers['etag'] = headers['etag'].strip('"')
if 'content-type' in headers:
headers['content-type'] = clean_content_type(
headers['content-type'])
self._update_sync_to_headers(row['name'], sync_to, user_key,
realm, realm_key, 'PUT', headers)
put_object(sync_to, name=row['name'], headers=headers,
contents=FileLikeIter(body),
proxy=self.select_http_proxy(), logger=self.logger,
timeout=self.conn_timeout)
self.container_puts += 1
self.container_stats['puts'] += 1
self.container_stats['bytes'] += row['size']
self.logger.increment('puts')
self.logger.timing_since('puts.timing', start_time)
except ClientException as err:
if err.http_status == HTTP_UNAUTHORIZED:
self.logger.info(
_('Unauth %(sync_from)r => %(sync_to)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to})
elif err.http_status == HTTP_NOT_FOUND:
self.logger.info(
_('Not found %(sync_from)r => %(sync_to)r \
- object %(obj_name)r'),
{'sync_from': '%s/%s' %
(quote(info['account']), quote(info['container'])),
'sync_to': sync_to, 'obj_name': row['name']})
else:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
except (Exception, Timeout) as err:
self.logger.exception(
_('ERROR Syncing %(db_file)s %(row)s'),
{'db_file': str(broker), 'row': row})
self.container_failures += 1
self.logger.increment('failures')
return False
return True
def select_http_proxy(self):
return choice(self.http_proxies) if self.http_proxies else None
| apache-2.0 | -2,236,279,326,873,953,000 | 45.303406 | 79 | 0.529888 | false |
openlawlibrary/pygls | tests/lsp/test_hover.py | 1 | 5452 | ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import unittest
from typing import Optional
from pygls.lsp.methods import HOVER
from pygls.lsp.types import (Hover, HoverOptions, HoverParams, MarkedString, MarkupContent,
MarkupKind, Position, Range, TextDocumentIdentifier)
from ..conftest import CALL_TIMEOUT, ClientServer
class TestHover(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client_server = ClientServer()
cls.client, cls.server = cls.client_server
@cls.server.feature(
HOVER,
HoverOptions(),
)
def f(params: HoverParams) -> Optional[Hover]:
range = Range(
start=Position(line=0, character=0),
end=Position(line=1, character=1),
)
return {
'file://return.marked_string': Hover(
range=range,
contents=MarkedString(
language='language',
value='value',
),
),
'file://return.marked_string_list': Hover(
range=range,
contents=[
MarkedString(
language='language',
value='value',
),
'str type'
],
),
'file://return.markup_content': Hover(
range=range,
contents=MarkupContent(
kind=MarkupKind.Markdown,
value='value'
),
),
}.get(params.text_document.uri, None)
cls.client_server.start()
@classmethod
def tearDownClass(cls):
cls.client_server.stop()
def test_capabilities(self):
capabilities = self.server.server_capabilities
assert capabilities.hover_provider
def test_hover_return_marked_string(self):
response = self.client.lsp.send_request(
HOVER,
HoverParams(
text_document=TextDocumentIdentifier(uri='file://return.marked_string'),
position=Position(line=0, character=0),
),
).result(timeout=CALL_TIMEOUT)
assert response
assert response['contents']['language'] == 'language'
assert response['contents']['value'] == 'value'
assert response['range']['start']['line'] == 0
assert response['range']['start']['character'] == 0
assert response['range']['end']['line'] == 1
assert response['range']['end']['character'] == 1
def test_hover_return_marked_string_list(self):
response = self.client.lsp.send_request(
HOVER,
HoverParams(
text_document=TextDocumentIdentifier(uri='file://return.marked_string_list'),
position=Position(line=0, character=0),
),
).result(timeout=CALL_TIMEOUT)
assert response
assert response['contents'][0]['language'] == 'language'
assert response['contents'][0]['value'] == 'value'
assert response['contents'][1] == 'str type'
assert response['range']['start']['line'] == 0
assert response['range']['start']['character'] == 0
assert response['range']['end']['line'] == 1
assert response['range']['end']['character'] == 1
def test_hover_return_markup_content(self):
response = self.client.lsp.send_request(
HOVER,
HoverParams(
text_document=TextDocumentIdentifier(uri='file://return.markup_content'),
position=Position(line=0, character=0),
),
).result(timeout=CALL_TIMEOUT)
assert response
assert response['contents']['kind'] == MarkupKind.Markdown
assert response['contents']['value'] == 'value'
assert response['range']['start']['line'] == 0
assert response['range']['start']['character'] == 0
assert response['range']['end']['line'] == 1
assert response['range']['end']['character'] == 1
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,349,198,200,429,678,000 | 37.394366 | 93 | 0.501651 | false |
Swiftb0y/P4wnP1 | hidtools/mouse/hid_mouse.py | 3 | 4850 | #!/usr/bin/python
# This file is part of P4wnP1.
#
# Copyright (c) 2017, Marcus Mengs.
#
# P4wnP1 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# P4wnP1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with P4wnP1. If not, see <http://www.gnu.org/licenses/>.
import struct
import sys
class hid_mouse(object) :
def __init__(self, absolute=False, outfile="/dev/hidg2"):
self.button1 = False
self.button2 = False
self.button3 = False
self._x_abs = 0.0
self._y_abs = 0.0
self.outf = outfile
self._abs = absolute
self.bleft = 0
self.bright = 32767
self.btop = 0
self.bbottom = 32767
self._x_abs_short = self.bleft
self._y_abs_short = self.btop
self._x_rel = 0
self._y_rel = 0
@property
def x_rel(self):
return self._x_rel
@x_rel.setter
def x_rel(self, value):
# in case the last coordinate change was relative, we disable absolute mode
self._abs = False
self._x_rel = value
@property
def y_rel(self):
return self._y_rel
@y_rel.setter
def y_rel(self, value):
# in case the last coordinate change was relative, we disable absolute mode
self._abs = False
self._y_rel = value
@property
def x_abs(self):
return self._x_abs
@x_abs.setter
def x_abs(self, value):
# in case the last coordinate change was absolute, we enable absolute mode
self._abs = True
self._x_abs = self.clamp_float(value)
self._x_abs_short = self.scaled_short(self._x_abs, self.bleft, self.bright)
@property
def y_abs(self):
return self._y_abs
@y_abs.setter
def y_abs(self, value):
# in case the last coordinate change was absolute, we enable absolute mode
self._abs = True
self._y_abs = self.clamp_float(value)
self._y_abs_short = self.scaled_short(self._y_abs, self.btop, self.bbottom)
def clamp_float(self, val):
return min(max(0.0, val), 1.0)
def scaled_short(self, val, lower, upper):
#print "val {0}".format(val)
lower = min(max(-32768, lower), 32767)
upper = min(max(-32768, upper), 32767)
val = self.clamp_float(val)
dim = upper - lower
#print "dim {0}".format(dim)
scaled = int(lower + val*dim)
#print "clamped val {0} scaled {1}".format(val, scaled)
return scaled
def gen_out_report_abs(self):
#xout = hid_mouse.convert_pos_short(self._x_abs)
xout = struct.pack("<h", int(self._x_abs_short)) # signed short, little endian
#yout = hid_mouse.convert_pos_short(self._y_abs)
yout = struct.pack("<h", int(self._y_abs_short)) # signed short, little endian
btnout = hid_mouse.convert_btn_byte(self.button1, self.button2, self.button3)
return "\x02" + btnout + xout + yout
def gen_out_report_rel(self):
#xout = hid_mouse.convert_pos_short(self._x_abs)
xout = struct.pack("<b", int(self._x_rel)) # signed short, little endian
#yout = hid_mouse.convert_pos_short(self._y_abs)
yout = struct.pack("<b", int(self._y_rel)) # signed short, little endian
btnout = hid_mouse.convert_btn_byte(self.button1, self.button2, self.button3)
return "\x01" + btnout + xout + yout + "\x00\x00"
def fire_report(self):
with open(self.outf, "wb") as f:
if self._abs:
#print "absolute x: {0} ({1})\ty: {2} ({3})".format(self._x_abs, self._x_abs_short, self._y_abs, self._y_abs_short)
f.write(self.gen_out_report_abs())
else:
#print "relative x: {0} \ty: {1}".format(self.x_rel, self.y_rel)
f.write(self.gen_out_report_rel())
f.flush()
@staticmethod
def convert_btn_byte(btn1=False, btn2=False, btn3=False):
res = 0x00
if btn1:
res += 0x01
if btn2:
res += 0x02
if btn3:
res += 0x04
return struct.pack("<B", res)
@staticmethod
def convert_pos_short(val):
# clamp val
valf = max(min(val, 1.0), 0.0)
valx = valf * 0x7FFF # scale to 0x7FFF
# valx = valf * 10000 + 1 # scale from 0x0001 to 0x7FFF
res = struct.pack("<h", int(valx)) # signed short, little endian
return res
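    # NOTE: the staticmethod below redefines convert_pos_short and therefore
    # shadows the definition above; only the variant that scales into the
    # 0x0001..0x7FFF range is used at runtime.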
@staticmethod
def convert_pos_short(val):
# clamp val
valf = max(min(val, 1.0), 0.0)
valx = valf * 0x7FFE + 1 # scale from 0x0001 to 0x7FFF
# valx = valf * 10000 + 1 # scale from 0x0001 to 0x7FFF
res = struct.pack("<h", int(valx)) # signed short, little endian
return res
@staticmethod
def bytes2hexstr(bytes):
return "\\x"+"\\x".join("{:02x}".format(ord(c)) for c in bytes)
@staticmethod
def convert_pos_str(val):
res = hid_mouse.convert_pos_short(val)
res_str = hid_mouse.bytes2hexstr(res)
return res_str
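# Illustrative usage sketch (not part of the original script): driving the HID
# gadget through this class. It assumes the P4wnP1 mouse gadget is configured
# and /dev/hidg2 exists; the coordinates are example values only.
#
#     m = hid_mouse(absolute=True, outfile="/dev/hidg2")
#     m.x_abs = 0.5            # 0.0 .. 1.0, scaled into the absolute bounds
#     m.y_abs = 0.5
#     m.button1 = True         # press the left button
#     m.fire_report()          # writes the absolute report (report ID 0x02)
#     m.button1 = False
#     m.fire_report()          # release
#     m.x_rel = 10             # assigning a relative delta switches to relative mode
#     m.fire_report()          # writes the relative report (report ID 0x01)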
| gpl-3.0 | 287,219,454,957,731,260 | 26.556818 | 119 | 0.664742 | false |
pedropenna/.emacs.d | elpa/floobits-20160804.1135/floo/common/cert.py | 3 | 4988 | CA_CERT = '''-----BEGIN CERTIFICATE-----
MIIGNDCCBBygAwIBAgIBGzANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDcxMDI0MjA1NzA5WhcNMTcxMDI0MjA1NzA5WjCB
jDELMAkGA1UEBhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzApBgNVBAsT
IlNlY3VyZSBEaWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNVBAMTL1N0
YXJ0Q29tIENsYXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgU2VydmVyIENBMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4k85L6GMmoWtCA4IPlfyiAEh
G5SpbOK426oZGEY6UqH1D/RujOqWjJaHeRNAUS8i8gyLhw9l33F0NENVsTUJm9m8
H/rrQtCXQHK3Q5Y9upadXVACHJuRjZzArNe7LxfXyz6CnXPrB0KSss1ks3RVG7RL
hiEs93iHMuAW5Nq9TJXqpAp+tgoNLorPVavD5d1Bik7mb2VsskDPF125w2oLJxGE
d2H2wnztwI14FBiZgZl1Y7foU9O6YekO+qIw80aiuckfbIBaQKwn7UhHM7BUxkYa
8zVhwQIpkFR+ZE3EMFICgtffziFuGJHXuKuMJxe18KMBL47SLoc6PbQpZ4rEAwID
AQABo4IBrTCCAakwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
VR0OBBYEFBHbI0X9VMxqcW+EigPXvvcBLyaGMB8GA1UdIwQYMBaAFE4L7xqkQFul
F2mHMMo0aEPQQa7yMGYGCCsGAQUFBwEBBFowWDAnBggrBgEFBQcwAYYbaHR0cDov
L29jc3Auc3RhcnRzc2wuY29tL2NhMC0GCCsGAQUFBzAChiFodHRwOi8vd3d3LnN0
YXJ0c3NsLmNvbS9zZnNjYS5jcnQwWwYDVR0fBFQwUjAnoCWgI4YhaHR0cDovL3d3
dy5zdGFydHNzbC5jb20vc2ZzY2EuY3JsMCegJaAjhiFodHRwOi8vY3JsLnN0YXJ0
c3NsLmNvbS9zZnNjYS5jcmwwgYAGA1UdIAR5MHcwdQYLKwYBBAGBtTcBAgEwZjAu
BggrBgEFBQcCARYiaHR0cDovL3d3dy5zdGFydHNzbC5jb20vcG9saWN5LnBkZjA0
BggrBgEFBQcCARYoaHR0cDovL3d3dy5zdGFydHNzbC5jb20vaW50ZXJtZWRpYXRl
LnBkZjANBgkqhkiG9w0BAQsFAAOCAgEAbQjxXHkqUPtUY+u8NEFcuKMDITfjvGkl
LgrTuBW63grW+2AuDAZRo/066eNHs6QV4i5e4ujwPSR2dgggY7mOIIBmiDm2QRjF
5CROq6zDlIdqlsFZICkuONDNFpFjaPtZRTmuK1n6gywQgCNSIrbzjPcwR/jL/wow
bfwC9yGme1EeZRqvWy/HzFWacs7UMmWlRk6DTmpfPOPMJo5AxyTZCiCYQQeksV7x
UAeY0kWa+y/FV+eerOPUl6yy4jRHTk7tCySxrciZwYbd6YNLmeIQoUAdRC3CH3nT
B2/JYxltcgyGHMiPU3TtafZgLs8fvncv+wIF1YAF/OGqg8qmzoJ3ghM4upGdTMIu
8vADdmuLC/+dnbzknxX6QEGlWA8zojLUxVhGNfIFoizu/V/DyvSvYuxzzIkPECK5
gDoMoBTTMI/wnxXwulNPtfgF7/5AtDhA4GNAfB2SddxiNQAF7XkUHtMZ9ff3W6Xk
FldOG+NlLFqsDBG/KLckyFK36gq+FqNFCbmtmtXBGB5L1fDIeYzcMKG6hFQxhHS0
oqpdHhp2nWBfLlOnTNqIZNJzOH37OJE6Olk45LNFJtSrqIAZyCCfM6bQgoQvZuIa
xs9SIp+63ZMk9TxEaQj/KteaOyfaPXI9778U7JElMTz3Bls62mslV2I1C/A73Zyq
JZWQZ8NU4ds=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
-----END CERTIFICATE-----'''
| gpl-3.0 | -8,713,156,703,054,190,000 | 61.35 | 64 | 0.956696 | false |
dirn/readthedocs.org | readthedocs/restapi/permissions.py | 12 | 1615 | from rest_framework import permissions
from privacy.backend import AdminPermission
class IsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.users.all()
class CommentModeratorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, comment):
if request.method in permissions.SAFE_METHODS:
return True # TODO: Similar logic to #1084
else:
return AdminPermission.is_admin(request.user, comment.node.project)
class RelatedProjectIsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.project.users.all()
class APIPermission(permissions.IsAuthenticatedOrReadOnly):
'''
This permission should allow authenticated users read-only access to the API,
and allow admin users write access. It should be used on API resources
that need to add write operations to resources that were based on the
ReadOnlyViewSet.
'''
def has_object_permission(self, request, view, obj):
has_perm = super(APIPermission, self).has_object_permission(
request, view, obj)
return has_perm or (request.user and request.user.is_staff)
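# Illustrative sketch (not part of the original module): attaching one of these
# classes to a view. The viewset below is hypothetical; `permission_classes` is
# the standard rest_framework hook.
#
#     from rest_framework import viewsets
#
#     class ProjectViewSet(viewsets.ModelViewSet):
#         permission_classes = [APIPermission]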
| mit | 3,586,140,166,495,542,000 | 32.645833 | 79 | 0.712074 | false |
tomprince/alot | alot/ui.py | 1 | 17916 | import urwid
import logging
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from settings import config
from buffers import BufferlistBuffer
import commands
from commands import commandfactory
from alot.commands import CommandParseError
import widgets
class InputWrap(urwid.WidgetWrap):
"""
This is the topmost widget used in the widget tree.
Its purpose is to capture and interpret keypresses
by instantiating and applying the relevant :class:`Command` objects
or relaying them to the wrapped `rootwidget`.
"""
def __init__(self, ui, rootwidget):
urwid.WidgetWrap.__init__(self, rootwidget)
self.ui = ui
self.rootwidget = rootwidget
self.select_cancel_only = False
def set_root(self, w):
self._w = w
def get_root(self):
return self._w
def allowed_command(self, cmd):
"""sanity check if the given command should be applied.
This is used in :meth:`keypress`"""
if not self.select_cancel_only:
return True
elif isinstance(cmd, commands.globals.SendKeypressCommand):
if cmd.key in ['select', 'cancel']:
return True
else:
return False
def keypress(self, size, key):
"""overwrites `urwid.WidgetWrap.keypress`"""
mode = self.ui.mode
if self.select_cancel_only:
mode = 'global'
cmdline = config.get_mapping(mode, key)
if cmdline:
try:
cmd = commandfactory(cmdline, mode)
if self.allowed_command(cmd):
self.ui.apply_command(cmd)
return None
except CommandParseError, e:
self.ui.notify(e.message, priority='error')
return self._w.keypress(size, key)
class UI(object):
"""
This class integrates all components of alot and offers
methods for user interaction like :meth:`prompt`, :meth:`notify` etc.
It handles the urwid widget tree and mainloop (we use twisted) and is
responsible for opening, closing and focussing buffers.
"""
buffers = []
"""list of active buffers"""
current_buffer = None
"""points to currently active :class:`~alot.buffers.Buffer`"""
dbman = None
"""Database manager (:class:`~alot.db.DBManager`)"""
accountman = None
"""account manager (:class:`~alot.account.AccountManager`)"""
def __init__(self, dbman, accountman, initialcmd, colourmode):
"""
:param dbman: :class:`~alot.db.DBManager`
:param accountman: :class:`~alot.account.AccountManager`
:param initialcmd: commandline applied after setting up interface
:type initialcmd: str
:param colourmode: determines which theme to chose
:type colourmode: int in [1,16,256]
"""
self.dbman = dbman
self.accountman = accountman
if not colourmode:
colourmode = config.getint('general', 'colourmode')
logging.info('setup gui in %d colours' % colourmode)
self.mainframe = urwid.Frame(urwid.SolidFill())
self.inputwrap = InputWrap(self, self.mainframe)
self.mainloop = urwid.MainLoop(self.inputwrap,
config.get_palette(),
handle_mouse=False,
event_loop=urwid.TwistedEventLoop(),
unhandled_input=self.unhandeled_input)
self.mainloop.screen.set_terminal_properties(colors=colourmode)
self.show_statusbar = config.getboolean('general', 'show_statusbar')
self.notificationbar = None
self.mode = 'global'
self.commandprompthistory = []
logging.debug('fire first command')
self.apply_command(initialcmd)
self.mainloop.run()
def unhandeled_input(self, key):
"""called if a keypress is not handled."""
logging.debug('unhandled input: %s' % key)
def keypress(self, key):
"""relay all keypresses to our `InputWrap`"""
self.inputwrap.keypress((150, 20), key)
def show_as_root_until_keypress(self, w, key, relay_rest=True,
afterwards=None):
def oe():
self.inputwrap.set_root(self.mainframe)
self.inputwrap.select_cancel_only = False
if callable(afterwards):
logging.debug('called')
afterwards()
logging.debug('relay: %s' % relay_rest)
helpwrap = widgets.CatchKeyWidgetWrap(w, key, on_catch=oe,
relay_rest=relay_rest)
self.inputwrap.set_root(helpwrap)
self.inputwrap.select_cancel_only = not relay_rest
def prompt(self, prefix='>', text=u'', completer=None, tab=0, history=[]):
"""prompt for text input
:param prefix: text to print before the input field
:type prefix: str
:param text: initial content of the input field
:type text: str
:param completer: completion object to use
:type completer: :meth:`alot.completion.Completer`
:param tab: number of tabs to press initially
(to select completion results)
:type tab: int
:param history: history to be used for up/down keys
:type history: list of str
:returns: a :class:`twisted.defer.Deferred`
"""
d = defer.Deferred() # create return deferred
oldroot = self.inputwrap.get_root()
def select_or_cancel(text):
# restore main screen and invoke callback
# (delayed return) with given text
self.inputwrap.set_root(oldroot)
self.inputwrap.select_cancel_only = False
d.callback(text)
#set up widgets
leftpart = urwid.Text(prefix, align='left')
editpart = widgets.CompleteEdit(completer, on_exit=select_or_cancel,
edit_text=text, history=history)
for i in range(tab): # hit some tabs
editpart.keypress((0,), 'tab')
# build promptwidget
both = urwid.Columns(
[
('fixed', len(prefix), leftpart),
('weight', 1, editpart),
])
both = urwid.AttrMap(both, 'global_prompt')
# put promptwidget as overlay on main widget
overlay = urwid.Overlay(both, oldroot,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 1),
None)
self.inputwrap.set_root(overlay)
self.inputwrap.select_cancel_only = True
return d # return deferred
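    # Illustrative sketch (not part of the original source): prompt() returns a
    # twisted Deferred, so command code typically consumes it via inlineCallbacks
    # (apply_command() below already drives such deferreds). The command body and
    # prompt text are hypothetical.
    #
    #     from twisted.internet import defer
    #
    #     @defer.inlineCallbacks
    #     def apply(self, ui):
    #         name = yield ui.prompt(prefix='name>', history=ui.commandprompthistory)
    #         if name is not None:
    #             ui.notify('hello %s' % name)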
def exit(self):
"""
shuts down user interface without cleaning up.
Use a :class:`commands.globals.ExitCommand` for a clean shutdown.
"""
reactor.stop()
def buffer_open(self, buf):
"""register and focus new :class:`~alot.buffers.Buffer`."""
self.buffers.append(buf)
self.buffer_focus(buf)
def buffer_close(self, buf):
"""
closes given :class:`~alot.buffers.Buffer`.
This it removes it from the bufferlist and calls its cleanup() method.
"""
buffers = self.buffers
if buf not in buffers:
string = 'tried to close unknown buffer: %s. \n\ni have:%s'
logging.error(string % (buf, self.buffers))
elif self.current_buffer == buf:
logging.debug('UI: closing current buffer %s' % buf)
index = buffers.index(buf)
buffers.remove(buf)
offset = config.getint('general', 'bufferclose_focus_offset')
nextbuffer = buffers[(index + offset) % len(buffers)]
self.buffer_focus(nextbuffer)
buf.cleanup()
else:
string = 'closing buffer %d:%s'
logging.debug(string % (buffers.index(buf), buf))
buffers.remove(buf)
buf.cleanup()
def buffer_focus(self, buf):
"""focus given :class:`~alot.buffers.Buffer`."""
if buf not in self.buffers:
logging.error('tried to focus unknown buffer')
else:
if self.current_buffer != buf:
self.current_buffer = buf
self.inputwrap.set_root(self.mainframe)
self.mode = buf.modename
if isinstance(self.current_buffer, BufferlistBuffer):
self.current_buffer.rebuild()
self.update()
def get_deep_focus(self, startfrom=None):
"""return the bottom most focussed widget of the widget tree"""
if not startfrom:
startfrom = self.current_buffer
if 'get_focus' in dir(startfrom):
focus = startfrom.get_focus()
if isinstance(focus, tuple):
focus = focus[0]
if isinstance(focus, urwid.Widget):
return self.get_deep_focus(startfrom=focus)
return startfrom
def get_buffers_of_type(self, t):
"""
returns currently open buffers for a given subclass of
:class:`alot.buffer.Buffer`
"""
return filter(lambda x: isinstance(x, t), self.buffers)
def clear_notify(self, messages):
"""
clears notification popups. Call this to get rid of messages that don't
time out.
:param messages: The popups to remove. This should be exactly
what :meth:`notify` returned when creating the popup
"""
newpile = self.notificationbar.widget_list
for l in messages:
newpile.remove(l)
if newpile:
self.notificationbar = urwid.Pile(newpile)
else:
self.notificationbar = None
self.update()
def choice(self, message, choices={'y': 'yes', 'n': 'no'},
select=None, cancel=None, msg_position='above'):
"""
prompt user to make a choice
:param message: string to display before list of choices
:type message: unicode
:param choices: dict of possible choices
:type choices: dict: keymap->choice (both str)
:param select: choice to return if enter/return is hit. Ignored if set
to `None`.
:type select: str
:param cancel: choice to return if escape is hit. Ignored if set to
`None`.
:type cancel: str
:param msg_position: determines if `message` is above or left of the
prompt. Must be `above` or `left`.
:type msg_position: str
:returns: a :class:`twisted.defer.Deferred`
"""
assert select in choices.values() + [None]
assert cancel in choices.values() + [None]
assert msg_position in ['left', 'above']
d = defer.Deferred() # create return deferred
oldroot = self.inputwrap.get_root()
def select_or_cancel(text):
self.inputwrap.set_root(oldroot)
self.inputwrap.select_cancel_only = False
d.callback(text)
#set up widgets
msgpart = urwid.Text(message)
choicespart = widgets.ChoiceWidget(choices, callback=select_or_cancel,
select=select, cancel=cancel)
# build widget
if msg_position == 'left':
both = urwid.Columns(
[
('fixed', len(message), msgpart),
('weight', 1, choicespart),
], dividechars=1)
else: # above
both = urwid.Pile([msgpart, choicespart])
both = urwid.AttrMap(both, 'prompt', 'prompt')
# put promptwidget as overlay on main widget
overlay = urwid.Overlay(both, oldroot,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 1),
None)
self.inputwrap.set_root(overlay)
self.inputwrap.select_cancel_only = True
return d # return deferred
def notify(self, message, priority='normal', timeout=0, block=False):
"""
opens notification popup
:param message: message to print
:type message: str
:param priority: priority string, used to format the popup: currently,
'normal' and 'error' are defined. If you use 'X' here,
the attribute 'global_notify_X' is used to format the
popup.
:type priority: str
:param timeout: seconds until message disappears. Defaults to the value
of 'notify_timeout' in the general config section.
A negative value means never time out.
:type timeout: int
:param block: this notification blocks until a keypress is made
:type block: bool
:returns: an urwid widget (this notification) that can be handed to
:meth:`clear_notify` for removal
"""
def build_line(msg, prio):
cols = urwid.Columns([urwid.Text(msg)])
return urwid.AttrMap(cols, 'global_notify_' + prio)
msgs = [build_line(message, priority)]
if not self.notificationbar:
self.notificationbar = urwid.Pile(msgs)
else:
newpile = self.notificationbar.widget_list + msgs
self.notificationbar = urwid.Pile(newpile)
self.update()
def clear(*args):
self.clear_notify(msgs)
if block:
# put "cancel to continue" widget as overlay on main widget
txt = urwid.Text('(cancel continues)')
overlay = urwid.Overlay(txt, self.mainframe,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 0),
None)
self.show_as_root_until_keypress(overlay, 'cancel',
relay_rest=False,
afterwards=clear)
else:
if timeout >= 0:
if timeout == 0:
timeout = config.getint('general', 'notify_timeout')
self.mainloop.set_alarm_in(timeout, clear)
return msgs[0]
def update(self):
"""redraw interface"""
#who needs a header?
#head = urwid.Text('notmuch gui')
#h=urwid.AttrMap(head, 'header')
#self.mainframe.set_header(h)
# body
if self.current_buffer:
self.mainframe.set_body(self.current_buffer)
# footer
lines = []
if self.notificationbar: # .get_text()[0] != ' ':
lines.append(self.notificationbar)
if self.show_statusbar:
lines.append(self.build_statusbar())
if lines:
self.mainframe.set_footer(urwid.Pile(lines))
else:
self.mainframe.set_footer(None)
def build_statusbar(self):
"""construct and return statusbar widget"""
if self.current_buffer is not None:
idx = self.buffers.index(self.current_buffer)
lefttxt = '%d: %s' % (idx, self.current_buffer)
else:
lefttxt = '[no buffers]'
footerleft = urwid.Text(lefttxt, align='left')
righttxt = 'total messages: %d' % self.dbman.count_messages('*')
pending_writes = len(self.dbman.writequeue)
if pending_writes > 0:
righttxt = ('|' * pending_writes) + ' ' + righttxt
footerright = urwid.Text(righttxt, align='right')
columns = urwid.Columns([
footerleft,
('fixed', len(righttxt), footerright)])
return urwid.AttrMap(columns, 'global_footer')
def apply_command(self, cmd):
"""
applies a command
This calls the pre and post hooks attached to the command,
as well as :meth:`cmd.apply`.
:param cmd: an applicable command
:type cmd: :class:`~alot.commands.Command`
"""
if cmd:
# call pre- hook
if cmd.prehook:
logging.debug('calling pre-hook')
try:
cmd.prehook(ui=self, dbm=self.dbman, aman=self.accountman,
config=config)
except:
logging.exception('prehook failed')
# define (callback) function that invokes post-hook
def call_posthook(retval_from_apply):
if cmd.posthook:
logging.debug('calling post-hook')
try:
cmd.posthook(ui=self, dbm=self.dbman,
aman=self.accountman, config=config)
except:
logging.exception('posthook failed')
# define error handler for Failures/Exceptions
# raised in cmd.apply()
def errorHandler(failure):
logging.debug(failure.getTraceback())
msg = "Error: %s,\ncheck the log for details"
self.notify(msg % failure.getErrorMessage(), priority='error')
# call cmd.apply
logging.debug('apply command: %s' % cmd)
try:
retval = cmd.apply(self)
# if we deal with a InlineCallbacks-decorated method, it
# instantly returns a defered. This adds call/errbacks to react
# to successful/erroneous termination of the defered apply()
if isinstance(retval, defer.Deferred):
retval.addErrback(errorHandler)
retval.addCallback(call_posthook)
except Exception, e:
errorHandler(Failure(e))
| gpl-3.0 | 1,657,993,199,879,393,300 | 37.038217 | 79 | 0.556988 | false |
tsgit/invenio | modules/websession/lib/websession_regression_tests.py | 8 | 5312 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=E1102
"""WebSession Regression Test Suite."""
__revision__ = \
"$Id$"
from invenio.testutils import InvenioTestCase
from mechanize import Browser
from invenio.config import CFG_SITE_SECURE_URL, CFG_SITE_ADMIN_EMAIL
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
from invenio.dbquery import run_sql
class WebSessionWebPagesAvailabilityTest(InvenioTestCase):
"""Check WebSession web pages whether they are up or not."""
def test_your_account_pages_availability(self):
"""websession - availability of Your Account pages"""
baseurl = CFG_SITE_SECURE_URL + '/youraccount/'
# not testing 'change' endpoint here, since it is accepting
# only POST requests
_exports = ['', 'edit', 'lost', 'display',
'send_email', 'youradminactivities',
'delete', 'logout', 'login', 'register']
error_messages = []
for url in [baseurl + page for page in _exports]:
error_messages.extend(test_web_page_content(url))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
def test_your_groups_pages_availability(self):
"""websession - availability of Your Groups pages"""
baseurl = CFG_SITE_SECURE_URL + '/yourgroups/'
_exports = ['', 'display', 'create', 'join', 'leave', 'edit', 'members']
error_messages = []
for url in [baseurl + page for page in _exports]:
error_messages.extend(test_web_page_content(url))
if error_messages:
self.fail(merge_error_messages(error_messages))
return
class WebSessionLostYourPasswordTest(InvenioTestCase):
"""Test Lost Your Passwords functionality."""
def test_lost_your_password_for_internal_accounts(self):
"""websession - sending lost password for internal admin account"""
try_with_account = CFG_SITE_ADMIN_EMAIL
# click on "send lost password" for CFG_SITE_ADMIN_EMAIL internal account
browser = Browser()
browser.open(CFG_SITE_SECURE_URL + "/youraccount/lost")
browser.select_form(nr=0)
browser['p_email'] = try_with_account
try:
browser.submit()
except Exception, e:
# Restore the admin password (send_email set it to random number)
run_sql("UPDATE user SET password=AES_ENCRYPT(email, '')"
"WHERE id=1")
self.fail("Obtained %s: probably the email server is not installed "
"correctly." % e)
# verify the response:
expected_response = "Okay, a password reset link has been emailed to " + \
try_with_account
lost_password_response_body = browser.response().read()
try:
lost_password_response_body.index(expected_response)
except ValueError:
# Restore the admin password (send_email set it to random number)
run_sql("UPDATE user SET password=AES_ENCRYPT(email, '')"
"WHERE id=1")
self.fail("Expected to see %s, got %s." % \
(expected_response, lost_password_response_body))
def tearDown(self):
# Restore the admin password (send_email set it to random number)
run_sql("UPDATE user SET password=AES_ENCRYPT(email, '')"
"WHERE id=1")
class WebSessionExternalLoginTest(InvenioTestCase):
"""Test external login functionality."""
def test_no_external_login(self):
"""websession - openid, oauth1 or oauth2 external login option in log in page"""
base_url = CFG_SITE_SECURE_URL + '/youraccount'
login_url = base_url + '/login'
browser = Browser()
response = browser.open(login_url)
#Check all the links and see if any of them is of class openid (external login button)
for link in browser.links():
for value in link.attrs:
if (value[0] == 'class'):
if value[1] == 'openid_url':
self.fail("Openid external login in login page: %s" % link.attrs)
return
TEST_SUITE = make_test_suite(WebSessionWebPagesAvailabilityTest,
WebSessionLostYourPasswordTest,
WebSessionExternalLoginTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 | 6,097,270,312,127,321,000 | 38.348148 | 94 | 0.628389 | false |
xyzy/androguard | tests/test_ins.py | 38 | 5384 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, re
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from androguard.core.androgen import AndroguardS
from androguard.core.analysis import analysis
TESTS_CASES = [ #'examples/android/TC/bin/classes.dex',
'examples/android/TestsAndroguard/bin/classes.dex',
]
VALUES = {
'examples/android/TestsAndroguard/bin/classes.dex' : {
"Ltests/androguard/TestInvoke; <init> ()V" : {
0x0 : ("invoke-direct" , [['v',1] , ['meth@', 4, 'Ljava/lang/Object;', '()', 'V', '<init>']]),
0xa : ("invoke-virtual", [['v',1], ['v',0] , ['meth@', 49, 'Ltests/androguard/TestInvoke;', '(I)', 'I', 'TestInvoke1']]),
},
"Ltests/androguard/TestInvoke; TestInvoke1 (I)I" : {
0x4 : ("invoke-virtual", [['v',1] , ['v',2] , ['v',0] , ['meth@', 50,'Ltests/androguard/TestInvoke;' ,'(I I)', 'I', 'TestInvoke2']]),
},
"Ltests/androguard/TestInvoke; TestInvoke2 (I I)I" : {
0x4 : ("invoke-virtual", [['v',1] , ['v',2] , ['v',3] , ['v',0] , ['meth@', 51, 'Ltests/androguard/TestInvoke;', '(I I I)', 'I', 'TestInvoke3']]),
},
"Ltests/androguard/TestInvoke; TestInvoke3 (I I I)I" : {
0x4 : ("invoke-virtual", [['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 0], ['meth@', 52, 'Ltests/androguard/TestInvoke;', '(I I I I)', 'I', 'TestInvoke4']]),
},
"Ltests/androguard/TestInvoke; TestInvoke4 (I I I I)I" : {
0xe : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['meth@', 53, 'Ltests/androguard/TestInvoke;', '(I I I I I)', 'I', 'TestInvoke5']]),
},
"Ltests/androguard/TestInvoke; TestInvoke5 (I I I I I)I" : {
0x10 : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['v', 6], ['meth@', 54, 'Ltests/androguard/TestInvoke;', '(I I I I I I)', 'I', 'TestInvoke6']]),
},
"Ltests/androguard/TestInvoke; TestInvoke6 (I I I I I I)I" : {
0x12 : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['v', 6], ['v', 7], ['meth@', 55, 'Ltests/androguard/TestInvoke;', '(I I I I I I I)', 'I', 'TestInvoke7']]),
},
"Ltests/androguard/TestInvoke; TestInvoke7 (I I I I I I I)I" : {
0x16 : ("invoke-virtual/range", [['v', 0], ['v', 1], ['v', 2], ['v', 3], ['v', 4], ['v', 5], ['v', 6], ['v', 7], ['v', 8], ['meth@', 56, 'Ltests/androguard/TestInvoke;', '(I I I I I I I I)', 'I', 'TestInvoke8']]),
},
"Ltests/androguard/TestInvoke; TestInvoke8 (I I I I I I I I)I" : {
0x0 : ("mul-int", [['v', 0], ['v', 2], ['v', 3]]),
0x4 : ("mul-int/2addr", [['v', 0], ['v', 4]]),
0x10 : ("return", [['v', 0]]),
}
},
}
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '\t%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
def getVal(i) :
op = i.get_operands()
if isinstance(op, int) :
return [ op ]
elif i.get_name() == "lookupswitch" :
x = []
x.append( i.get_operands().default )
for idx in range(0, i.get_operands().npairs) :
off = getattr(i.get_operands(), "offset%d" % idx)
x.append( off )
return x
return [-1]
def check(a, values) :
for method in a.get_methods() :
key = method.get_class_name() + " " + method.get_name() + " " + method.get_descriptor()
if key not in values :
continue
print "CHECKING ...", method.get_class_name(), method.get_name(), method.get_descriptor()
code = method.get_code()
bc = code.get_bc()
idx = 0
for i in bc.get() :
# print "\t", "%x(%d)" % (idx, idx), i.get_name(), i.get_operands()
if idx in values[key] :
elem = values[key][idx]
val1 = i.get_name() + "%s" % i.get_operands()
val2 = elem[0] + "%s" % elem[1]
test(val1, val2)
del values[key][idx]
idx += i.get_length()
for i in TESTS_CASES :
a = AndroguardS( i )
check( a, VALUES[i] )
x = analysis.VMAnalysis( a.get_vm() )
print x
| apache-2.0 | -6,049,424,913,585,224,000 | 39.481203 | 234 | 0.495542 | false |
devilry/devilry-django | devilry/apps/core/tests/test_assignment_group_history.py | 1 | 8857 | from django.test import TestCase
from django.utils import timezone
from django.utils.timezone import timedelta
from model_bakery import baker
from devilry.apps.core import devilry_core_baker_factories as core_baker
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import AssignmentGroupHistory
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
class TestAssignmentGroupHistory(TestCase):
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_merge_history_meta_data(self):
assignment_group_history = baker.make('core.AssignmentGroupHistory')
datetime1 = (timezone.now() - timedelta(days=1)).isoformat()
datetime2 = (timezone.now() - timedelta(days=2)).isoformat()
assignment_group_history.merge_history = {
'merge_datetime': datetime1,
'state': None,
'groups': [
{
'merge_datetime': datetime2,
'state': {
'name': 'group1'
},
'groups': [
{
'merge_datetime': None,
'state': {
'name': 'group1'
},
'groups': []
},
{
'merge_datetime': None,
'state': {
'name': 'group3'
},
'groups': []
},
{
'merge_datetime': None,
'state': {
'name': 'group4'
},
'groups': []
}
]
},
{
'merge_datetime': None,
'state': {
'name': 'group2'
},
'groups': []
}
]
}
meta_data = assignment_group_history.meta_data
self.assertEqual(len(meta_data), 2)
self.assertDictEqual(meta_data[0], {
'merge_datetime': datetime1,
'groups': ['group1', 'group2']
})
self.assertDictEqual(meta_data[1], {
'merge_datetime': datetime2,
'groups': ['group1', 'group3', 'group4']
})
def test_merge_history_meta_data_real_groups(self):
test_assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
group1 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group1')
group2 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group2')
group3 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group3')
group4 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group4')
AssignmentGroup.merge_groups([group1, group2, group3])
AssignmentGroup.merge_groups([group1, group4])
meta_data = AssignmentGroupHistory.objects.get(assignment_group__id=group1.id).meta_data
self.assertEqual(len(meta_data), 2)
self.assertDictContainsSubset({
'groups': ['group1', 'group4']
}, meta_data[0])
self.assertDictContainsSubset({
'groups': ['group1', 'group2', 'group3']
}, meta_data[1])
def test_merge_single_assignment_groups(self):
test_assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
group1 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group1')
group2 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group2')
AssignmentGroup.merge_groups([group1, group2])
merge_history = AssignmentGroupHistory.objects.get(assignment_group__id=group1.id).merge_history
self.assertEqual(merge_history['groups'][0]['state']['name'], 'group1')
self.assertEqual(merge_history['groups'][1]['state']['name'], 'group2')
def test_merge_assignmentgroup_multiple_times(self):
test_assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
group1 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group1')
group2 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group2')
group3 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group3')
group4 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group4')
core_baker.candidate(group=group1)
core_baker.candidate(group=group1)
core_baker.candidate(group=group2)
core_baker.candidate(group=group3)
core_baker.candidate(group=group4)
core_baker.examiner(group=group1)
core_baker.examiner(group=group2)
core_baker.examiner(group=group2)
core_baker.examiner(group=group3)
group1_state = group1.get_current_state()
group2_state = group2.get_current_state()
group3_state = group3.get_current_state()
group4_state = group4.get_current_state()
AssignmentGroup.merge_groups([group1, group3])
AssignmentGroup.merge_groups([group2, group4])
group1_merge_history = AssignmentGroupHistory.objects.get(assignment_group__id=group1.id).merge_history
group2_merge_history = AssignmentGroupHistory.objects.get(assignment_group__id=group2.id).merge_history
self.assertDictEqual(group1_merge_history['groups'][0]['state'], group1_state)
self.assertDictEqual(group1_merge_history['groups'][1]['state'], group3_state)
self.assertDictEqual(group2_merge_history['groups'][0]['state'], group2_state)
self.assertDictEqual(group2_merge_history['groups'][1]['state'], group4_state)
group1 = AssignmentGroup.objects.get(id=group1.id)
group2 = AssignmentGroup.objects.get(id=group2.id)
# Checking one more level in the Btree
group1_state = AssignmentGroup.objects.get(id=group1.id).get_current_state()
group2_state = AssignmentGroup.objects.get(id=group2.id).get_current_state()
AssignmentGroup.merge_groups([group1, group2])
group1_merge_history_new = AssignmentGroupHistory.objects.get(assignment_group__id=group1.id).merge_history
self.assertListEqual(group1_merge_history_new['groups'][0]['groups'], group1_merge_history['groups'])
self.assertListEqual(group1_merge_history_new['groups'][1]['groups'], group2_merge_history['groups'])
self.assertDictEqual(group1_merge_history_new['groups'][0]['state'], group1_state)
self.assertDictEqual(group1_merge_history_new['groups'][1]['state'], group2_state)
def test_is_deleted_after_merge(self):
test_assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
group1 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group1')
group2 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group2')
group3 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group3')
group4 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group4')
AssignmentGroup.merge_groups([group1, group2])
historygroup1id = group1.assignmentgrouphistory.id
AssignmentGroup.merge_groups([group4, group3])
historygroup4id = group4.assignmentgrouphistory.id
AssignmentGroup.merge_groups([group1, group4])
with self.assertRaises(AssignmentGroupHistory.DoesNotExist):
AssignmentGroupHistory.objects.get(id=historygroup4id)
self.assertTrue(AssignmentGroupHistory.objects.filter(id=historygroup1id).exists())
# def test_num_queries(self):
# test_assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start')
# group1 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group1')
# group2 = baker.make('core.AssignmentGroup', parentnode=test_assignment, name='group2')
# group_baker.feedbackset_new_attempt_published(group1)
# group_baker.feedbackset_new_attempt_published(group1)
# group_baker.feedbackset_new_attempt_published(group2)
# group_baker.feedbackset_new_attempt_published(group2)
#
# with self.assertNumQueries(3):
# AssignmentGroup.merge_groups([group1, group2])
| bsd-3-clause | -6,874,605,748,674,799,000 | 49.902299 | 115 | 0.605848 | false |
ichuang/sympy | sympy/stats/tests/test_continuous_rv.py | 1 | 14252 | from sympy.stats import (P, E, where, density, variance, covariance, skewness,
given, pspace, cdf, ContinuousRV, sample)
from sympy.stats import (Arcsin, Benini, Beta, BetaPrime, Cauchy, Chi, Dagum,
Exponential, Gamma, Laplace, Logistic, LogNormal,
Maxwell, Nakagami, Normal, Pareto, Rayleigh, StudentT,
Triangular, Uniform, UniformSum, Weibull,
WignerSemicircle)
from sympy import (Symbol, Dummy, Abs, exp, S, N, pi, simplify, Interval, erf,
Eq, log, lowergamma, Sum, symbols, sqrt, And, gamma, beta,
Piecewise, Integral, sin, Lambda, factorial, binomial, floor)
from sympy.utilities.pytest import raises, XFAIL
oo = S.Infinity
_x = Dummy("x")
_z = Dummy("z")
def test_single_normal():
mu = Symbol('mu', real=True, bounded=True)
sigma = Symbol('sigma', real=True, positive=True, bounded=True)
X = Normal(0,1)
Y = X*sigma + mu
assert simplify(E(Y)) == mu
assert simplify(variance(Y)) == sigma**2
pdf = density(Y)
x = Symbol('x')
assert pdf(x) == 2**S.Half*exp(-(x - mu)**2/(2*sigma**2))/(2*pi**S.Half*sigma)
assert P(X**2 < 1) == erf(2**S.Half/2)
assert E(X, Eq(X, mu)) == mu
@XFAIL
def test_conditional_1d():
X = Normal(0,1)
Y = given(X, X>=0)
assert density(Y) == 2 * density(X)
assert Y.pspace.domain.set == Interval(0, oo)
assert E(Y) == sqrt(2) / sqrt(pi)
assert E(X**2) == E(Y**2)
def test_ContinuousDomain():
X = Normal(0,1)
assert where(X**2<=1).set == Interval(-1,1)
assert where(X**2<=1).symbol == X.symbol
where(And(X**2<=1, X>=0)).set == Interval(0,1)
raises(ValueError, "where(sin(X)>1)")
Y = given(X, X>=0)
assert Y.pspace.domain.set == Interval(0, oo)
def test_multiple_normal():
X, Y = Normal(0,1), Normal(0,1)
assert E(X+Y) == 0
assert variance(X+Y) == 2
assert variance(X+X) == 4
assert covariance(X, Y) == 0
assert covariance(2*X + Y, -X) == -2*variance(X)
assert E(X, Eq(X+Y, 0)) == 0
assert variance(X, Eq(X+Y, 0)) == S.Half
def test_symbolic():
mu1, mu2 = symbols('mu1 mu2', real=True, bounded=True)
s1, s2 = symbols('sigma1 sigma2', real=True, bounded=True, positive=True)
rate = Symbol('lambda', real=True, positive=True, bounded=True)
X = Normal(mu1, s1)
Y = Normal(mu2, s2)
Z = Exponential(rate)
a, b, c = symbols('a b c', real=True, bounded=True)
assert E(X) == mu1
assert E(X+Y) == mu1+mu2
assert E(a*X+b) == a*E(X)+b
assert variance(X) == s1**2
assert simplify(variance(X+a*Y+b)) == variance(X) + a**2*variance(Y)
assert E(Z) == 1/rate
assert E(a*Z+b) == a*E(Z)+b
assert E(X+a*Z+b) == mu1 + a/rate + b
def test_cdf():
X = Normal(0,1)
d = cdf(X)
assert P(X<1) == d(1)
assert d(0) == S.Half
d = cdf(X, X>0) # given X>0
assert d(0) == 0
Y = Exponential(10)
d = cdf(Y)
assert d(-5) == 0
assert P(Y > 3) == 1 - d(3)
raises(ValueError, "cdf(X+Y)")
Z = Exponential(1)
f = cdf(Z)
z = Symbol('z')
assert f(z) == Piecewise((0, z < 0), (1 - exp(-z), True))
def test_sample():
z = Symbol('z')
Z = ContinuousRV(z, exp(-z), set=Interval(0,oo))
assert sample(Z) in Z.pspace.domain.set
sym, val = Z.pspace.sample().items()[0]
assert sym == Z and val in Interval(0, oo)
def test_ContinuousRV():
x = Symbol('x')
pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
# X and Y should be equivalent
X = ContinuousRV(x, pdf)
Y = Normal(0, 1)
assert variance(X) == variance(Y)
assert P(X>0) == P(Y>0)
def test_arcsin():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
x = Symbol("x")
X = Arcsin(a, b, symbol=x)
assert density(X) == Lambda(_x, 1/(pi*sqrt((-_x + b)*(_x - a))))
def test_benini():
alpha = Symbol("alpha", positive=True)
b = Symbol("beta", positive=True)
sigma = Symbol("sigma", positive=True)
x = Symbol("x")
X = Benini(alpha, b, sigma, symbol=x)
assert density(X) == (Lambda(_x, (alpha/_x + 2*b*log(_x/sigma)/_x)
*exp(-alpha*log(_x/sigma) - b*log(_x/sigma)**2)))
def test_beta():
a, b = symbols('alpha beta', positive=True)
B = Beta(a, b)
assert pspace(B).domain.set == Interval(0, 1)
dens = density(B)
x = Symbol('x')
assert dens(x) == x**(a-1)*(1-x)**(b-1) / beta(a,b)
# This is too slow
# assert E(B) == a / (a + b)
# assert variance(B) == (a*b) / ((a+b)**2 * (a+b+1))
# Full symbolic solution is too much, test with numeric version
a, b = 1, 2
B = Beta(a, b)
assert E(B) == a / S(a + b)
assert variance(B) == (a*b) / S((a+b)**2 * (a+b+1))
def test_betaprime():
alpha = Symbol("alpha", positive=True)
beta = Symbol("beta", positive=True)
x = Symbol("x")
X = BetaPrime(alpha, beta, symbol=x)
assert density(X) == (Lambda(_x, _x**(alpha - 1)*(_x + 1)**(-alpha - beta)
*gamma(alpha + beta)/(gamma(alpha)*gamma(beta))))
def test_cauchy():
x0 = Symbol("x0")
gamma = Symbol("gamma", positive=True)
x = Symbol("x")
X = Cauchy(x0, gamma, symbol=x)
assert density(X) == Lambda(_x, 1/(pi*gamma*(1 + (_x - x0)**2/gamma**2)))
def test_chi():
k = Symbol("k", integer=True)
x = Symbol("x")
X = Chi(k, symbol=x)
assert density(X) == (Lambda(_x, 2**(-k/2 + 1)*_x**(k - 1)
*exp(-_x**2/2)/gamma(k/2)))
def test_dagum():
p = Symbol("p", positive=True)
b = Symbol("b", positive=True)
a = Symbol("a", positive=True)
x = Symbol("x")
X = Dagum(p, a, b, symbol=x)
assert density(X) == Lambda(_x,
a*p*(_x/b)**(a*p)*((_x/b)**a + 1)**(-p - 1)/_x)
def test_exponential():
rate = Symbol('lambda', positive=True, real=True, bounded=True)
X = Exponential(rate)
assert E(X) == 1/rate
assert variance(X) == 1/rate**2
assert skewness(X) == 2
assert P(X>0) == S(1)
assert P(X>1) == exp(-rate)
assert P(X>10) == exp(-10*rate)
assert where(X<=1).set == Interval(0,1)
def test_gamma():
k = Symbol("k", positive=True)
theta = Symbol("theta", positive=True)
x = Symbol("x")
X = Gamma(k, theta, symbol=x)
assert density(X) == Lambda(_x,
_x**(k - 1)*theta**(-k)*exp(-_x/theta)/gamma(k))
assert cdf(X, meijerg=True) == Lambda(_z, Piecewise((0, _z < 0),
(-k*lowergamma(k, 0)/gamma(k + 1) + k*lowergamma(k, _z/theta)/gamma(k + 1), True)))
assert variance(X) == (-theta**2*gamma(k + 1)**2/gamma(k)**2 +
theta*theta**(-k)*theta**(k + 1)*gamma(k + 2)/gamma(k))
k, theta = symbols('k theta', real=True, bounded=True, positive=True)
X = Gamma(k, theta)
assert simplify(E(X)) == k*theta
# can't get things to simplify on this one so we use subs
assert variance(X).subs(k,5) == (k*theta**2).subs(k, 5)
# The following is too slow
# assert simplify(skewness(X)).subs(k, 5) == (2/sqrt(k)).subs(k, 5)
def test_laplace():
mu = Symbol("mu")
b = Symbol("b", positive=True)
x = Symbol("x")
X = Laplace(mu, b, symbol=x)
assert density(X) == Lambda(_x, exp(-Abs(_x - mu)/b)/(2*b))
def test_logistic():
mu = Symbol("mu", real=True)
s = Symbol("s", positive=True)
x = Symbol("x")
X = Logistic(mu, s, symbol=x)
assert density(X) == Lambda(_x,
exp((-_x + mu)/s)/(s*(exp((-_x + mu)/s) + 1)**2))
def test_lognormal():
mean = Symbol('mu', real=True, bounded=True)
std = Symbol('sigma', positive=True, real=True, bounded=True)
X = LogNormal(mean, std)
# The sympy integrator can't do this too well
#assert E(X) == exp(mean+std**2/2)
#assert variance(X) == (exp(std**2)-1) * exp(2*mean + std**2)
# Right now, only density function and sampling works
# Test sampling: Only e^mean in sample std of 0
for i in range(3):
X = LogNormal(i, 0)
assert S(sample(X)) == N(exp(i))
# The sympy integrator can't do this too well
#assert E(X) ==
mu = Symbol("mu", real=True)
sigma = Symbol("sigma", positive=True)
x = Symbol("x")
X = LogNormal(mu, sigma, symbol=x)
assert density(X) == (Lambda(_x, sqrt(2)*exp(-(-mu + log(_x))**2
/(2*sigma**2))/(2*_x*sqrt(pi)*sigma)))
X = LogNormal(0, 1, symbol=Symbol('x')) # Mean 0, standard deviation 1
assert density(X) == Lambda(_x, sqrt(2)*exp(-log(_x)**2/2)/(2*_x*sqrt(pi)))
def test_maxwell():
a = Symbol("a", positive=True)
x = Symbol("x")
X = Maxwell(a, symbol=x)
assert density(X) == (Lambda(_x, sqrt(2)*_x**2*exp(-_x**2/(2*a**2))/
(sqrt(pi)*a**3)))
assert E(X) == 2*sqrt(2)*a/sqrt(pi)
assert simplify(variance(X)) == a**2*(-8 + 3*pi)/pi
def test_nakagami():
mu = Symbol("mu", positive=True)
omega = Symbol("omega", positive=True)
x = Symbol("x")
X = Nakagami(mu, omega, symbol=x)
assert density(X) == (Lambda(_x, 2*_x**(2*mu - 1)*mu**mu*omega**(-mu)
*exp(-_x**2*mu/omega)/gamma(mu)))
assert simplify(E(X, meijerg=True)) == (sqrt(mu)*sqrt(omega)
*gamma(mu + S.Half)/gamma(mu + 1))
assert (simplify(variance(X, meijerg=True)) ==
(omega*(gamma(mu)*gamma(mu + 1)
- gamma(mu + S.Half)**2)/(gamma(mu)*gamma(mu + 1))))
def test_pareto():
xm, beta = symbols('xm beta', positive=True, bounded=True)
alpha = beta + 5
X = Pareto(xm, alpha)
dens = density(X)
x = Symbol('x')
assert dens(x) == x**(-(alpha+1))*xm**(alpha)*(alpha)
# These fail because SymPy can not deduce that 1/xm != 0
# assert simplify(E(X)) == alpha*xm/(alpha-1)
# assert simplify(variance(X)) == xm**2*alpha / ((alpha-1)**2*(alpha-2))
def test_pareto_numeric():
xm, beta = 3, 2
alpha = beta + 5
X = Pareto(xm, alpha)
assert E(X) == alpha*xm/S(alpha-1)
assert variance(X) == xm**2*alpha / S(((alpha-1)**2*(alpha-2)))
# Skewness tests too slow. Try shortcutting function?
def test_rayleigh():
sigma = Symbol("sigma", positive=True)
x = Symbol("x")
X = Rayleigh(sigma, symbol=x)
assert density(X) == Lambda(_x, _x*exp(-_x**2/(2*sigma**2))/sigma**2)
assert E(X) == sqrt(2)*sqrt(pi)*sigma/2
assert variance(X) == -pi*sigma**2/2 + 2*sigma**2
def test_studentt():
nu = Symbol("nu", positive=True)
x = Symbol("x")
X = StudentT(nu, symbol=x)
assert density(X) == (Lambda(_x, (_x**2/nu + 1)**(-nu/2 - S.Half)
*gamma(nu/2 + S.Half)/(sqrt(pi)*sqrt(nu)*gamma(nu/2))))
@XFAIL
def test_triangular():
a = Symbol("a")
b = Symbol("b")
c = Symbol("c")
x = Symbol("x")
X = Triangular(a,b,c, symbol=x)
    assert density(X) == Lambda(_x,
Piecewise(((2*_x - 2*a)/((-a + b)*(-a + c)), And(a <= _x, _x < c)),
(2/(-a + b), _x == c),
((-2*_x + 2*b)/((-a + b)*(b - c)), And(_x <= b, c < _x)),
(0, True)))
def test_uniform():
l = Symbol('l', real=True, bounded=True)
w = Symbol('w', positive=True, bounded=True)
X = Uniform(l, l+w)
assert simplify(E(X)) == l + w/2
assert simplify(variance(X)) == w**2/12
assert P(X<l) == 0 and P(X>l+w) == 0
# With numbers all is well
X = Uniform(3, 5)
assert P(X<3) == 0 and P(X>5) == 0
assert P(X<4) == P(X>4) == S.Half
@XFAIL
def test_uniformsum():
n = Symbol("n", integer=True)
x = Symbol("x")
_k = Symbol("k")
X = UniformSum(n, symbol=x)
assert density(X) == (Lambda(_x, Sum((-1)**_k*(-_k + _x)**(n - 1)
*binomial(n, _k), (_k, 0, floor(_x)))/factorial(n - 1)))
def test_weibull():
a, b = symbols('a b', positive=True)
X = Weibull(a, b)
assert simplify(E(X)) == simplify(a * gamma(1 + 1/b))
assert simplify(variance(X)) == simplify(a**2 * gamma(1 + 2/b) - E(X)**2)
# Skewness tests too slow. Try shortcutting function?
def test_weibull_numeric():
# Test for integers and rationals
a = 1
bvals = [S.Half, 1, S(3)/2, 5]
for b in bvals:
X = Weibull(a, b)
assert simplify(E(X)) == simplify(a * gamma(1 + 1/S(b)))
assert simplify(variance(X)) == simplify(
a**2 * gamma(1 + 2/S(b)) - E(X)**2)
# Not testing Skew... it's slow with int/frac values > 3/2
def test_wignersemicircle():
R = Symbol("R", positive=True)
x = Symbol("x")
X = WignerSemicircle(R, symbol=x)
assert density(X) == Lambda(_x, 2*sqrt(-_x**2 + R**2)/(pi*R**2))
assert E(X) == 0
def test_prefab_sampling():
N = Normal(0, 1)
L = LogNormal(0, 1)
E = Exponential(1)
P = Pareto(1, 3)
W = Weibull(1, 1)
U = Uniform(0, 1)
B = Beta(2,5)
G = Gamma(1,3)
variables = [N,L,E,P,W,U,B,G]
niter = 10
for var in variables:
for i in xrange(niter):
assert sample(var) in var.pspace.domain.set
def test_input_value_assertions():
a, b = symbols('a b')
p, q = symbols('p q', positive=True)
raises(ValueError, "Normal(3, 0)")
raises(ValueError, "Normal(a, b)")
Normal(a, p) # No error raised
raises(ValueError, "Exponential(a)")
Exponential(p) # No error raised
for fn_name in ['Pareto', 'Weibull', 'Beta', 'Gamma']:
raises(ValueError, "%s(a, p)" % fn_name)
raises(ValueError, "%s(p, a)" % fn_name)
eval("%s(p, q)" % fn_name) # No error raised
@XFAIL
def test_unevaluated():
x = Symbol('x')
X = Normal(0,1, symbol=x)
assert E(X, evaluate=False) == (
Integral(sqrt(2)*x*exp(-x**2/2)/(2*sqrt(pi)), (x, -oo, oo)))
assert E(X+1, evaluate=False) == (
Integral(sqrt(2)*x*exp(-x**2/2)/(2*sqrt(pi)), (x, -oo, oo)) + 1)
assert P(X>0, evaluate=False) == (
Integral(sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)), (x, 0, oo)))
assert P(X>0, X**2<1, evaluate=False) == (
Integral(sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)*
Integral(sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)),
(x, -1, 1))), (x, 0, 1)))
| bsd-3-clause | 9,090,344,859,402,542,000 | 29.323404 | 87 | 0.532417 | false |
denny820909/builder | lib/python2.7/site-packages/buildbot_slave-0.8.8-py2.7.egg/buildslave/commands/transfer.py | 4 | 12091 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os, tarfile, tempfile
from twisted.python import log
from twisted.internet import defer
from buildslave.commands.base import Command
class TransferCommand(Command):
def finished(self, res):
if self.debug:
log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc))
# don't use self.sendStatus here, since we may no longer be running
# if we have been interrupted
upd = {'rc': self.rc}
if self.stderr:
upd['stderr'] = self.stderr
self.builder.sendUpdate(upd)
return res
def interrupt(self):
if self.debug:
log.msg('interrupted')
if self.interrupted:
return
self.rc = 1
self.interrupted = True
        # now we wait for the next trip around the loop. It abandons the file
# when it sees self.interrupted set.
class SlaveFileUploadCommand(TransferCommand):
"""
Upload a file from slave to build master
Arguments:
- ['workdir']: base directory to use
- ['slavesrc']: name of the slave-side file to read from
- ['writer']: RemoteReference to a transfer._FileWriter object
- ['maxsize']: max size (in bytes) of file to write
- ['blocksize']: max size for each data block
- ['keepstamp']: whether to preserve file modified and accessed times
"""
debug = False
def setup(self, args):
self.workdir = args['workdir']
self.filename = args['slavesrc']
self.writer = args['writer']
self.remaining = args['maxsize']
self.blocksize = args['blocksize']
self.keepstamp = args.get('keepstamp', False)
self.stderr = None
self.rc = 0
def start(self):
if self.debug:
log.msg('SlaveFileUploadCommand started')
# Open file
self.path = os.path.join(self.builder.basedir,
self.workdir,
os.path.expanduser(self.filename))
accessed_modified = None
try:
if self.keepstamp:
accessed_modified = (os.path.getatime(self.path),
os.path.getmtime(self.path))
self.fp = open(self.path, 'rb')
if self.debug:
log.msg("Opened '%s' for upload" % self.path)
except:
self.fp = None
self.stderr = "Cannot open file '%s' for upload" % self.path
self.rc = 1
if self.debug:
log.msg("Cannot open file '%s' for upload" % self.path)
self.sendStatus({'header': "sending %s" % self.path})
d = defer.Deferred()
self._reactor.callLater(0, self._loop, d)
def _close_ok(res):
self.fp = None
d1 = self.writer.callRemote("close")
def _utime_ok(res):
return self.writer.callRemote("utime", accessed_modified)
if self.keepstamp:
d1.addCallback(_utime_ok)
return d1
def _close_err(f):
self.rc = 1
self.fp = None
# call remote's close(), but keep the existing failure
d1 = self.writer.callRemote("close")
def eb(f2):
log.msg("ignoring error from remote close():")
log.err(f2)
d1.addErrback(eb)
d1.addBoth(lambda _ : f) # always return _loop failure
return d1
d.addCallbacks(_close_ok, _close_err)
d.addBoth(self.finished)
return d
def _loop(self, fire_when_done):
d = defer.maybeDeferred(self._writeBlock)
def _done(finished):
if finished:
fire_when_done.callback(None)
else:
self._loop(fire_when_done)
def _err(why):
fire_when_done.errback(why)
d.addCallbacks(_done, _err)
return None
def _writeBlock(self):
"""Write a block of data to the remote writer"""
if self.interrupted or self.fp is None:
if self.debug:
log.msg('SlaveFileUploadCommand._writeBlock(): end')
return True
length = self.blocksize
if self.remaining is not None and length > self.remaining:
length = self.remaining
if length <= 0:
if self.stderr is None:
self.stderr = 'Maximum filesize reached, truncating file \'%s\'' \
% self.path
self.rc = 1
data = ''
else:
data = self.fp.read(length)
if self.debug:
log.msg('SlaveFileUploadCommand._writeBlock(): '+
'allowed=%d readlen=%d' % (length, len(data)))
if len(data) == 0:
log.msg("EOF: callRemote(close)")
return True
if self.remaining is not None:
self.remaining = self.remaining - len(data)
assert self.remaining >= 0
d = self.writer.callRemote('write', data)
d.addCallback(lambda res: False)
return d
class SlaveDirectoryUploadCommand(SlaveFileUploadCommand):
debug = False
def setup(self, args):
self.workdir = args['workdir']
self.dirname = args['slavesrc']
self.writer = args['writer']
self.remaining = args['maxsize']
self.blocksize = args['blocksize']
self.compress = args['compress']
self.stderr = None
self.rc = 0
def start(self):
if self.debug:
log.msg('SlaveDirectoryUploadCommand started')
self.path = os.path.join(self.builder.basedir,
self.workdir,
os.path.expanduser(self.dirname))
if self.debug:
log.msg("path: %r" % self.path)
# Create temporary archive
fd, self.tarname = tempfile.mkstemp()
fileobj = os.fdopen(fd, 'w')
if self.compress == 'bz2':
mode='w|bz2'
elif self.compress == 'gz':
mode='w|gz'
else:
mode = 'w'
archive = tarfile.open(name=self.tarname, mode=mode, fileobj=fileobj)
archive.add(self.path, '')
archive.close()
fileobj.close()
# Transfer it
self.fp = open(self.tarname, 'rb')
self.sendStatus({'header': "sending %s" % self.path})
d = defer.Deferred()
self._reactor.callLater(0, self._loop, d)
def unpack(res):
d1 = self.writer.callRemote("unpack")
def unpack_err(f):
self.rc = 1
return f
d1.addErrback(unpack_err)
d1.addCallback(lambda ignored: res)
return d1
d.addCallback(unpack)
d.addBoth(self.finished)
return d
def finished(self, res):
self.fp.close()
os.remove(self.tarname)
return TransferCommand.finished(self, res)
class SlaveFileDownloadCommand(TransferCommand):
"""
Download a file from master to slave
Arguments:
- ['workdir']: base directory to use
- ['slavedest']: name of the slave-side file to be created
- ['reader']: RemoteReference to a transfer._FileReader object
- ['maxsize']: max size (in bytes) of file to write
- ['blocksize']: max size for each data block
- ['mode']: access mode for the new file
"""
debug = False
def setup(self, args):
self.workdir = args['workdir']
self.filename = args['slavedest']
self.reader = args['reader']
self.bytes_remaining = args['maxsize']
self.blocksize = args['blocksize']
self.mode = args['mode']
self.stderr = None
self.rc = 0
def start(self):
if self.debug:
log.msg('SlaveFileDownloadCommand starting')
# Open file
self.path = os.path.join(self.builder.basedir,
self.workdir,
os.path.expanduser(self.filename))
dirname = os.path.dirname(self.path)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
self.fp = open(self.path, 'wb')
if self.debug:
log.msg("Opened '%s' for download" % self.path)
if self.mode is not None:
# note: there is a brief window during which the new file
# will have the buildslave's default (umask) mode before we
# set the new one. Don't use this mode= feature to keep files
# private: use the buildslave's umask for that instead. (it
# is possible to call os.umask() before and after the open()
# call, but cleaning up from exceptions properly is more of a
# nuisance that way).
os.chmod(self.path, self.mode)
except IOError:
# TODO: this still needs cleanup
self.fp = None
self.stderr = "Cannot open file '%s' for download" % self.path
self.rc = 1
if self.debug:
log.msg("Cannot open file '%s' for download" % self.path)
d = defer.Deferred()
self._reactor.callLater(0, self._loop, d)
def _close(res):
# close the file, but pass through any errors from _loop
d1 = self.reader.callRemote('close')
d1.addErrback(log.err, 'while trying to close reader')
d1.addCallback(lambda ignored: res)
return d1
d.addBoth(_close)
d.addBoth(self.finished)
return d
def _loop(self, fire_when_done):
d = defer.maybeDeferred(self._readBlock)
def _done(finished):
if finished:
fire_when_done.callback(None)
else:
self._loop(fire_when_done)
def _err(why):
fire_when_done.errback(why)
d.addCallbacks(_done, _err)
return None
def _readBlock(self):
"""Read a block of data from the remote reader."""
if self.interrupted or self.fp is None:
if self.debug:
log.msg('SlaveFileDownloadCommand._readBlock(): end')
return True
length = self.blocksize
if self.bytes_remaining is not None and length > self.bytes_remaining:
length = self.bytes_remaining
if length <= 0:
if self.stderr is None:
self.stderr = "Maximum filesize reached, truncating file '%s'" \
% self.path
self.rc = 1
return True
else:
d = self.reader.callRemote('read', length)
d.addCallback(self._writeData)
return d
def _writeData(self, data):
if self.debug:
log.msg('SlaveFileDownloadCommand._readBlock(): readlen=%d' %
len(data))
if len(data) == 0:
return True
if self.bytes_remaining is not None:
self.bytes_remaining = self.bytes_remaining - len(data)
assert self.bytes_remaining >= 0
self.fp.write(data)
return False
def finished(self, res):
if self.fp is not None:
self.fp.close()
return TransferCommand.finished(self, res)
| mit | 1,681,450,181,126,970,400 | 32.963483 | 82 | 0.553056 | false |
palladius/gcloud | packages/gcutil-1.7.1/lib/python_gflags/gflags.py | 19 | 104226 | #!/usr/bin/env python
#
# Copyright (c) 2002, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ---
# Author: Chad Lester
# Design and style contributions by:
# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
# Eric Veach, Laurence Gonsalves, Matthew Springer
# Code reorganized a bit by Craig Silverstein
"""This module is used to define and parse command line flags.
This module defines a *distributed* flag-definition policy: rather than
an application having to define all flags in or near main(), each python
module defines flags that are useful to it. When one python module
imports another, it gains access to the other's flags. (This is
implemented by having all modules share a common, global registry object
containing all the flag information.)
Flags are defined through the use of one of the DEFINE_xxx functions.
The specific function used determines how the flag is parsed, checked,
and optionally type-converted, when it's seen on the command line.
IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
'FlagValues' object (typically the global FlagValues FLAGS, defined
here). The 'FlagValues' object can scan the command line arguments and
pass flag arguments to the corresponding 'Flag' objects for
value-checking and type conversion. The converted flag values are
available as attributes of the 'FlagValues' object.
Code can access the flag through a FlagValues object, for instance
gflags.FLAGS.myflag. Typically, the __main__ module passes the command
line arguments to gflags.FLAGS for parsing.
At bottom, this module calls getopt(), so getopt functionality is
supported, including short- and long-style flags, and the use of -- to
terminate flags.
Methods defined by the flag module will throw 'FlagsError' exceptions.
The exception argument will be a human-readable string.
FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
take a name, default value, help-string, and optional 'short' name
(one-letter name). Some flags have other arguments, which are described
with the flag.
DEFINE_string: takes any input, and interprets it as a string.
DEFINE_bool or
DEFINE_boolean: typically does not take an argument: say --myflag to
set FLAGS.myflag to true, or --nomyflag to set
FLAGS.myflag to false. Alternately, you can say
--myflag=true or --myflag=t or --myflag=1 or
--myflag=false or --myflag=f or --myflag=0
DEFINE_float: takes an input and interprets it as a floating point
number. Takes optional args lower_bound and upper_bound;
if the number specified on the command line is out of
              range, it will raise a FlagsError.
DEFINE_integer: takes an input and interprets it as an integer. Takes
optional args lower_bound and upper_bound as for floats.
DEFINE_enum: takes a list of strings which represents legal values. If
the command-line value is not in this list, raise a flag
error. Otherwise, assign to FLAGS.flag as a string.
DEFINE_list: Takes a comma-separated list of strings on the commandline.
Stores them in a python list object.
DEFINE_spaceseplist: Takes a space-separated list of strings on the
commandline. Stores them in a python list object.
Example: --myspacesepflag "foo bar baz"
DEFINE_multistring: The same as DEFINE_string, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of strings),
even if the flag is only on the command line once.
DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
specified more than once on the commandline. The
result is a python list object (list of ints), even if
the flag is only on the command line once.
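As a quick illustration of the list-style types above (the flag names here
are invented for this sketch only):
  gflags.DEFINE_list('languages', 'en,fr', 'Comma-separated language codes.')
  gflags.DEFINE_multistring('extra_path', [], 'Repeatable flag.')
  # --languages=en,de,it yields FLAGS.languages == ['en', 'de', 'it']
  # --extra_path=a --extra_path=b yields FLAGS.extra_path == ['a', 'b']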
SPECIAL FLAGS: There are a few flags that have special meaning:
--help prints a list of all the flags in a human-readable fashion
--helpshort prints a list of all key flags (see below).
--helpxml prints a list of all flags, in XML format. DO NOT parse
the output of --help and --helpshort. Instead, parse
the output of --helpxml. For more info, see
"OUTPUT FOR --helpxml" below.
--flagfile=foo read flags from file foo.
--undefok=f1,f2 ignore unrecognized option errors for f1,f2.
For boolean flags, you should use --undefok=boolflag, and
--boolflag and --noboolflag will be accepted. Do not use
--undefok=noboolflag.
-- as in getopt(), terminates flag-processing
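As an illustration (the script name, file name, and flag names below are
invented for this sketch), several of these can be combined in one call:
  myscript.py --flagfile=prod.cfg --undefok=legacy_flag -- input1 input2
This reads extra flags from prod.cfg, suppresses the unrecognized-flag error
if --legacy_flag happens not to be defined, and, because of the bare '--',
treats input1 and input2 as plain arguments rather than flags.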
FLAGS VALIDATORS: If your program:
- requires flag X to be specified
- needs flag Y to match a regular expression
- or requires any more general constraint to be satisfied
then validators are for you!
Each validator represents a constraint over one flag, which is enforced
starting from the initial parsing of the flags and until the program
terminates.
Also, lower_bound and upper_bound for numerical flags are enforced using flag
validators.
Howto:
If you want to enforce a constraint over one flag, use
gflags.RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS)
After flag values are initially parsed, and after any change to the specified
flag, method checker(flag_value) will be executed. If constraint is not
satisfied, an IllegalFlagValue exception will be raised. See
RegisterValidator's docstring for a detailed explanation on how to construct
your own checker.
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
gflags.DEFINE_integer('my_version', 0, 'Version number.')
gflags.DEFINE_string('filename', None, 'Input file name', short_name='f')
gflags.RegisterValidator('my_version',
lambda value: value % 2 == 0,
message='--my_version must be divisible by 2')
gflags.MarkFlagAsRequired('filename')
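With the definitions above, an illustrative consequence (this sketch is not
part of the original text): invoking the script with --my_version=7 makes the
checker return False, so parsing raises an IllegalFlagValue carrying the
message '--my_version must be divisible by 2'; omitting --filename entirely
fails in the same way, because MarkFlagAsRequired registers a validator that
rejects a None value.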
NOTE ON --flagfile:
Flags may be loaded from text files in addition to being specified on
the commandline.
Any flags you don't feel like typing, throw them in a file, one flag per
line, for instance:
--myflag=myvalue
--nomyboolean_flag
You then specify your file with the special flag '--flagfile=somefile'.
You CAN recursively nest flagfile= tokens OR use multiple files on the
command line. Lines beginning with a single hash '#' or a double slash
'//' are comments in your flagfile.
Any flagfile=<file> will be interpreted as having a relative path from
the current working directory rather than from the place the file was
included from:
myPythonScript.py --flagfile=config/somefile.cfg
If somefile.cfg includes further --flagfile= directives, these will be
referenced relative to the original CWD, not from the directory the
including flagfile was found in!
The caveat applies to people who are including a series of nested files
in a different dir than they are executing out of. Relative path names
are always from CWD, not from the directory of the parent include
flagfile. We do now support '~' expanded directory names.
Absolute path names ALWAYS work!
EXAMPLE USAGE:
FLAGS = gflags.FLAGS
# Flag names are globally defined! So in general, we need to be
# careful to pick names that are unlikely to be used by other libraries.
# If there is a conflict, we'll get an error at import time.
gflags.DEFINE_string('name', 'Mr. President', 'your name')
gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
gflags.DEFINE_boolean('debug', False, 'produces debugging output')
gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
def main(argv):
try:
argv = FLAGS(argv) # parse flags
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
if FLAGS.debug: print 'non-flag arguments:', argv
print 'Happy Birthday', FLAGS.name
if FLAGS.age is not None:
print 'You are a %d year old %s' % (FLAGS.age, FLAGS.gender)
if __name__ == '__main__':
main(sys.argv)
KEY FLAGS:
As we already explained, each module gains access to all flags defined
by all the other modules it transitively imports. In the case of
non-trivial scripts, this means a lot of flags ... For documentation
purposes, it is good to identify the flags that are key (i.e., really
important) to a module. Clearly, the concept of "key flag" is a
subjective one. When trying to determine whether a flag is key to a
module or not, assume that you are trying to explain your module to a
potential user: which flags would you really like to mention first?
We'll describe shortly how to declare which flags are key to a module.
For the moment, assume we know the set of key flags for each module.
Then, if you use the app.py module, you can use the --helpshort flag to
print only the help for the flags that are key to the main module, in a
human-readable format.
NOTE: If you need to parse the flag help, do NOT use the output of
--help / --helpshort. That output is meant for human consumption, and
may be changed in the future. Instead, use --helpxml; flags that are
key for the main module are marked there with a <key>yes</key> element.
The set of key flags for a module M is composed of:
1. Flags defined by module M by calling a DEFINE_* function.
  2. Flags that module M explicitly declares as key by using the function
DECLARE_key_flag(<flag_name>)
3. Key flags of other modules that M specifies by using the function
ADOPT_module_key_flags(<other_module>)
This is a "bulk" declaration of key flags: each flag that is key for
<other_module> becomes key for the current module too.
Notice that if you do not use the functions described at points 2 and 3
above, then --helpshort prints information only about the flags defined
by the main module of our script. In many cases, this behavior is good
enough. But if you move part of the main module code (together with the
related flags) into a different module, then it is nice to use
DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
lists all relevant flags (otherwise, your code refactoring may confuse
your users).
Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
  pluses and minuses: DECLARE_key_flag is more targeted and may lead to a
more focused --helpshort documentation. ADOPT_module_key_flags is good
for cases when an entire module is considered key to the current script.
Also, it does not require updates to client scripts when a new flag is
added to the module.
EXAMPLE USAGE 2 (WITH KEY FLAGS):
Consider an application that contains the following three files (two
auxiliary modules and a main module)
File libfoo.py:
import gflags
gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
... some code ...
File libbar.py:
import gflags
gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
'Path to the GFS files for libbar.')
gflags.DEFINE_string('email_for_bar_errors', '[email protected]',
'Email address for bug reports about module libbar.')
gflags.DEFINE_boolean('bar_risky_hack', False,
'Turn on an experimental and buggy optimization.')
... some code ...
File myscript.py:
import gflags
import libfoo
import libbar
gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
# Declare that all flags that are key for libfoo are
# key for this module too.
gflags.ADOPT_module_key_flags(libfoo)
# Declare that the flag --bar_gfs_path (defined in libbar) is key
# for this module.
gflags.DECLARE_key_flag('bar_gfs_path')
... some code ...
  When myscript is invoked with the flag --helpshort, the resulting help
message lists information about all the key flags for myscript:
--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path.
Of course, myscript uses all the flags declared by it (in this case,
  just --num_iterations) or by any of the modules it transitively imports
(e.g., the modules libfoo, libbar). E.g., it can access the value of
FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
flag for myscript.
OUTPUT FOR --helpxml:
The --helpxml flag generates output with the following structure:
<?xml version="1.0"?>
<AllFlags>
<program>PROGRAM_BASENAME</program>
<usage>MAIN_MODULE_DOCSTRING</usage>
(<flag>
[<key>yes</key>]
<file>DECLARING_MODULE</file>
<name>FLAG_NAME</name>
<meaning>FLAG_HELP_MESSAGE</meaning>
<default>DEFAULT_FLAG_VALUE</default>
<current>CURRENT_FLAG_VALUE</current>
<type>FLAG_TYPE</type>
[OPTIONAL_ELEMENTS]
</flag>)*
</AllFlags>
Notes:
1. The output is intentionally similar to the output generated by the
C++ command-line flag library. The few differences are due to the
Python flags that do not have a C++ equivalent (at least not yet),
e.g., DEFINE_list.
2. New XML elements may be added in the future.
3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
pass for this flag on the command-line. E.g., for a flag defined
using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
4. CURRENT_FLAG_VALUE is produced using str(). This means that the
string 'false' will be represented in the same way as the boolean
False. Using repr() would have removed this ambiguity and simplified
parsing, but would have broken the compatibility with the C++
command-line flags.
5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
flags: lower_bound, upper_bound (for flags that specify bounds),
enum_value (for enum flags), list_separator (for flags that consist of
a list of values, separated by a special token).
6. We do not provide any example here: please use --helpxml instead.
This module requires at least python 2.2.1 to run.
"""
import cgi
import getopt
import os
import re
import string
import struct
import sys
# pylint: disable-msg=C6204
try:
import fcntl
except ImportError:
fcntl = None
try:
# Importing termios will fail on non-unix platforms.
import termios
except ImportError:
termios = None
import gflags_validators
# pylint: enable-msg=C6204
# Are we running under pychecker?
_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
def _GetCallingModuleObjectAndName():
"""Returns the module that's calling into this module.
We generally use this function to get the name of the module calling a
DEFINE_foo... function.
"""
# Walk down the stack to find the first globals dict that's not ours.
for depth in range(1, sys.getrecursionlimit()):
if not sys._getframe(depth).f_globals is globals():
globals_for_frame = sys._getframe(depth).f_globals
module, module_name = _GetModuleObjectAndName(globals_for_frame)
if module_name is not None:
return module, module_name
raise AssertionError("No module was found")
def _GetCallingModule():
"""Returns the name of the module that's calling into this module."""
return _GetCallingModuleObjectAndName()[1]
def _GetThisModuleObjectAndName():
"""Returns: (module object, module name) for this module."""
return _GetModuleObjectAndName(globals())
# module exceptions:
class FlagsError(Exception):
"""The base class for all flags errors."""
pass
class DuplicateFlag(FlagsError):
"""Raised if there is a flag naming conflict."""
pass
class CantOpenFlagFileError(FlagsError):
"""Raised if flagfile fails to open: doesn't exist, wrong permissions, etc."""
pass
class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
"""Special case of DuplicateFlag -- SWIG flag value can't be set to None.
This can be raised when a duplicate flag is created. Even if allow_override is
True, we still abort if the new value is None, because it's currently
impossible to pass None default value back to SWIG. See FlagValues.SetDefault
for details.
"""
pass
class DuplicateFlagError(DuplicateFlag):
"""A DuplicateFlag whose message cites the conflicting definitions.
A DuplicateFlagError conveys more information than a DuplicateFlag,
namely the modules where the conflicting definitions occur. This
class was created to avoid breaking external modules which depend on
the existing DuplicateFlags interface.
"""
def __init__(self, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
"""
self.flagname = flagname
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
msg = "The flag '%s' is defined twice. First from %s, Second from %s" % (
self.flagname, first_module, second_module)
DuplicateFlag.__init__(self, msg)
class IllegalFlagValue(FlagsError):
"""The flag command line argument is illegal."""
pass
class UnrecognizedFlag(FlagsError):
"""Raised if a flag is unrecognized."""
pass
# An UnrecognizedFlagError conveys more information than an UnrecognizedFlag.
# Since there are external modules that create DuplicateFlags, the interface to
# DuplicateFlag shouldn't change. The flagvalue will be assigned the full value
# of the flag and its argument, if any, allowing handling of unrecognized flags
# in an exception handler.
# If flagvalue is the empty string, then this exception is due to a
# reference to a flag that was not already defined.
class UnrecognizedFlagError(UnrecognizedFlag):
def __init__(self, flagname, flagvalue=''):
self.flagname = flagname
self.flagvalue = flagvalue
UnrecognizedFlag.__init__(
self, "Unknown command line flag '%s'" % flagname)
# Global variable used by expvar
_exported_flags = {}
_help_width = 80 # width of help output
def GetHelpWidth():
"""Returns: an integer, the width of help lines that is used in TextWrap."""
if (not sys.stdout.isatty()) or (termios is None) or (fcntl is None):
return _help_width
try:
data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
columns = struct.unpack('hh', data)[1]
# Emacs mode returns 0.
# Here we assume that any value below 40 is unreasonable
if columns >= 40:
return columns
    # Returning an int as default is fine, int(int) just returns the int.
return int(os.getenv('COLUMNS', _help_width))
except (TypeError, IOError, struct.error):
return _help_width
def CutCommonSpacePrefix(text):
"""Removes a common space prefix from the lines of a multiline text.
  If the first line does not start with a space, it is left as it is and
  a common space prefix is searched for only in the remaining lines.
  That means the first line will stay untouched. This is especially
useful to turn doc strings into help texts. This is because some
people prefer to have the doc comment start already after the
apostrophe and then align the following lines while others have the
apostrophes on a separate line.
The function also drops trailing empty lines and ignores empty lines
following the initial content line while calculating the initial
common whitespace.
Args:
text: text to work on
Returns:
the resulting text
"""
text_lines = text.splitlines()
# Drop trailing empty lines
while text_lines and not text_lines[-1]:
text_lines = text_lines[:-1]
if text_lines:
# We got some content, is the first line starting with a space?
if text_lines[0] and text_lines[0][0].isspace():
text_first_line = []
else:
text_first_line = [text_lines.pop(0)]
# Calculate length of common leading whitespace (only over content lines)
common_prefix = os.path.commonprefix([line for line in text_lines if line])
space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
# If we have a common space prefix, drop it from all lines
if space_prefix_len:
for index in xrange(len(text_lines)):
if text_lines[index]:
text_lines[index] = text_lines[index][space_prefix_len:]
return '\n'.join(text_first_line + text_lines)
return ''
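# Illustrative sketch (not part of the original module): how
# CutCommonSpacePrefix treats a typical docstring-shaped string; the input
# text below is made up.
#
#   text = ("First line with no leading space\n"
#           "    second line\n"
#           "    third line\n")
#   CutCommonSpacePrefix(text)
#   # -> 'First line with no leading space\nsecond line\nthird line'
#   # The first line is kept untouched; the common 4-space prefix is
#   # stripped from the remaining lines.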
def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
"""Wraps a given text to a maximum line length and returns it.
We turn lines that only contain whitespace into empty lines. We keep
new lines and tabs (e.g., we do not treat tabs as spaces).
Args:
text: text to wrap
length: maximum length of a line, includes indentation
if this is None then use GetHelpWidth()
indent: indent for all but first line
firstline_indent: indent for first line; if None, fall back to indent
tabs: replacement for tabs
Returns:
wrapped text
Raises:
FlagsError: if indent not shorter than length
FlagsError: if firstline_indent not shorter than length
"""
# Get defaults where callee used None
if length is None:
length = GetHelpWidth()
if indent is None:
indent = ''
if len(indent) >= length:
raise FlagsError('Indent must be shorter than length')
# In line we will be holding the current line which is to be started
# with indent (or firstline_indent if available) and then appended
# with words.
if firstline_indent is None:
firstline_indent = ''
line = indent
else:
line = firstline_indent
if len(firstline_indent) >= length:
raise FlagsError('First line indent must be shorter than length')
  # If the callee does not care about tabs we simply convert them to
  # spaces. If the callee wanted tabs to be a single space then we do
  # that here already.
if not tabs or tabs == ' ':
text = text.replace('\t', ' ')
else:
tabs_are_whitespace = not tabs.strip()
line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
# Split the text into lines and the lines with the regex above. The
# resulting lines are collected in result[]. For each split we get the
# spaces, the tabs and the next non white space (e.g. next word).
result = []
for text_line in text.splitlines():
# Store result length so we can find out whether processing the next
# line gave any new content
old_result_len = len(result)
# Process next line with line_regex. For optimization we do an rstrip().
# - process tabs (changes either line or word, see below)
# - process word (first try to squeeze on line, then wrap or force wrap)
# Spaces found on the line are ignored, they get added while wrapping as
# needed.
for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
# If tabs weren't converted to spaces, handle them now
if current_tabs:
# If the last thing we added was a space anyway then drop
# it. But let's not get rid of the indentation.
if (((result and line != indent) or
(not result and line != firstline_indent)) and line[-1] == ' '):
line = line[:-1]
# Add the tabs, if that means adding whitespace, just add it at
          # the line; the rstrip() code will shorten the line down if
          # necessary.
if tabs_are_whitespace:
line += tabs * len(current_tabs)
else:
# if not all tab replacement is whitespace we prepend it to the word
word = tabs * len(current_tabs) + word
# Handle the case where word cannot be squeezed onto current last line
if len(line) + len(word) > length and len(indent) + len(word) <= length:
result.append(line.rstrip())
line = indent + word
word = ''
# No space left on line or can we append a space?
if len(line) + 1 >= length:
result.append(line.rstrip())
line = indent
else:
line += ' '
# Add word and shorten it up to allowed line length. Restart next
# line with indent and repeat, or add a space if we're done (word
        # finished). This deals with words that cannot fit on one line
# (e.g. indent + word longer than allowed line length).
while len(line) + len(word) >= length:
line += word
result.append(line[:length])
word = line[length:]
line = indent
# Default case, simply append the word and a space
if word:
line += word + ' '
# End of input line. If we have content we finish the line. If the
# current line is just the indent but we had content in during this
# original line then we need to add an empty line.
if (result and line != indent) or (not result and line != firstline_indent):
result.append(line.rstrip())
elif len(result) == old_result_len:
result.append('')
line = indent
return '\n'.join(result)
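# Illustrative sketch (not part of the original module): a typical TextWrap
# call as used by the help renderer further below; the sample text and
# widths are made up.
#
#   TextWrap('some rather long help text for a flag', length=20,
#            indent='    ', firstline_indent='--foo: ')
#   # returns:
#   #   --foo: some rather
#   #       long help text
#   #       for a flag
#   # The first line starts with firstline_indent, the later lines with
#   # indent, and no line exceeds the requested length.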
def DocToHelp(doc):
"""Takes a __doc__ string and reformats it as help."""
# Get rid of starting and ending white space. Using lstrip() or even
# strip() could drop more than maximum of first line and right space
# of last line.
doc = doc.strip()
# Get rid of all empty lines
whitespace_only_line = re.compile('^[ \t]+$', re.M)
doc = whitespace_only_line.sub('', doc)
# Cut out common space at line beginnings
doc = CutCommonSpacePrefix(doc)
# Just like this module's comment, comments tend to be aligned somehow.
# In other words they all start with the same amount of white space
# 1) keep double new lines
# 2) keep ws after new lines if not empty line
# 3) all other new lines shall be changed to a space
  # Solution: Match new lines between non white space and replace with space.
  # Note: re.sub's fourth positional argument is 'count', so passing re.M
  # there would silently limit the number of substitutions; the pattern
  # needs no MULTILINE flag, so none is passed.
  doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc)
return doc
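# Illustrative sketch (not part of the original module): DocToHelp is
# typically fed a __doc__ string so that indentation and single newlines
# are normalized before TextWrap is applied; the docstring below is made up.
#
#   def _SomeFunction():
#     """Does something useful.
#
#     The details span
#     several source lines.
#     """
#   DocToHelp(_SomeFunction.__doc__)
#   # -> 'Does something useful.\n\nThe details span several source lines.'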
def _GetModuleObjectAndName(globals_dict):
"""Returns the module that defines a global environment, and its name.
Args:
globals_dict: A dictionary that should correspond to an environment
providing the values of the globals.
Returns:
A pair consisting of (1) module object and (2) module name (a
string). Returns (None, None) if the module could not be
identified.
"""
# The use of .items() (instead of .iteritems()) is NOT a mistake: if
# a parallel thread imports a module while we iterate over
# .iteritems() (not nice, but possible), we get a RuntimeError ...
# Hence, we use the slightly slower but safer .items().
for name, module in sys.modules.items():
if getattr(module, '__dict__', None) is globals_dict:
if name == '__main__':
# Pick a more informative name for the main module.
name = sys.argv[0]
return (module, name)
return (None, None)
def _GetMainModule():
"""Returns: string, name of the module from which execution started."""
# First, try to use the same logic used by _GetCallingModuleObjectAndName(),
# i.e., call _GetModuleObjectAndName(). For that we first need to
# find the dictionary that the main module uses to store the
# globals.
#
# That's (normally) the same dictionary object that the deepest
# (oldest) stack frame is using for globals.
deepest_frame = sys._getframe(0)
while deepest_frame.f_back is not None:
deepest_frame = deepest_frame.f_back
globals_for_main_module = deepest_frame.f_globals
main_module_name = _GetModuleObjectAndName(globals_for_main_module)[1]
# The above strategy fails in some cases (e.g., tools that compute
# code coverage by redefining, among other things, the main module).
# If so, just use sys.argv[0]. We can probably always do this, but
# it's safest to try to use the same logic as _GetCallingModuleObjectAndName()
if main_module_name is None:
main_module_name = sys.argv[0]
return main_module_name
class FlagValues:
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: gflags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
def __init__(self):
# Since everything in this class is so heavily overloaded, the only
# way of defining and using fields is to access __dict__ directly.
# Dictionary: flag name (string) -> Flag object.
self.__dict__['__flags'] = {}
# Dictionary: module name (string) -> list of Flag objects that are defined
# by that module.
self.__dict__['__flags_by_module'] = {}
# Dictionary: module id (int) -> list of Flag objects that are defined by
# that module.
self.__dict__['__flags_by_module_id'] = {}
# Dictionary: module name (string) -> list of Flag objects that are
# key for that module.
self.__dict__['__key_flags_by_module'] = {}
# Set if we should use new style gnu_getopt rather than getopt when parsing
# the args. Only possible with Python 2.3+
self.UseGnuGetOpt(False)
def UseGnuGetOpt(self, use_gnu_getopt=True):
"""Use GNU-style scanning. Allows mixing of flag and non-flag arguments.
See http://docs.python.org/library/getopt.html#getopt.gnu_getopt
Args:
      use_gnu_getopt: whether or not to use GNU style scanning.
"""
self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
def IsGnuGetOpt(self):
return self.__dict__['__use_gnu_getopt']
def FlagDict(self):
return self.__dict__['__flags']
def FlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of defined flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module']
def FlagsByModuleIdDict(self):
"""Returns the dictionary of module_id -> list of defined flags.
Returns:
A dictionary. Its keys are module IDs (ints). Its values
are lists of Flag objects.
"""
return self.__dict__['__flags_by_module_id']
def KeyFlagsByModuleDict(self):
"""Returns the dictionary of module_name -> list of key flags.
Returns:
A dictionary. Its keys are module names (strings). Its values
are lists of Flag objects.
"""
return self.__dict__['__key_flags_by_module']
def _RegisterFlagByModule(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: A string, the name of a Python module.
      flag: A Flag object, a flag defined by the module.
"""
flags_by_module = self.FlagsByModuleDict()
flags_by_module.setdefault(module_name, []).append(flag)
def _RegisterFlagByModuleId(self, module_id, flag):
"""Records the module that defines a specific flag.
Args:
module_id: An int, the ID of the Python module.
      flag: A Flag object, a flag defined by the module.
"""
flags_by_module_id = self.FlagsByModuleIdDict()
flags_by_module_id.setdefault(module_id, []).append(flag)
def _RegisterKeyFlagForModule(self, module_name, flag):
"""Specifies that a flag is a key flag for a module.
Args:
module_name: A string, the name of a Python module.
flag: A Flag object, a flag that is key to the module.
"""
key_flags_by_module = self.KeyFlagsByModuleDict()
# The list of key flags for the module named module_name.
key_flags = key_flags_by_module.setdefault(module_name, [])
# Add flag, but avoid duplicates.
if flag not in key_flags:
key_flags.append(flag)
def _GetFlagsDefinedByModule(self, module):
"""Returns the list of flags defined by a module.
Args:
module: A module object or a module name (a string).
Returns:
      A new list of Flag objects. Callers may update this list as they
      wish: none of those changes will affect the internals of this
      FlagValues object.
"""
if not isinstance(module, str):
module = module.__name__
return list(self.FlagsByModuleDict().get(module, []))
def _GetKeyFlagsForModule(self, module):
"""Returns the list of key flags for a module.
Args:
module: A module object or a module name (a string)
Returns:
      A new list of Flag objects. Callers may update this list as they
      wish: none of those changes will affect the internals of this
      FlagValues object.
"""
if not isinstance(module, str):
module = module.__name__
# Any flag is a key flag for the module that defined it. NOTE:
# key_flags is a fresh list: we can update it without affecting the
# internals of this FlagValues object.
key_flags = self._GetFlagsDefinedByModule(module)
# Take into account flags explicitly declared as key for a module.
for flag in self.KeyFlagsByModuleDict().get(module, []):
if flag not in key_flags:
key_flags.append(flag)
return key_flags
def FindModuleDefiningFlag(self, flagname, default=None):
"""Return the name of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The name of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module, flags in self.FlagsByModuleDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module
return default
def FindModuleIdDefiningFlag(self, flagname, default=None):
"""Return the ID of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module_id, flags in self.FlagsByModuleIdDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module_id
return default
def AppendFlagValues(self, flag_values):
"""Appends flags registered in another FlagValues instance.
Args:
flag_values: registry to copy from
"""
for flag_name, flag in flag_values.FlagDict().iteritems():
      # Each flag with a short name appears here twice (once under its
# normal name, and again with its short name). To prevent
# problems (DuplicateFlagError) with double flag registration, we
# perform a check to make sure that the entry we're looking at is
# for its normal name.
if flag_name == flag.name:
try:
self[flag_name] = flag
except DuplicateFlagError:
raise DuplicateFlagError(flag_name, self,
other_flag_values=flag_values)
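  # Illustrative sketch (not part of the original module): copying flags
  # from a separate registry into another one; the flag name is made up.
  #
  #   other_flags = FlagValues()
  #   DEFINE_string('color', 'red', 'color to use', flag_values=other_flags)
  #   FLAGS.AppendFlagValues(other_flags)   # FLAGS now also knows --color
  #   FLAGS.RemoveFlagValues(other_flags)   # and here it forgets it again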
def RemoveFlagValues(self, flag_values):
"""Remove flags that were previously appended from another FlagValues.
Args:
flag_values: registry containing flags to remove.
"""
for flag_name in flag_values.FlagDict():
self.__delattr__(flag_name)
def __setitem__(self, name, flag):
"""Registers a new flag variable."""
fl = self.FlagDict()
if not isinstance(flag, Flag):
raise IllegalFlagValue(flag)
if not isinstance(name, type("")):
raise FlagsError("Flag name must be a string")
if len(name) == 0:
raise FlagsError("Flag name cannot be empty")
# If running under pychecker, duplicate keys are likely to be
# defined. Disable check for duplicate keys when pycheck'ing.
if (name in fl and not flag.allow_override and
not fl[name].allow_override and not _RUNNING_PYCHECKER):
module, module_name = _GetCallingModuleObjectAndName()
if (self.FindModuleDefiningFlag(name) == module_name and
id(module) != self.FindModuleIdDefiningFlag(name)):
# If the flag has already been defined by a module with the same name,
# but a different ID, we can stop here because it indicates that the
# module is simply being imported a subsequent time.
return
raise DuplicateFlagError(name, self)
short_name = flag.short_name
if short_name is not None:
if (short_name in fl and not flag.allow_override and
not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
raise DuplicateFlagError(short_name, self)
fl[short_name] = flag
fl[name] = flag
global _exported_flags
_exported_flags[name] = flag
def __getitem__(self, name):
"""Retrieves the Flag object for the flag --name."""
return self.FlagDict()[name]
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
return fl[name].value
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self.FlagDict()
fl[name].value = value
self._AssertValidators(fl[name].validators)
return value
def _AssertAllValidators(self):
all_validators = set()
for flag in self.FlagDict().itervalues():
for validator in flag.validators:
all_validators.add(validator)
self._AssertValidators(all_validators)
def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(gflags_validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValue: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.Verify(self)
except gflags_validators.Error, e:
message = validator.PrintFlagsWithValues(self)
raise IllegalFlagValue('%s: %s' % (message, str(e)))
def _FlagIsRegistered(self, flag_obj):
"""Checks whether a Flag object is registered under some name.
    Note: this is non-trivial: in addition to its normal name, a flag
    may have a short name too. In self.FlagDict(), both the normal and
    the short name are mapped to the same flag object. E.g., calling
    only "del FLAGS.short_name" does not unregister the corresponding
    Flag object (it is still registered under the longer name).
Args:
flag_obj: A Flag object.
Returns:
A boolean: True iff flag_obj is registered under some name.
"""
flag_dict = self.FlagDict()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
# The flag cannot be registered under any other name, so we do not
# need to do a full search through the values of self.FlagDict().
return False
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del flag_values_object.<flag_name>
E.g.,
gflags.DEFINE_integer('foo', 1, 'Integer flag.')
del gflags.FLAGS.foo
Args:
flag_name: A string, the name of the flag to be deleted.
Raises:
AttributeError: When there is no registered flag named flag_name.
"""
fl = self.FlagDict()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
if not self._FlagIsRegistered(flag_obj):
# If the Flag object indicated by flag_name is no longer
# registered (please see the docstring of _FlagIsRegistered), then
# we delete the occurrences of the flag object in all our internal
# dictionaries.
self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.FlagsByModuleIdDict(), flag_obj)
self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
"""Removes a flag object from a module -> list of flags dictionary.
Args:
flags_by_module_dict: A dictionary that maps module names to lists of
flags.
flag_obj: A flag object.
"""
for unused_module, flags_in_module in flags_by_module_dict.iteritems():
# while (as opposed to if) takes care of multiple occurrences of a
# flag in the list for the same module.
while flag_obj in flags_in_module:
flags_in_module.remove(flag_obj)
def SetDefault(self, name, value):
"""Changes the default value of the named flag object."""
fl = self.FlagDict()
if name not in fl:
raise AttributeError(name)
fl[name].SetDefault(value)
self._AssertValidators(fl[name].validators)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self.FlagDict()
has_key = __contains__ # a synonym for __contains__()
def __iter__(self):
return iter(self.FlagDict())
def __call__(self, argv):
"""Parses flags from argv; stores parsed flags into this FlagValues object.
All unparsed arguments are returned. Flags are parsed using the GNU
Program Argument Syntax Conventions, using getopt:
http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
Args:
argv: argument list. Can be of any type that may be converted to a list.
Returns:
The list of arguments not parsed as options, including argv[0]
Raises:
FlagsError: on any parsing error
"""
# Support any sequence type that can be converted to a list
argv = list(argv)
shortopts = ""
longopts = []
fl = self.FlagDict()
    # This pre-parses the argv list for --flagfile=<> options.
argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
# Correct the argv to support the google style of passing boolean
# parameters. Boolean parameters may be passed by using --mybool,
# --nomybool, --mybool=(true|false|1|0). getopt does not support
# having options that may or may not have a parameter. We replace
# instances of the short form --mybool and --nomybool with their
# full forms: --mybool=(true|false).
original_argv = list(argv) # list() makes a copy
shortest_matches = None
for name, flag in fl.items():
if not flag.boolean:
continue
if shortest_matches is None:
# Determine the smallest allowable prefix for all flag names
shortest_matches = self.ShortestUniquePrefixes(fl)
no_name = 'no' + name
prefix = shortest_matches[name]
no_prefix = shortest_matches[no_name]
# Replace all occurrences of this boolean with extended forms
for arg_idx in range(1, len(argv)):
arg = argv[arg_idx]
if arg.find('=') >= 0: continue
if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
argv[arg_idx] = ('--%s=true' % name)
elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
argv[arg_idx] = ('--%s=false' % name)
# Loop over all of the flags, building up the lists of short options
# and long options that will be passed to getopt. Short options are
# specified as a string of letters, each letter followed by a colon
# if it takes an argument. Long options are stored in an array of
# strings. Each string ends with an '=' if it takes an argument.
for name, flag in fl.items():
longopts.append(name + "=")
if len(name) == 1: # one-letter option: allow short flag type also
shortopts += name
if not flag.boolean:
shortopts += ":"
longopts.append('undefok=')
undefok_flags = []
# In case --undefok is specified, loop to pick up unrecognized
# options one by one.
unrecognized_opts = []
args = argv[1:]
while True:
try:
if self.__dict__['__use_gnu_getopt']:
optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
else:
optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
break
except getopt.GetoptError, e:
if not e.opt or e.opt in fl:
# Not an unrecognized option, re-raise the exception as a FlagsError
raise FlagsError(e)
# Remove offender from args and try again
for arg_index in range(len(args)):
if ((args[arg_index] == '--' + e.opt) or
(args[arg_index] == '-' + e.opt) or
(args[arg_index].startswith('--' + e.opt + '='))):
unrecognized_opts.append((e.opt, args[arg_index]))
args = args[0:arg_index] + args[arg_index+1:]
break
else:
# We should have found the option, so we don't expect to get
# here. We could assert, but raising the original exception
# might work better.
raise FlagsError(e)
for name, arg in optlist:
if name == '--undefok':
flag_names = arg.split(',')
undefok_flags.extend(flag_names)
# For boolean flags, if --undefok=boolflag is specified, then we should
# also accept --noboolflag, in addition to --boolflag.
# Since we don't know the type of the undefok'd flag, this will affect
# non-boolean flags as well.
# NOTE: You shouldn't use --undefok=noboolflag, because then we will
# accept --nonoboolflag here. We are choosing not to do the conversion
# from noboolflag -> boolflag because of the ambiguity that flag names
# can start with 'no'.
undefok_flags.extend('no' + name for name in flag_names)
continue
if name.startswith('--'):
# long option
name = name[2:]
short_option = 0
else:
# short option
name = name[1:]
short_option = 1
if name in fl:
flag = fl[name]
if flag.boolean and short_option: arg = 1
flag.Parse(arg)
# If there were unrecognized options, raise an exception unless
# the options were named via --undefok.
for opt, value in unrecognized_opts:
if opt not in undefok_flags:
raise UnrecognizedFlagError(opt, value)
if unparsed_args:
if self.__dict__['__use_gnu_getopt']:
# if using gnu_getopt just return the program name + remainder of argv.
ret_val = argv[:1] + unparsed_args
else:
# unparsed_args becomes the first non-flag detected by getopt to
# the end of argv. Because argv may have been modified above,
# return original_argv for this region.
ret_val = argv[:1] + original_argv[-len(unparsed_args):]
else:
ret_val = argv[:1]
self._AssertAllValidators()
return ret_val
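  # Illustrative sketch (not part of the original module): what the boolean
  # rewriting in __call__ means in practice; the flag name is made up.
  #
  #   DEFINE_boolean('debug', False, 'produce debugging output')
  #   remaining = FLAGS(['prog', '--debug', 'positional_arg'])
  #   # '--debug' is rewritten to '--debug=true' before getopt runs;
  #   # remaining == ['prog', 'positional_arg'] and FLAGS.debug is True.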
def Reset(self):
"""Resets the values to the point before FLAGS(argv) was called."""
for f in self.FlagDict().values():
f.Unparse()
def RegisteredFlags(self):
"""Returns: a list of the names and short names of all registered flags."""
return list(self.FlagDict())
def FlagValuesDict(self):
"""Returns: a dictionary that maps flag names to flag values."""
flag_values = {}
for flag_name in self.RegisteredFlags():
flag = self.FlagDict()[flag_name]
flag_values[flag_name] = flag.value
return flag_values
def __str__(self):
"""Generates a help string for all known flags."""
return self.GetHelp()
def GetHelp(self, prefix=''):
"""Generates a help string for all known flags."""
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = sorted(flags_by_module)
# Print the help for the main module first, if possible.
main_module = _GetMainModule()
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
self.__RenderModuleFlags('gflags',
_SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
self.__RenderFlagList(
self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
helplist, prefix)
return '\n'.join(helplist)
def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
"""Generates a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self.__RenderFlagList(flags, output_lines, prefix + " ")
def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
"""Generates a help string for a given module."""
flags = self._GetFlagsDefinedByModule(module)
if flags:
self.__RenderModuleFlags(module, flags, output_lines, prefix)
def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist)
def MainModuleHelp(self):
"""Describe the key flags of the main module.
Returns:
string describing the key flags of a module.
"""
return self.ModuleHelp(_GetMainModule())
def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
fl = self.FlagDict()
special_fl = _SPECIAL_FLAGS.FlagDict()
flaglist = [(flag.name, flag) for flag in flaglist]
flaglist.sort()
flagset = {}
for (name, flag) in flaglist:
# It's possible this flag got deleted or overridden since being
# registered in the per-module flaglist. Check now against the
# canonical source of current flag information, the FlagDict.
if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
# a different flag is using this name now
continue
# only print help once
if flag in flagset: continue
flagset[flag] = 1
flaghelp = ""
if flag.short_name: flaghelp += "-%s," % flag.short_name
if flag.boolean:
flaghelp += "--[no]%s" % flag.name + ":"
else:
flaghelp += "--%s" % flag.name + ":"
flaghelp += " "
if flag.help:
flaghelp += flag.help
flaghelp = TextWrap(flaghelp, indent=prefix+" ",
firstline_indent=prefix)
if flag.default_as_str:
flaghelp += "\n"
flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
indent=prefix+" ")
if flag.parser.syntactic_help:
flaghelp += "\n"
flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
indent=prefix+" ")
output_lines.append(flaghelp)
def get(self, name, default):
"""Returns the value of a flag (if not None) or a default value.
Args:
name: A string, the name of a flag.
default: Default value to use if the flag value is None.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def ShortestUniquePrefixes(self, fl):
"""Returns: dictionary; maps flag names to their shortest unique prefix."""
# Sort the list of flag names
sorted_flags = []
for name, flag in fl.items():
sorted_flags.append(name)
if flag.boolean:
sorted_flags.append('no%s' % name)
sorted_flags.sort()
# For each name in the sorted list, determine the shortest unique
# prefix by comparing itself to the next name and to the previous
# name (the latter check uses cached info from the previous loop).
shortest_matches = {}
prev_idx = 0
for flag_idx in range(len(sorted_flags)):
curr = sorted_flags[flag_idx]
if flag_idx == (len(sorted_flags) - 1):
next = None
else:
next = sorted_flags[flag_idx+1]
next_len = len(next)
for curr_idx in range(len(curr)):
if (next is None
or curr_idx >= next_len
or curr[curr_idx] != next[curr_idx]):
# curr longer than next or no more chars in common
shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
prev_idx = curr_idx
break
else:
# curr shorter than (or equal to) next
shortest_matches[curr] = curr
prev_idx = curr_idx + 1 # next will need at least one more char
return shortest_matches
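  # Illustrative sketch (not part of the original module): for a FlagDict
  # with the two boolean flags 'verbose' and 'version' (made-up names), the
  # returned map would be roughly
  #   {'verbose': 'verb', 'version': 'vers',
  #    'noverbose': 'noverb', 'noversion': 'novers'}
  # i.e. the shortest prefixes that are still unambiguous, which is what
  # the boolean rewriting in __call__ relies on.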
def __IsFlagFileDirective(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type("")):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def ExtractFilename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
def __GetFlagFileLines(self, filename, parsed_file_list):
"""Returns the useful (!=comments, etc) lines from a file with flags.
Args:
filename: A string, the name of the flag file.
parsed_file_list: A list of the names of the files we have
already read. MUTATED BY THIS FUNCTION.
Returns:
List of strings. See the note below.
NOTE(user): This function checks for a nested --flagfile=<foo>
tag and handles the lower file recursively. It returns a list of
all the lines that _could_ contain command flags. This is
EVERYTHING except whitespace lines and comments (lines starting
with '#' or '//').
"""
    line_list = []  # All lines from the flagfile.
flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags.
try:
file_obj = open(filename, 'r')
except IOError, e_msg:
raise CantOpenFlagFileError('ERROR:: Unable to open flagfile: %s' % e_msg)
line_list = file_obj.readlines()
file_obj.close()
parsed_file_list.append(filename)
# This is where we check each line in the file we just read.
for line in line_list:
if line.isspace():
pass
# Checks for comment (a line that starts with '#').
elif line.startswith('#') or line.startswith('//'):
pass
# Checks for a nested "--flagfile=<bar>" flag in the current file.
# If we find one, recursively parse down into that file.
elif self.__IsFlagFileDirective(line):
sub_filename = self.ExtractFilename(line)
        # A little safety check to avoid re-parsing a file we've already parsed.
if not sub_filename in parsed_file_list:
included_flags = self.__GetFlagFileLines(sub_filename,
parsed_file_list)
flag_line_list.extend(included_flags)
else: # Case of hitting a circularly included file.
sys.stderr.write('Warning: Hit circular flagfile dependency: %s\n' %
(sub_filename,))
else:
# Any line that's not a comment or a nested flagfile should get
# copied into 2nd position. This leaves earlier arguments
# further back in the list, thus giving them higher priority.
flag_line_list.append(line.strip())
return flag_line_list
def ReadFlagsFromFiles(self, argv, force_gnu=True):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: A list of strings, usually sys.argv[1:], which may contain one or
more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: If False, --flagfile parsing obeys normal flag semantics.
If True, --flagfile parsing instead follows gnu_getopt semantics.
*** WARNING *** force_gnu=False may become the future default!
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
    References: Global gflags.FLAGS instance.
This function should be called before the normal FLAGS(argv) call.
This function scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list between the
first item of the list and any subsequent items in the list.
Note that your application's flags are still defined the usual way
using gflags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> Flags from the command line argv _should_ always take precedence!
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be processed after the parent flag file is done.
--> For duplicate flags, first one we hit should "win".
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
parsed_file_list = []
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self.__IsFlagFileDirective(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise IllegalFlagValue('--flagfile with no argument')
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self.ExtractFilename(current_arg)
new_argv.extend(
self.__GetFlagFileLines(flag_filename, parsed_file_list))
else:
new_argv.append(current_arg)
# Stop parsing after '--', like getopt and gnu_getopt.
if current_arg == '--':
break
# Stop parsing after a non-flag, like getopt.
if not current_arg.startswith('-'):
if not force_gnu and not self.__dict__['__use_gnu_getopt']:
break
if rest_of_args:
new_argv.extend(rest_of_args)
return new_argv
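  # Illustrative sketch (not part of the original module): given a flagfile
  # /tmp/example.cfg (made-up path) containing
  #
  #   # a comment line
  #   --debug
  #   --log_dir=/var/tmp
  #
  # a command line ['--flagfile=/tmp/example.cfg', '--verbosity=2'] is
  # expanded by this method into
  #   ['--debug', '--log_dir=/var/tmp', '--verbosity=2']
  # before the normal __call__ parsing runs.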
def FlagsIntoString(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from http://code.google.com/p/google-gflags
"""
s = ''
for flag in self.FlagDict().values():
if flag.value is not None:
s += flag.Serialize() + '\n'
return s
def AppendFlagsIntoFile(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
from http://code.google.com/p/google-gflags
"""
out_file = open(filename, 'a')
out_file.write(self.FlagsIntoString())
out_file.close()
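  # Illustrative sketch (not part of the original module): FlagsIntoString
  # and AppendFlagsIntoFile round-trip nicely with --flagfile; the path is
  # made up.
  #
  #   FLAGS.AppendFlagsIntoFile('/tmp/saved.flags')
  #   # ... later, possibly in another process:
  #   FLAGS(['prog', '--flagfile=/tmp/saved.flags'])
  #   # restores every flag whose value was not None when the file was
  #   # written, one assignment per line.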
def WriteHelpInXMLFormat(self, outfile=None):
"""Outputs flag documentation in XML format.
NOTE: We use element names that are consistent with those used by
the C++ command-line flag library, from
http://code.google.com/p/google-gflags
We also use a few new elements (e.g., <key>), but we do not
interfere / overlap with existing XML elements used by the C++
library. Please maintain this consistency.
Args:
outfile: File object we write to. Default None means sys.stdout.
"""
outfile = outfile or sys.stdout
outfile.write('<?xml version=\"1.0\"?>\n')
outfile.write('<AllFlags>\n')
indent = ' '
_WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
indent)
usage_doc = sys.modules['__main__'].__doc__
if not usage_doc:
usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
else:
usage_doc = usage_doc.replace('%s', sys.argv[0])
_WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)
# Get list of key flags for the main module.
key_flags = self._GetKeyFlagsForModule(_GetMainModule())
# Sort flags by declaring module name and next by flag name.
flags_by_module = self.FlagsByModuleDict()
all_module_names = list(flags_by_module.keys())
all_module_names.sort()
for module_name in all_module_names:
flag_list = [(f.name, f) for f in flags_by_module[module_name]]
flag_list.sort()
for unused_flag_name, flag in flag_list:
is_key = flag in key_flags
flag.WriteInfoInXMLFormat(outfile, module_name,
is_key=is_key, indent=indent)
outfile.write('</AllFlags>\n')
outfile.flush()
def AddValidator(self, validator):
"""Register new flags validator to be checked.
Args:
validator: gflags_validators.Validator
Raises:
AttributeError: if validators work with a non-existing flag.
"""
for flag_name in validator.GetFlagsNames():
flag = self.FlagDict()[flag_name]
flag.validators.append(validator)
# end of FlagValues definition
# The global FlagValues instance
FLAGS = FlagValues()
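# Illustrative sketch (not part of the original module): typical use of the
# global FLAGS registry together with the DEFINE_* helpers defined further
# below; flag names and values are made up.
#
#   DEFINE_string('name', 'world', 'whom to greet')
#   DEFINE_boolean('shout', False, 'greet loudly')
#
#   def main(argv):
#     argv = FLAGS(argv)   # parse flags; returns the remaining arguments
#     greeting = 'Hello, %s' % FLAGS.name
#     if FLAGS.shout:
#       greeting = greeting.upper()
#     print greeting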
def _StrOrUnicode(value):
"""Converts value to a python string or, if necessary, unicode-string."""
try:
return str(value)
except UnicodeEncodeError:
return unicode(value)
def _MakeXMLSafe(s):
"""Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
s = cgi.escape(s) # Escape <, >, and &
# Remove characters that cannot appear in an XML 1.0 document
# (http://www.w3.org/TR/REC-xml/#charsets).
#
# NOTE: if there are problems with current solution, one may move to
# XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
# Convert non-ascii characters to entities. Note: requires python >=2.3
s = s.encode('ascii', 'xmlcharrefreplace') # u'\xce\x88' -> 'uΈ'
return s
def _WriteSimpleXMLElement(outfile, name, value, indent):
"""Writes a simple XML element.
Args:
outfile: File object we write the XML element to.
name: A string, the name of XML element.
value: A Python object, whose string representation will be used
as the value of the XML element.
indent: A string, prepended to each line of generated output.
"""
value_str = _StrOrUnicode(value)
if isinstance(value, bool):
# Display boolean values as the C++ flag library does: no caps.
value_str = value_str.lower()
safe_value_str = _MakeXMLSafe(value_str)
outfile.write('%s<%s>%s</%s>\n' % (indent, name, safe_value_str, name))
class Flag:
"""Information about a command-line flag.
'Flag' objects define the following fields:
.name - the name for this flag
.default - the default value for this flag
.default_as_str - default value as repr'd string, e.g., "'true'" (or None)
.value - the most recent parsed value of this flag; set by Parse()
.help - a help string or None if no help is available
.short_name - the single letter alias for this flag (or None)
.boolean - if 'true', this flag does not accept arguments
.present - true if this flag was parsed from command line flags.
.parser - an ArgumentParser object
.serializer - an ArgumentSerializer object
.allow_override - the flag may be redefined without raising an error
The only public method of a 'Flag' object is Parse(), but it is
typically only called by a 'FlagValues' object. The Parse() method is
a thin wrapper around the 'ArgumentParser' Parse() method. The parsed
value is saved in .value, and the .present attribute is updated. If
this flag was already present, a FlagsError is raised.
Parse() is also called during __init__ to parse the default value and
initialize the .value attribute. This enables other python modules to
safely use flags even if the __main__ module neglects to parse the
command line arguments. The .present attribute is cleared after
__init__ parsing. If the default value is set to None, then the
__init__ parsing step is skipped and the .value attribute is
initialized to None.
Note: The default value is also presented to the user in the help
string, so it is important that it be a legal value for this flag.
"""
def __init__(self, parser, serializer, name, default, help_string,
short_name=None, boolean=0, allow_override=0):
self.name = name
if not help_string:
help_string = '(no help available)'
self.help = help_string
self.short_name = short_name
self.boolean = boolean
self.present = 0
self.parser = parser
self.serializer = serializer
self.allow_override = allow_override
self.value = None
self.validators = []
self.SetDefault(default)
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return self is other
def __lt__(self, other):
if isinstance(other, Flag):
return id(self) < id(other)
return NotImplemented
def __GetParsedValueAsString(self, value):
if value is None:
return None
if self.serializer:
return repr(self.serializer.Serialize(value))
if self.boolean:
if value:
return repr('true')
else:
return repr('false')
return repr(_StrOrUnicode(value))
def Parse(self, argument):
try:
self.value = self.parser.Parse(argument)
except ValueError, e: # recast ValueError as IllegalFlagValue
raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
self.present += 1
def Unparse(self):
if self.default is None:
self.value = None
else:
self.Parse(self.default)
self.present = 0
def Serialize(self):
if self.value is None:
return ''
if self.boolean:
if self.value:
return "--%s" % self.name
else:
return "--no%s" % self.name
else:
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))
def SetDefault(self, value):
"""Changes the default value (and current value too) for this Flag."""
# We can't allow a None override because it may end up not being
# passed to C++ code when we're overriding C++ flags. So we
# cowardly bail out until someone fixes the semantics of trying to
# pass None to a C++ flag. See swig_flags.Init() for details on
# this behavior.
# TODO(user): Users can directly call this method, bypassing all flags
# validators (we don't have FlagValues here, so we can not check
# validators).
# The simplest solution I see is to make this method private.
# Another approach would be to store reference to the corresponding
# FlagValues with each flag, but this seems to be an overkill.
if value is None and self.allow_override:
raise DuplicateFlagCannotPropagateNoneToSwig(self.name)
self.default = value
self.Unparse()
self.default_as_str = self.__GetParsedValueAsString(self.value)
def Type(self):
"""Returns: a string that describes the type of this Flag."""
# NOTE: we use strings, and not the types.*Type constants because
# our flags can have more exotic types, e.g., 'comma separated list
# of strings', 'whitespace separated list of strings', etc.
return self.parser.Type()
def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
"""Writes common info about this flag, in XML format.
This is information that is relevant to all flags (e.g., name,
meaning, etc.). If you defined a flag that has some other pieces of
info, then please override _WriteCustomInfoInXMLFormat.
Please do NOT override this method.
Args:
outfile: File object we write to.
module_name: A string, the name of the module that defines this flag.
is_key: A boolean, True iff this flag is key for main module.
indent: A string that is prepended to each generated line.
"""
outfile.write(indent + '<flag>\n')
inner_indent = indent + ' '
if is_key:
_WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
_WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
# Print flag features that are relevant for all flags.
_WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
if self.short_name:
_WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
inner_indent)
if self.help:
_WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
# The default flag value can either be represented as a string like on the
# command line, or as a Python object. We serialize this value in the
# latter case in order to remain consistent.
if self.serializer and not isinstance(self.default, str):
default_serialized = self.serializer.Serialize(self.default)
else:
default_serialized = self.default
_WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
_WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
_WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
# Print extra flag features this flag may have.
self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
outfile.write(indent + '</flag>\n')
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
"""Writes extra info about this flag, in XML format.
"Extra" means "not already printed by WriteInfoInXMLFormat above."
Args:
outfile: File object we write to.
indent: A string that is prepended to each generated line.
"""
# Usually, the parser knows the extra details about the flag, so
# we just forward the call to it.
self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
# End of Flag definition
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __call__(mcs, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for mcs with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
args: Positional initializer arguments.
kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(mcs, *args, **kwargs)
else:
instances = mcs._instances
key = (mcs,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(mcs, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(mcs, *args)
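# Illustrative sketch (not part of the original module): because of the
# cache above, constructing the same parser twice with identical hashable
# positional arguments yields the same shared instance (assuming the
# FloatParser defined elsewhere in this module):
#
#   BooleanParser() is BooleanParser()          # True, cached
#   FloatParser(0, 10) is FloatParser(0, 10)    # True, same cache key
#   FloatParser(0, 10) is FloatParser(0, 99)    # False, different args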
class ArgumentParser(object):
"""Base class used to parse and convert arguments.
The Parse() method checks to make sure that the string argument is a
  legal value and converts it to a native type. If the value cannot be
converted, it should throw a 'ValueError' exception with a human
readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
Argument parser classes must be stateless, since instances are cached
and shared between flags. Initializer arguments are allowed, but all
member variables must be derived from initializer arguments only.
"""
__metaclass__ = _ArgumentParserCache
syntactic_help = ""
def Parse(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
def Type(self):
return 'string'
def WriteCustomInfoInXMLFormat(self, outfile, indent):
pass
class ArgumentSerializer:
"""Base class for generating string representations of a flag value."""
def Serialize(self, value):
return _StrOrUnicode(value)
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def Serialize(self, value):
return self.list_sep.join([_StrOrUnicode(x) for x in value])
# Flags validators
def RegisterValidator(flag_name,
checker,
message='Flag validation failed',
flag_values=FLAGS):
"""Adds a constraint, which will be enforced during program execution.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_name: string, name of the flag to be checked.
checker: method to validate the flag.
input - value of the corresponding flag (string, boolean, etc.
This value will be passed to checker by the library). See file's
docstring for examples.
output - Boolean.
Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise gflags_validators.Error(desired_error_message).
message: error text to be shown to the user if checker returns False.
If checker raises gflags_validators.Error, message from the raised
Error will be shown.
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name.
"""
flag_values.AddValidator(gflags_validators.SimpleValidator(flag_name,
checker,
message))
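# Illustrative sketch (not part of the original module): registering a
# simple range check; the flag name 'port' is made up and the checker is
# re-run every time FLAGS.port is assigned.
#
#   DEFINE_integer('port', 8080, 'port to listen on')
#   RegisterValidator('port',
#                     lambda value: 0 < value < 65536,
#                     message='--port must be in the range (0, 65536)')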
def MarkFlagAsRequired(flag_name, flag_values=FLAGS):
"""Ensure that flag is not None during program execution.
Registers a flag validator, which will follow usual validator
rules.
Args:
flag_name: string, name of the flag
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name.
"""
RegisterValidator(flag_name,
lambda value: value is not None,
message='Flag --%s must be specified.' % flag_name,
flag_values=flag_values)
def _RegisterBoundsValidatorIfNeeded(parser, name, flag_values):
"""Enforce lower and upper bounds for numeric flags.
Args:
parser: NumericParser (either FloatParser or IntegerParser). Provides lower
and upper bounds, and help text to display.
name: string, name of the flag
flag_values: FlagValues
"""
if parser.lower_bound is not None or parser.upper_bound is not None:
def Checker(value):
if value is not None and parser.IsOutsideBounds(value):
message = '%s is not %s' % (value, parser.syntactic_help)
raise gflags_validators.Error(message)
return True
RegisterValidator(name,
Checker,
flag_values=flag_values)
# The DEFINE functions are explained in more detail in the module doc string.
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
**args):
"""Registers a generic Flag object.
NOTE: in the docstrings of all DEFINE* functions, "registers" is short
for "creates a new flag and registers it".
Auxiliary function: clients should use the specialized DEFINE_<type>
function instead.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object the flag will be registered with.
serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_flag(flag, flag_values=FLAGS):
"""Registers a 'Flag' object with a 'FlagValues' object.
By default, the global FLAGS 'FlagValue' object is used.
Typical users will use one of the more specialized DEFINE_xxx
functions, such as DEFINE_string or DEFINE_integer. But developers
who need to create Flag objects themselves should use this function
to register their flags.
"""
# copying the reference to flag_values prevents pychecker warnings
fv = flag_values
fv[flag.name] = flag
# Tell flag_values who's defining the flag.
if isinstance(flag_values, FlagValues):
# Regarding the above isinstance test: some users pass funny
# values of flag_values (e.g., {}) in order to avoid the flag
# registration (in the past, there used to be a flag_values ==
# FLAGS test here) and redefine flags with the same name (e.g.,
# debug). To avoid breaking their code, we perform the
# registration only if flag_values is a real FlagValues object.
module, module_name = _GetCallingModuleObjectAndName()
flag_values._RegisterFlagByModule(module_name, flag)
flag_values._RegisterFlagByModuleId(id(module), flag)
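# Illustrative sketch (not part of the original module): how a custom flag
# type can be registered through DEFINE/DEFINE_flag; the parser class and
# flag name below are made up.
#
#   class _CommaListParser(ArgumentParser):
#     def Parse(self, argument):
#       return [s.strip() for s in argument.split(',')]
#     def Type(self):
#       return 'comma separated list of strings'
#
#   DEFINE(_CommaListParser(), 'owners', 'alice,bob',
#          'comma separated list of owners', serializer=ListSerializer(','))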
def _InternalDeclareKeyFlags(flag_names,
flag_values=FLAGS, key_flag_values=None):
"""Declares a flag as key for the calling module.
Internal function. User code should call DECLARE_key_flag or
ADOPT_module_key_flags instead.
Args:
flag_names: A list of strings that are names of already-registered
Flag objects.
flag_values: A FlagValues object that the flags listed in
flag_names have registered with (the value of the flag_values
argument from the DEFINE_* calls that defined those flags).
This should almost never need to be overridden.
key_flag_values: A FlagValues object that (among possibly many
other things) keeps track of the key flags for each module.
Default None means "same as flag_values". This should almost
never need to be overridden.
Raises:
UnrecognizedFlagError: when we refer to a flag that was not
defined yet.
"""
key_flag_values = key_flag_values or flag_values
module = _GetCallingModule()
for flag_name in flag_names:
if flag_name not in flag_values:
raise UnrecognizedFlagError(flag_name)
flag = flag_values.FlagDict()[flag_name]
key_flag_values._RegisterKeyFlagForModule(module, flag)
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
"""Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--help).
Sample usage:
    gflags.DECLARE_key_flag('flag_1')
Args:
flag_name: A string, the name of an already declared flag.
(Redeclaring flags as key, including flags implicitly key
because they were declared in this module, is a no-op.)
flag_values: A FlagValues object. This should almost never
need to be overridden.
"""
if flag_name in _SPECIAL_FLAGS:
# Take care of the special flags, e.g., --flagfile, --undefok.
# These flags are defined in _SPECIAL_FLAGS, and are treated
# specially during flag parsing, taking precedence over the
# user-defined flags.
_InternalDeclareKeyFlags([flag_name],
flag_values=_SPECIAL_FLAGS,
key_flag_values=flag_values)
return
_InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
def ADOPT_module_key_flags(module, flag_values=FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: A module object.
flag_values: A FlagValues object. This should almost never need
to be overridden.
Raises:
FlagsError: When given an argument that is a module name (a
string), instead of a module object.
"""
# NOTE(user): an even better test would be if not
# isinstance(module, types.ModuleType) but I didn't want to import
# types for such a tiny use.
if isinstance(module, str):
raise FlagsError('Received module name %s; expected a module object.'
% module)
_InternalDeclareKeyFlags(
[f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
flag_values=flag_values)
# If module is this flag module, take _SPECIAL_FLAGS into account.
if module == _GetThisModuleObjectAndName()[0]:
_InternalDeclareKeyFlags(
# As we associate flags with _GetCallingModuleObjectAndName(), the
# special flags defined in this module are incorrectly registered with
# a different module. So, we can't use _GetKeyFlagsForModule.
# Instead, we take all flags from _SPECIAL_FLAGS (a private
# FlagValues, where no other module should register flags).
[f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
flag_values=_SPECIAL_FLAGS,
key_flag_values=flag_values)
#
# STRING FLAGS
#
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be any string."""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
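# Illustrative usage (assumption: the module is imported as "gflags"):
#   gflags.DEFINE_string('name', 'world', 'name to greet')
#   # after FLAGS(argv), FLAGS.name is 'world' unless --name=... was passed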
#
# BOOLEAN FLAGS
#
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def Convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if type(argument) == str:
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument)
def Parse(self, argument):
val = self.Convert(argument)
return val
def Type(self):
return 'bool'
class BooleanFlag(Flag):
"""Basic boolean flag.
Boolean flags do not take any arguments, and their value is either
True (1) or False (0). The false value is specified on the command
line by prepending the word 'no' to either the long or the short flag
name.
For example, if a Boolean flag was created whose long name was
'update' and whose short name was 'x', then this flag could be
explicitly unset through either --noupdate or --nox.
"""
def __init__(self, name, default, help, short_name=None, **args):
p = BooleanParser()
Flag.__init__(self, p, None, name, default, help, short_name, 1, **args)
if not self.help: self.help = "a boolean value"
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
"""Registers a boolean flag.
Such a boolean flag does not take an argument. If a user wants to
specify a false value explicitly, the long option beginning with 'no'
must be used: i.e. --noflag
This flag will have a value of None, True or False. None is possible
if default=None and the user does not specify the flag on the command
line.
"""
DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)
# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
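# Illustrative command-line behaviour for a boolean flag named "debug" (hypothetical):
#   --debug    sets FLAGS.debug to True
#   --nodebug  sets FLAGS.debug to False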
class HelpFlag(BooleanFlag):
"""
HelpFlag is a special boolean flag that prints usage information and
raises a SystemExit exception if it is ever found in the command
line arguments. Note this is called with allow_override=1, so other
apps can define their own --help flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "help", 0, "show this help",
short_name="?", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = str(FLAGS)
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
class HelpXMLFlag(BooleanFlag):
"""Similar to HelpFlag, but generates output in XML format."""
def __init__(self):
BooleanFlag.__init__(self, 'helpxml', False,
'like --help, but generates XML output',
allow_override=1)
def Parse(self, arg):
if arg:
FLAGS.WriteHelpInXMLFormat(sys.stdout)
sys.exit(1)
class HelpshortFlag(BooleanFlag):
"""
HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
if it is ever found in the command line arguments. Note this is
called with allow_override=1, so other apps can define their own
--helpshort flag, replacing this one, if they want.
"""
def __init__(self):
BooleanFlag.__init__(self, "helpshort", 0,
"show usage only for this module", allow_override=1)
def Parse(self, arg):
if arg:
doc = sys.modules["__main__"].__doc__
flags = FLAGS.MainModuleHelp()
print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
if flags:
print "flags:"
print flags
sys.exit(1)
#
# Numeric parser - base class for Integer and Float parsers
#
class NumericParser(ArgumentParser):
"""Parser of numeric values.
Parsed value may be bounded to a given upper and lower bound.
"""
def IsOutsideBounds(self, val):
return ((self.lower_bound is not None and val < self.lower_bound) or
(self.upper_bound is not None and val > self.upper_bound))
def Parse(self, argument):
val = self.Convert(argument)
if self.IsOutsideBounds(val):
raise ValueError("%s is not %s" % (val, self.syntactic_help))
return val
def WriteCustomInfoInXMLFormat(self, outfile, indent):
if self.lower_bound is not None:
_WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent)
if self.upper_bound is not None:
_WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent)
def Convert(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
# End of Numeric Parser
#
# FLOAT FLAGS
#
class FloatParser(NumericParser):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "a"
number_name = "number"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound == 0:
sh = "a non-positive %s" % self.number_name
elif upper_bound is not None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def Type(self):
return 'float'
# End of FloatParser
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be a float.
If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
_RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
#
# INTEGER FLAGS
#
class IntegerParser(NumericParser):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = "an"
number_name = "integer"
syntactic_help = " ".join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(IntegerParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = "a positive %s" % self.number_name
elif upper_bound == -1:
sh = "a negative %s" % self.number_name
elif lower_bound == 0:
sh = "a non-negative %s" % self.number_name
elif upper_bound == 0:
sh = "a non-positive %s" % self.number_name
elif upper_bound is not None:
sh = "%s <= %s" % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = "%s >= %s" % (self.number_name, lower_bound)
self.syntactic_help = sh
def Convert(self, argument):
__pychecker__ = 'no-returnvalues'
if type(argument) == str:
base = 10
if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
base = 16
return int(argument, base)
else:
return int(argument)
def Type(self):
return 'int'
def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value must be an integer.
  If lower_bound or upper_bound are set, then this flag must be
within the given range.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE(parser, name, default, help, flag_values, serializer, **args)
_RegisterBoundsValidatorIfNeeded(parser, name, flag_values=flag_values)
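# Illustrative usage (assumption: imported as "gflags"):
#   gflags.DEFINE_integer('port', 8080, 'port to listen on',
#                         lower_bound=1, upper_bound=65535)
#   # values outside [1, 65535] are rejected by the bounds validator above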
#
# ENUM FLAGS
#
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set).
If enum_values (see below) is not specified, any string is allowed.
"""
def __init__(self, enum_values=None):
super(EnumParser, self).__init__()
self.enum_values = enum_values
def Parse(self, argument):
if self.enum_values and argument not in self.enum_values:
raise ValueError("value should be one of <%s>" %
"|".join(self.enum_values))
return argument
def Type(self):
return 'string enum'
class EnumFlag(Flag):
"""Basic enum flag; its value can be any string from list of enum_values."""
def __init__(self, name, default, help, enum_values=None,
short_name=None, **args):
enum_values = enum_values or []
p = EnumParser(enum_values)
g = ArgumentSerializer()
Flag.__init__(self, p, g, name, default, help, short_name, **args)
if not self.help: self.help = "an enum string"
self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
def _WriteCustomInfoInXMLFormat(self, outfile, indent):
for enum_value in self.parser.enum_values:
_WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
**args):
"""Registers a flag whose value can be any string from enum_values."""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values)
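# Illustrative usage (assumption: imported as "gflags"):
#   gflags.DEFINE_enum('color', 'red', ['red', 'green', 'blue'], 'paint color')
#   # --color=yellow fails with "value should be one of <red|green|blue>"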
#
# LIST FLAGS
#
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
super(BaseListParser, self).__init__()
self._token = token
self._name = name
self.syntactic_help = "a %s separated list" % self._name
def Parse(self, argument):
if isinstance(argument, list):
return argument
elif argument == '':
return []
else:
return [s.strip() for s in argument.split(self._token)]
def Type(self):
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
_WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, None, 'whitespace')
def WriteCustomInfoInXMLFormat(self, outfile, indent):
BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
separators = list(string.whitespace)
separators.sort()
for ws_char in string.whitespace:
_WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a comma-separated list of strings."""
parser = ListParser()
serializer = ListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a whitespace-separated list of strings.
Any whitespace can be used as a separator.
"""
parser = WhitespaceSeparatedListParser()
serializer = ListSerializer(' ')
DEFINE(parser, name, default, help, flag_values, serializer, **args)
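# Illustrative usage (assumption: imported as "gflags"):
#   gflags.DEFINE_list('languages', 'en,fr', 'comma-separated language codes')
#   # --languages=en,de,es  ->  FLAGS.languages == ['en', 'de', 'es']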
#
# MULTI FLAGS
#
class MultiFlag(Flag):
"""A flag that can appear multiple time on the command-line.
The value of such a flag is a list that contains the individual values
from all the appearances of that flag on the command-line.
See the __doc__ for Flag for most behavior of this class. Only
differences in behavior are described here:
* The default value may be either a single value or a list of values.
A single value is interpreted as the [value] singleton list.
* The value of the flag is always a list, even if the option was
only supplied once, and even if the default value is a single
value
"""
def __init__(self, *args, **kwargs):
Flag.__init__(self, *args, **kwargs)
self.help += ';\n repeat this option to specify a list of values'
def Parse(self, arguments):
"""Parses one or more arguments with the installed parser.
Args:
arguments: a single argument or a list of arguments (typically a
list of default values); a single argument is converted
internally into a list containing one item.
"""
if not isinstance(arguments, list):
# Default value may be a list of values. Most other arguments
# will not be, so convert them into a single-item list to make
# processing simpler below.
arguments = [arguments]
if self.present:
# keep a backup reference to list of previously supplied option values
values = self.value
else:
# "erase" the defaults with an empty list
values = []
for item in arguments:
# have Flag superclass parse argument, overwriting self.value reference
Flag.Parse(self, item) # also increments self.present
values.append(self.value)
# put list of option values back in the 'value' attribute
self.value = values
def Serialize(self):
if not self.serializer:
raise FlagsError("Serializer not present for flag %s" % self.name)
if self.value is None:
return ''
s = ''
multi_value = self.value
for self.value in multi_value:
if s: s += ' '
s += Flag.Serialize(self)
self.value = multi_value
return s
def Type(self):
return 'multi ' + self.parser.Type()
def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
**args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values)
def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of any strings.
Use the flag on the command line multiple times to place multiple
string values into the list. The 'default' may be a single string
(which will be converted into a single-element list) or a list of
strings.
"""
parser = ArgumentParser()
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
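# Illustrative usage (assumption: imported as "gflags"); repeating the option
# accumulates values into a list:
#   gflags.DEFINE_multistring('define', [], 'preprocessor symbol, repeatable')
#   # --define=FOO --define=BAR  ->  FLAGS.define == ['FOO', 'BAR']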
def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary integers.
Use the flag on the command line multiple times to place multiple
integer values into the list. The 'default' may be a single integer
(which will be converted into a single-element list) or a list of
integers.
"""
parser = IntegerParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
def DEFINE_multi_float(name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary floats.
Use the flag on the command line multiple times to place multiple
float values into the list. The 'default' may be a single float
(which will be converted into a single-element list) or a list of
floats.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
# Now register the flags that we want to exist in all applications.
# These are all defined with allow_override=1, so user-apps can use
# these flagnames for their own purposes, if they want.
DEFINE_flag(HelpFlag())
DEFINE_flag(HelpshortFlag())
DEFINE_flag(HelpXMLFlag())
# Define special flags here so that help may be generated for them.
# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
_SPECIAL_FLAGS = FlagValues()
DEFINE_string(
'flagfile', "",
"Insert flag definitions from the given file into the command line.",
_SPECIAL_FLAGS)
DEFINE_string(
'undefok', "",
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
"with that name. IMPORTANT: flags in this list that have "
"arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
| gpl-3.0 | 3,194,141,635,143,222,300 | 35.417191 | 80 | 0.673959 | false |
gkioxari/RstarCNN | lib/random_data_layer/layer.py | 1 | 6391 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
"""The data layer used during training to train a R*CNN network.
RandomDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from random_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
class RandomDataLayer(caffe.Layer):
"""Fast R-CNN data layer used for training."""
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def _get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch.
If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
separate process and made available through self._blob_queue.
"""
if cfg.TRAIN.USE_PREFETCH:
return self._blob_queue.get()
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
return get_minibatch(minibatch_db, self._num_classes)
def set_roidb(self, roidb):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._shuffle_roidb_inds()
if cfg.TRAIN.USE_PREFETCH:
self._blob_queue = Queue(10)
self._prefetch_process = BlobFetcher(self._blob_queue,
self._roidb,
self._num_classes)
self._prefetch_process.start()
            # Terminate the child process when the parent exits
def cleanup():
print 'Terminating BlobFetcher'
self._prefetch_process.terminate()
self._prefetch_process.join()
import atexit
atexit.register(cleanup)
def setup(self, bottom, top):
"""Setup the RoIDataLayer."""
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {
'data': 0,
'rois': 1,
'secondary_rois': 2,
'labels': 3}
# data blob: holds a batch of N images, each with 3 channels
# The height and width (100 x 100) are dummy values
top[0].reshape(1, 3, 100, 100)
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
top[1].reshape(1, 5)
# secondary rois blob: holds R*C regions of interest, where C is
# the number of secondary regions
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
top[2].reshape(1, 5)
# labels blob: R categorical labels in [0, ..., K] for K foreground
# classes plus background
top[3].reshape(1)
if cfg.TRAIN.BBOX_REG:
self._name_to_top_map['bbox_targets'] = 4
self._name_to_top_map['bbox_loss_weights'] = 5
# bbox_targets blob: R bounding-box regression targets with 4
# targets per class
top[4].reshape(1, self._num_classes * 4)
# bbox_loss_weights blob: At most 4 targets per roi are active;
            # this binary vector specifies the subset of active targets
top[5].reshape(1, self._num_classes * 4)
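    # Illustrative (not from the original repo): a prototxt snippet that would
    # instantiate this Python layer; the module path and num_classes value are
    # assumptions, and param_str is parsed by yaml.load() in setup() above.
    #
    #   layer {
    #     name: "data"
    #     type: "Python"
    #     top: "data" top: "rois" top: "secondary_rois" top: "labels"
    #     python_param {
    #       module: "random_data_layer.layer"
    #       layer: "RandomDataLayer"
    #       param_str: "num_classes: 21"
    #     }
    #   }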
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
class BlobFetcher(Process):
"""Experimental class for prefetching blobs in a separate process."""
def __init__(self, queue, roidb, num_classes):
super(BlobFetcher, self).__init__()
self._queue = queue
self._roidb = roidb
self._num_classes = num_classes
self._perm = None
self._cur = 0
self._shuffle_roidb_inds()
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
# TODO(rbg): remove duplicated code
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
# TODO(rbg): remove duplicated code
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def run(self):
print 'BlobFetcher started'
while True:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs = get_minibatch(minibatch_db, self._num_classes)
self._queue.put(blobs)
| bsd-2-clause | 8,975,019,176,434,802,000 | 36.374269 | 75 | 0.570803 | false |
puruckertom/ubertool | ubertool/sam_old/sam_multiprocessing.py | 1 | 8682 | __author__ = 'jflaisha'
from concurrent.futures import ProcessPoolExecutor as Pool
from functools import partial
import logging
import multiprocessing
#import numpy as np
import os
import sys
import sam_callable
try:
import superprzm # Import superprzm.dll / .so
_dll_loaded = True
except ImportError as e:
logging.exception(e)
_dll_loaded = False
curr_path = os.path.abspath(os.path.dirname(__file__))
mp_logger = multiprocessing.log_to_stderr()
def multiprocessing_setup():
"""
Create the ProcessPoolExecutor object with the max number of concurrent workers equal to the number of cores of the
machine running this script.
:return: ProcessPoolExecutor object reference
"""
nproc = multiprocessing.cpu_count() # Get number of processors available on machine
if nproc > 16: # Force 'nproc' to be 16
nproc = 16
try:
host_name = os.uname()[1]
if host_name == 'ord-uber-vm005': # Force Server 5 to use 16 processes to avoid the memdump error when using a process pool with less max_workers than total number of processes
nproc = 16
    except Exception as e:
        pass  # os.uname() is unavailable on some platforms (e.g. Windows); keep the default nproc
    return Pool(max_workers=nproc)  # Set number of workers to equal the number of processors available on machine
class SamModelCaller(object):
"""
Class for calling Fortran version of SAM.
"""
def __init__(self, jid, name_temp, no_of_processes=16):
"""
Constructor for SamModelCaller class.
:param name_temp: string
:param number_of_rows_list: list
:param no_of_processes: int
"""
self.sam_bin_path = os.path.join(curr_path, 'bin')
self.jid = jid
self.name_temp = name_temp
self.no_of_processes = no_of_processes
def sam_multiprocessing(self):
"""
Submits jobs (SAM runs) to the worker pool.
"""
try:
import subprocess32 as subprocess # Use subprocess32 for Linux (Python 3.2 backport)
except ImportError:
import subprocess
try: # Ensure that the ProcessPoolExecutor object has been instantiated
if pool is None:
pass # 'pool' is already defined by multiprocessing_setup()
except NameError:
pool = multiprocessing_setup()
# Split master HUC CSV into sections and return a list containing the number of rows in each section (sequentially)
try:
self.number_of_rows_list = self.split_csv()
except Exception as e:
self.number_of_rows_list = [306, 306, 306, 306, 306, 306, 306, 306, 306, 306, 306, 306, 306, 306, 306, 320]
for x in range(self.no_of_processes): # Loop over all the 'no_of_processes' to fill the process
pool.submit(
daily_conc_callable,
self.jid, # 'jid' of SAM run
self.sam_bin_path, # Absolute path to the SAM bin folder
self.name_temp, # Temporary path name for this SuperPRZM run
self.two_digit(x), # Section number, as two digits, of this set of HUCs for the SuperPRZM run
self.number_of_rows_list[x] # Number of 'rows'/HUC12s for this section of HUCs for the SuperPRZM run
).add_done_callback(
partial(callback_daily, self.two_digit(x))
)
# Destroy the Pool object which hosts the processes when the pending Futures objects are finished,
# but do not wait until all Futures are done to have this function return
# pool.shutdown(wait=False) # Non-blocking
pool.shutdown() # Blocking
def split_csv(self):
"""
Load master CSV for SuperPRZM run as Pandas DataFrame and slice it
based on the number of Futures objects created to execute it.
(Currently Fortran is set to accept only a 1 char digit; therefore,
the max number here is 9)
:param number: int (1 - 9)
:param curr_path: String; absolute path to this module
:return: list; list with length equal number of csv sections, where each index is number of rows in section
"""
import pandas as pd
df = pd.read_csv(os.path.join(
self.sam_bin_path, 'EcoRecipes_huc12', 'recipe_combos2012', 'huc12_outlets_metric.csv'),
dtype={'HUC_12': object, 'COMID': object} # Set columns 'HUC_12' & 'COMID' to 'object' (~eq. to string)
) # This preserves any leading zeros present in the HUC12_IDs
if self.no_of_processes > 99:
self.no_of_processes = 99
if self.no_of_processes < 1:
self.no_of_processes = 1
try:
rows_per_sect = df.shape[0] / self.no_of_processes
except Exception as e:
self.no_of_processes = 1
rows_per_sect = df.shape[0] / self.no_of_processes
os.makedirs(os.path.join(self.sam_bin_path, self.name_temp, 'EcoRecipes_huc12', 'recipe_combos2012'))
number_of_rows_list = []
i = 1
while i <= self.no_of_processes:
if i == 1:
# First slice
df_slice = df[:rows_per_sect]
elif i == self.no_of_processes:
# End slice: slice to the end of the DataFrame
df_slice = df[((i - 1) * rows_per_sect):]
else:
# Middle slices (not first or last)
df_slice = df[((i - 1) * rows_per_sect):i * rows_per_sect]
number_of_rows_list.append(len(df_slice)) # Save the number of rows for each CSV to be passed to SuperPRZM
df_slice.to_csv(os.path.join(
self.sam_bin_path, self.name_temp, 'EcoRecipes_huc12',
'recipe_combos2012', 'huc12_outlets_metric_' + self.two_digit(i - 1) + '.csv'
), index=False)
i += 1
return number_of_rows_list
def two_digit(self, x):
"""
Convert "1" to "01", etc., up to 9. Value of x has 1 added to it; therefore, a zero-based sequence is expected.
:param x: int
:return: String, two digit representation of x + 1 if x < 9
"""
if x < 9:
number_string = "0" + str(x + 1)
else:
number_string = str(x + 1)
return number_string
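        # Illustrative behaviour: two_digit(0) -> "01", two_digit(8) -> "09",
        # two_digit(9) -> "10" (input is zero-based, output is one-based).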
def daily_conc_callable(jid, sam_bin_path, name_temp, section, array_size=320):
"""
:param jid:
:param sam_bin_path:
:param name_temp:
:param section:
:param array_size:
:return:
"""
# TODO: Remove these; left to show how it was previously done while testing callable
# return subprocess.Popen(args).wait() # Identical to subprocess.call()
# return subprocess.Popen(args, stdout=subprocess.PIPE).communicate() #
try:
sam_callable.run(jid, sam_bin_path, name_temp, section, int(array_size))
except Exception as e:
mp_logger.exception(e)
def callback_daily(section, future):
"""
Daily callback.
:param section:
:param future:
:return:
"""
#
def create_number_of_rows_list(list_string):
"""
    Split a whitespace-separated string of row counts into a list of strings.
:param list_string:
:return:
"""
return list_string.split()
def main():
"""
When run from command line this script takes 1 mandatory arguments and 2 optional arguments.
Mandatory arg: name_temp, string. Random 6 character string for run to generate temporary run directory.
Optional args: number_of_rows_list, list. If using a dataset that has already been processed by the split_csv()
method, this is a list with a length equal to the number of csv sections created (which is equal
to the number of workers). Each item in the list is the number of rows in that csv section,
sequentially, where index 0 is the 1st csv section.
no_of_processes, int. Total number of processes that will be used to complete the run. This is also
equal to the number of sections the csv will be dividing into and the length of the
number_of_rows_list optional argument.
:return:
"""
# Get command line arguments
jid = sys.argv[1]
name_temp = sys.argv[2]
if len(sys.argv) == 4: # 'no_of_processes' is an optional command line argument that defaults to 16 if not given
no_of_processes = int(sys.argv[3])
sam = SamModelCaller(jid, name_temp, no_of_processes)
else:
sam = SamModelCaller(jid, name_temp)
sam.sam_multiprocessing()
if __name__ == "__main__":
# Create Process Pool
pool = multiprocessing_setup()
main()
sys.exit()
| unlicense | -770,549,884,738,831,200 | 33.728 | 185 | 0.608731 | false |
chokribr/invenio | invenio/modules/oauthclient/contrib/orcid.py | 13 | 6448 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Pre-configured remote application for enabling sign in/up with ORCID.
**Usage:**
1. Edit your configuration and add:
.. code-block:: python
from invenio.modules.oauthclient.contrib import orcid
OAUTHCLIENT_REMOTE_APPS = dict(
orcid=orcid.REMOTE_APP,
)
ORCID_APP_CREDENTIALS = dict(
consumer_key="changeme",
consumer_secret="changeme",
)
Note, if you want to use the ORCID sandbox, use ``orcid.REMOTE_SANDBOX_APP``
instead of ``orcid.REMOTE_APP``.
2. Register a new application with ORCID. When registering the
application ensure that the *Redirect URI* points to:
``CFG_SITE_SECURE_URL/oauth/authorized/orcid/`` (note, ORCID does not
allow localhost to be used, thus testing on development machines is
somewhat complicated by this).
3. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration (``invenio.cfg``):
.. code-block:: python
ORCID_APP_CREDENTIALS = dict(
consumer_key="<CLIENT ID>",
consumer_secret="<CLIENT SECRET>",
)
4. Now go to ``CFG_SITE_SECURE_URL/oauth/login/orcid/`` (e.g.
http://localhost:4000/oauth/login/orcid/)
5. Also, you should see ORCID listed under Linked accounts:
http://localhost:4000//account/settings/linkedaccounts/
By default the ORCID module will try first look if a link already exists
between a ORCID account and a user. If no link is found, the user is asked
to provide an email address to sign-up.
In templates you can add a sign in/up link:
.. code-block:: jinja
<a href="{{url_for('oauthclient.login', remote_app='orcid')}}">
Sign in with ORCID
</a>
"""
import copy
from flask import current_app, redirect, url_for
from flask_login import current_user
from invenio.ext.sqlalchemy.utils import session_manager
REMOTE_APP = dict(
title='ORCID',
description='Connecting Research and Researchers.',
icon='',
authorized_handler="invenio.modules.oauthclient.handlers"
":authorized_signup_handler",
disconnect_handler="invenio.modules.oauthclient.contrib.orcid"
":disconnect_handler",
signup_handler=dict(
info="invenio.modules.oauthclient.contrib.orcid:account_info",
setup="invenio.modules.oauthclient.contrib.orcid:account_setup",
view="invenio.modules.oauthclient.handlers:signup_handler",
),
params=dict(
request_token_params={'scope': '/authenticate',
'show_login': 'true'},
base_url='https://pub.orcid.org/v1.2/',
request_token_url=None,
access_token_url="https://pub.orcid.org/oauth/token",
access_token_method='POST',
authorize_url="https://orcid.org/oauth/authorize",
app_key="ORCID_APP_CREDENTIALS",
content_type="application/json",
)
)
""" ORCID Remote Application. """
REMOTE_SANDBOX_APP = copy.deepcopy(REMOTE_APP)
"""ORCID Sandbox Remote Application."""
REMOTE_SANDBOX_APP['params'].update(dict(
base_url="https://api.sandbox.orcid.org/",
access_token_url="https://api.sandbox.orcid.org/oauth/token",
authorize_url="https://sandbox.orcid.org/oauth/authorize#show_login",
))
def account_info(remote, resp):
"""Retrieve remote account information used to find local user."""
account_info = dict(external_id=resp.get("orcid"), external_method="orcid")
return account_info
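# Illustrative return value of account_info above (the ORCID iD is a made-up example):
#   {'external_id': '0000-0002-1825-0097', 'external_method': 'orcid'}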
def disconnect_handler(remote, *args, **kwargs):
"""Handle unlinking of remote account."""
from invenio.modules.oauthclient.utils import oauth_unlink_external_id
from invenio.modules.oauthclient.models import RemoteAccount
if not current_user.is_authenticated():
return current_app.login_manager.unauthorized()
account = RemoteAccount.get(user_id=current_user.get_id(),
client_id=remote.consumer_key)
orcid = account.extra_data.get('orcid')
if orcid:
oauth_unlink_external_id(dict(id=orcid, method='orcid'))
if account:
account.delete()
return redirect(url_for('oauthclient_settings.index'))
@session_manager
def account_setup(remote, token, resp):
"""Perform additional setup after user have been logged in."""
from invenio.modules.oauthclient.utils import oauth_link_external_id
from invenio.ext.sqlalchemy import db
# Retrieve ORCID from response.
orcid = resp.get("orcid")
# Set ORCID in extra_data.
token.remote_account.extra_data = {"orcid": orcid}
user = token.remote_account.user
# Create user <-> external id link.
oauth_link_external_id(user, dict(id=orcid, method="orcid"))
# Fill user full name if not already set
if user and not any([user.given_names, user.family_name]):
# Query ORCID to get the real name
response = remote.get("{0}/orcid-bio".format(orcid),
headers={'Accept': 'application/orcid+json'},
content_type="application/json")
if response.status == 200:
try:
name = response.data["orcid-profile"]["orcid-bio"][
"personal-details"]
user.given_names = name["given-names"]["value"]
user.family_name = name["family-name"]["value"]
except KeyError:
current_app.logger.exception(
"Unexpected return format from ORCID: {0}".format(
repr(response.data)))
return
db.session.add(user)
# Refresh user cache
current_user.reload()
| gpl-2.0 | -6,483,330,020,447,636,000 | 33.666667 | 79 | 0.659429 | false |
bdoin/GCompris-site | activity/color/closure/closure/bin/build/source.py | 18 | 2723 | # Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scans a source JS file for its provided and required namespaces.
Simple class to scan a JavaScript file and express its dependencies.
"""
import re
_BASE_REGEX_STRING = '^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
_PROVIDE_REGEX = re.compile(_BASE_REGEX_STRING % 'provide')
_REQUIRES_REGEX = re.compile(_BASE_REGEX_STRING % 'require')
# This line identifies base.js and should match the line in that file.
_GOOG_BASE_LINE = (
'var goog = goog || {}; // Identifies this file as the Closure base.')
class Source(object):
"""Scans a JavaScript source for its provided and required namespaces."""
def __init__(self, source):
"""Initialize a source.
Args:
source: str, The JavaScript source.
"""
self.provides = set()
self.requires = set()
self._source = source
self._ScanSource()
def __str__(self):
    # This class keeps only the source text (there is no self._path attribute),
    # so describe the scanned namespaces instead of raising an AttributeError.
    return 'Source provides=%s requires=%s' % (sorted(self.provides), sorted(self.requires))
def GetSource(self):
"""Get the source as a string."""
return self._source
def _ScanSource(self):
"""Fill in provides and requires by scanning the source."""
# TODO: Strip source comments first, as these might be in a comment
# block. RegExes can be borrowed from other projects.
source = self.GetSource()
source_lines = source.splitlines()
for line in source_lines:
match = _PROVIDE_REGEX.match(line)
if match:
self.provides.add(match.group(1))
match = _REQUIRES_REGEX.match(line)
if match:
self.requires.add(match.group(1))
# Closure's base file implicitly provides 'goog'.
for line in source_lines:
if line == _GOOG_BASE_LINE:
if len(self.provides) or len(self.requires):
raise Exception(
'Base files should not provide or require namespaces.')
self.provides.add('goog')
def GetFileContents(path):
"""Get a file's contents as a string.
Args:
path: str, Path to file.
Returns:
str, Contents of file.
Raises:
IOError: An error occurred opening or reading the file.
"""
fileobj = open(path)
try:
return fileobj.read()
finally:
fileobj.close()
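# Illustrative usage of the two helpers above (not part of the original file):
#   src = Source(GetFileContents('path/to/file.js'))
#   print src.provides, src.requires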
| gpl-3.0 | -7,297,649,007,079,266,000 | 26.23 | 75 | 0.668013 | false |
befelix/scipy | scipy/stats/__init__.py | 5 | 9236 | """
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
   f                 -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewnorm -- Skew normal
t -- Student's T
trapz -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
   unitary_group -- U(N) group
random_correlation -- random correlation matrices
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
.. autosummary::
:toctree: generated/
cumfreq
histogram2
histogram
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
mvsdist
sem
zmap
zscore
iqr
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
f_value
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
ss
square_of_sums
jarque_bera
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
.. autosummary::
:toctree: generated/
chisqprob
betai
Circular statistical functions
==============================
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more stat related functions install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
from numpy.testing import Tester
test = Tester().test
| bsd-3-clause | -2,284,062,568,832,896,000 | 24.584488 | 94 | 0.570918 | false |
kvarkson/django-pdf-to-image-converter | 1.4/DjangoPdfToImageConverter/pdfconverter/extra.py | 3 | 1635 | from django.db.models import FileField
from django.forms import forms
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
class ContentTypeRestrictedFileField(FileField):
"""
Same as FileField, but you can specify:
* content_types - list containing allowed content_types. Example: ['application/pdf', 'image/jpeg']
* max_upload_size - a number indicating the maximum file size allowed for upload.
2.5MB - 2621440
5MB - 5242880
10MB - 10485760
20MB - 20971520
        50MB - 52428800
        100MB - 104857600
        250MB - 262144000
        500MB - 524288000
"""
def __init__(self, *args, **kwargs):
self.content_types = kwargs.pop("content_types")
self.max_upload_size = kwargs.pop("max_upload_size")
super(ContentTypeRestrictedFileField, self).__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
data = super(ContentTypeRestrictedFileField, self).clean(*args, **kwargs)
file = data.file
try:
content_type = file.content_type
if content_type in self.content_types:
if file._size > self.max_upload_size:
raise forms.ValidationError(_('Please keep filesize under %s. Current filesize %s') % (filesizeformat(self.max_upload_size), filesizeformat(file._size)))
else:
raise forms.ValidationError(_('Filetype not supported.'))
except AttributeError:
pass
return data | mit | -4,983,840,867,940,917,000 | 39.9 | 173 | 0.611009 | false |
repotvsupertuga/repo | script.module.gdrive/default.py | 1 | 92648 | '''
CloudService XBMC Plugin
Copyright (C) 2013-2014 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# cloudservice - required python modules
import sys
import urllib
import re
import os
KODI = True
if re.search(re.compile('.py', re.IGNORECASE), sys.argv[0]) is not None:
KODI = False
if KODI:
# cloudservice - standard XBMC modules
import xbmc, xbmcgui, xbmcplugin, xbmcaddon, xbmcvfs
else:
from resources.libgui import xbmcaddon
from resources.libgui import xbmcgui
# common routines
from resources.lib import kodi_common
# global variables
import addon_parameters
addon = addon_parameters.addon
cloudservice3 = addon_parameters.cloudservice3
cloudservice2 = addon_parameters.cloudservice2
cloudservice1 = addon_parameters.cloudservice1
#*** testing - gdrive
from resources.lib import tvWindow
from resources.lib import gSpreadsheets
from resources.lib import gSheets_api4
##**
# cloudservice - standard modules
#from resources.lib import gdrive
#from resources.lib import gdrive_api2
from resources.lib import cloudservice
from resources.lib import authorization
from resources.lib import folder
from resources.lib import teamdrive
from resources.lib import file
from resources.lib import offlinefile
from resources.lib import package
from resources.lib import mediaurl
from resources.lib import crashreport
from resources.lib import gPlayer
from resources.lib import settings
from resources.lib import cache
from resources.lib import TMDB
#global variables
PLUGIN_URL = sys.argv[0]
plugin_handle = int(sys.argv[1])
plugin_queries = settings.parse_query(sys.argv[2][1:])
addon_dir = xbmc.translatePath( addon.getAddonInfo('path') )
kodi_common.debugger()
# cloudservice - create settings module
settings = settings.settings(addon)
# retrieve settings
user_agent = settings.getSetting('user_agent')
# obsolete, replace; this user agent prevents audio from streaming
#if user_agent == 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)':
# addon.setSetting('user_agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.38 Safari/532.0')
mode = settings.getParameter('mode','main')
# make mode case-insensitive
mode = mode.lower()
#*** old - gdrive
# allow for playback of public videos without authentication
if (mode == 'streamurl'):
authenticate = False
else:
authenticate = True
##**
instanceName = ''
try:
instanceName = (plugin_queries['instance']).lower()
except:
pass
# cloudservice - content type
contextType = settings.getParameter('content_type')
#support encfs?
encfs = settings.getParameter('encfs', False)
contentType = kodi_common.getContentType(contextType,encfs)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
# xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TRACKNUM)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_SIZE)
numberOfAccounts = kodi_common.numberOfAccounts(addon_parameters.PLUGIN_NAME)
invokedUsername = settings.getParameter('username')
# cloudservice - utilities
###
if mode == 'dummy' or mode == 'delete' or mode == 'enroll':
kodi_common.accountActions(addon, addon_parameters.PLUGIN_NAME, mode, instanceName, numberOfAccounts)
#create strm files
elif mode == 'buildstrm':
silent = settings.getParameter('silent', settings.getSetting('strm_silent',0))
if silent == '':
silent = 0
try:
path = settings.getSetting('strm_path')
except:
path = xbmcgui.Dialog().browse(0,addon.getLocalizedString(30026), 'files','',False,False,'')
addon.setSetting('strm_path', path)
if path == '':
path = xbmcgui.Dialog().browse(0,addon.getLocalizedString(30026), 'files','',False,False,'')
addon.setSetting('strm_path', path)
if path != '':
returnPrompt = xbmcgui.Dialog().yesno(addon.getLocalizedString(30000), addon.getLocalizedString(30027) + '\n'+path + '?')
if path != '' and returnPrompt:
if silent != 2:
try:
pDialog = xbmcgui.DialogProgressBG()
pDialog.create(addon.getLocalizedString(30000), 'Building STRMs...')
except:
pass
url = settings.getParameter('streamurl')
url = re.sub('---', '&', url)
title = settings.getParameter('title')
type = int(settings.getParameter('type', 0))
if url != '':
filename = path + '/' + title+'.strm'
strmFile = xbmcvfs.File(filename, "w")
strmFile.write(url+'\n')
strmFile.close()
else:
folderID = settings.getParameter('folder')
filename = settings.getParameter('filename')
title = settings.getParameter('title')
invokedUsername = settings.getParameter('username')
encfs = settings.getParameter('encfs', False)
encryptedPath = settings.getParameter('epath', '')
dencryptedPath = settings.getParameter('dpath', '')
if folderID != '':
count = 1
loop = True
while loop:
instanceName = addon_parameters.PLUGIN_NAME+str(count)
try:
username = settings.getSetting(instanceName+'_username')
if username == invokedUsername:
#let's log in
if ( settings.getSettingInt(instanceName+'_type',0)==0):
service = cloudservice1(PLUGIN_URL,addon,instanceName, user_agent, settings)
else:
service = cloudservice2(PLUGIN_URL,addon,instanceName, user_agent, settings)
loop = False
except:
service = cloudservice1(PLUGIN_URL,addon,instanceName, user_agent)
break
if count == numberOfAccounts:
try:
service
except NameError:
#fallback on first defined account
if ( settings.getSettingInt(instanceName+'_type',0)==0):
service = cloudservice1(PLUGIN_URL,addon,addon_parameters.PLUGIN_NAME+'1', user_agent, settings)
else:
service = cloudservice2(PLUGIN_URL,addon,addon_parameters.PLUGIN_NAME+'1', user_agent, settings)
break
count = count + 1
# encfs -- extract filename
if encfs:
extrapulatedFolderName = re.compile('([^/]+)/$')
titleDecrypted = extrapulatedFolderName.match(dencryptedPath)
if titleDecrypted is not None:
title = titleDecrypted.group(1)
if addon_parameters.spreadsheet and service.cloudResume == '2':
spreadsheetFile = xbmcvfs.File(path + '/spreadsheet.tab', "w")
service.buildSTRM(path + '/'+title,folderID, contentType=contentType, pDialog=pDialog, epath=encryptedPath, dpath=dencryptedPath, encfs=encfs, spreadsheetFile=spreadsheetFile)
spreadsheetFile.close()
else:
service.buildSTRM(path + '/'+title,folderID, contentType=contentType, pDialog=pDialog, epath=encryptedPath, dpath=dencryptedPath, encfs=encfs)
elif filename != '':
if encfs:
values = {'title': title, 'encfs': 'True', 'epath': encryptedPath, 'dpath': dencryptedPath, 'filename': filename, 'username': invokedUsername}
# encfs -- extract filename
extrapulatedFileName = re.compile('.*?/([^/]+)$')
titleDecrypted = extrapulatedFileName.match(dencryptedPath)
if titleDecrypted is not None:
title = titleDecrypted.group(1)
else:
values = {'title': title, 'filename': filename, 'username': invokedUsername}
if type == 1:
url = PLUGIN_URL+'?mode=audio&'+urllib.urlencode(values)
else:
url = PLUGIN_URL+'?mode=video&'+urllib.urlencode(values)
filename = path + '/' + title+'.strm'
strmFile = xbmcvfs.File(filename, "w")
strmFile.write(url+'\n')
strmFile.close()
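                # Illustrative content of the resulting .strm file (values are
                # hypothetical): a single plugin URL line such as
                #   plugin://<addon-id>/?mode=video&title=Movie.mkv&filename=<fileID>&username=<account>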
else:
count = 1
while True:
instanceName = addon_parameters.PLUGIN_NAME+str(count)
username = settings.getSetting(instanceName+'_username')
if username != '' and username == invokedUsername:
if ( settings.getSettingInt(instanceName+'_type',0)==0):
service = cloudservice1(PLUGIN_URL,addon,instanceName, user_agent, settings)
else:
service = cloudservice2(PLUGIN_URL,addon,instanceName, user_agent, settings)
service.buildSTRM(path + '/'+username, contentType=contentType, pDialog=pDialog, epath=encryptedPath, dpath=dencryptedPath, encfs=encfs)
if count == numberOfAccounts:
#fallback on first defined account
try:
service
except NameError:
#fallback on first defined account
if ( settings.getSettingInt(instanceName+'_type',0)==0):
service = cloudservice1(PLUGIN_URL,addon,addon_parameters.PLUGIN_NAME+'1', user_agent, settings)
else:
service = cloudservice2(PLUGIN_URL,addon,addon_parameters.PLUGIN_NAME+'1', user_agent, settings)
break
count = count + 1
if silent != 2:
try:
pDialog.update(100)
pDialog.close()
except:
pass
if silent == 0:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30028))
xbmcplugin.endOfDirectory(plugin_handle)
###
###
#STRM playback without instance name; use default
if invokedUsername == '' and instanceName == '' and (mode == 'video' or mode == 'audio'):
instanceName = addon_parameters.PLUGIN_NAME + str(settings.getSetting('account_default', 1))
instanceName = kodi_common.getInstanceName(addon, addon_parameters.PLUGIN_NAME, mode, instanceName, invokedUsername, numberOfAccounts, contextType)
service = None
if instanceName is None and (mode == 'index' or mode == 'main' or mode == 'offline'):
service = None
elif instanceName is None:
service = cloudservice2(PLUGIN_URL,addon,'', user_agent, settings, authenticate=False)
elif settings.getSettingInt(instanceName+'_type',0)==0 :
service = cloudservice1(PLUGIN_URL,addon,instanceName, user_agent, settings)
else:
service = cloudservice2(PLUGIN_URL,addon,instanceName, user_agent, settings)
#create strm files
if mode == 'buildf2':
import time
currentDate = time.strftime("%Y%m%d")
try:
path = settings.getSetting('strm_path')
except:
pass
if path != '':
try:
pDialog = xbmcgui.DialogProgressBG()
pDialog.create(addon.getLocalizedString(30000), 'Building STRMs...')
except:
pass
#service = gdrive_api2.gdrive(PLUGIN_URL,addon,instanceName, user_agent, settings)
# try:
addon.setSetting(instanceName + '_changedate', currentDate)
service.buildSTRM2(path, contentType=contentType, pDialog=pDialog)
# except:
# pass
try:
pDialog.update(100)
pDialog.close()
except:
pass
xbmcplugin.endOfDirectory(plugin_handle)
# options menu
#if mode == 'main':
# addMenu(PLUGIN_URL+'?mode=options','<< '+addon.getLocalizedString(30043)+' >>')
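# 'offline' mode: list files that were already downloaded into the local cache
# folder so they can be browsed and played without contacting the cloud service.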
if mode == 'offline':
title = settings.getParameter('title')
folderID = settings.getParameter('folder')
folderName = settings.getParameter('foldername')
mediaItems = kodi_common.getOfflineFileList(settings.getSetting('cache_folder'))
if mediaItems:
for offlinefile in mediaItems:
kodi_common.addOfflineMediaFile(offlinefile)
elif service is None:
xbmcplugin.endOfDirectory(plugin_handle)
#cloud_db actions
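# 'cloud_db' mode: watched/queued/recently-played bookkeeping backed by a Google
# spreadsheet (CLOUD_DB, worksheet 'db'); actions either update the row for the current
# package (watch, queue) or build listings such as recently watched, library and queued items.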
elif mode == 'cloud_db':
title = settings.getParameter('title')
folderID = settings.getParameter('folder')
folderName = settings.getParameter('foldername')
filename = settings.getParameter('filename')
action = settings.getParameter('action')
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(folderID,folderName)
package=package.package(mediaFile,mediaFolder)
# TESTING
if addon_parameters.spreadsheet and service.cloudResume == '2':
if service.worksheetID == '':
try:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
spreadsheets = service.gSpreadsheet.getSpreadsheetList()
except:
pass
for title in spreadsheets.iterkeys():
if title == 'CLOUD_DB':
worksheets = service.gSpreadsheet.getSpreadsheetWorksheets(spreadsheets[title])
for worksheet in worksheets.iterkeys():
if worksheet == 'db':
service.worksheetID = worksheets[worksheet]
addon.setSetting(instanceName + '_spreadsheet', service.worksheetID)
break
break
# TESTING
if addon_parameters.spreadsheet and service.cloudResume == '2':
if service.gSpreadsheet is None:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
if action == 'watch':
service.gSpreadsheet.setMediaStatus(service.worksheetID,package, watched=1)
xbmc.executebuiltin("XBMC.Container.Refresh")
elif action == 'queue':
package.folder.id = 'QUEUED'
service.gSpreadsheet.setMediaStatus(service.worksheetID,package)
elif action == 'recentwatched' or action == 'recentstarted' or action == 'library' or action == 'queued':
mediaItems = service.gSpreadsheet.updateMediaPackage(service.worksheetID, criteria=action)
#ensure that folder view playback
if contextType == '':
contextType = 'video'
if mediaItems:
for item in mediaItems:
if item.file is None:
service.addDirectory(item.folder, contextType=contextType)
else:
service.addMediaFile(item, contextType=contextType)
service.updateAuthorization(addon)
#cloud_db actions
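# 'cloud_dbtest' mode: experimental movie-library browsing driven by a spreadsheet
# ('Movie2', worksheet 'db'); builds menus by genre, year, title, country, director,
# studio and resolution, and resolves the selected rows back into playable media items.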
elif mode == 'cloud_dbtest':
title = settings.getParameter('title')
folderID = settings.getParameter('folder')
folderName = settings.getParameter('foldername')
filename = settings.getParameter('filename')
action = settings.getParameter('action')
# s = gSheets_api4.gSheets_api4(service,addon, user_agent)
# s.createSpreadsheet()
# s.addRows()
if action == 'library_menu':
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_genre&content_type='+str(contextType),'Genre')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_year&content_type='+str(contextType),'Year')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_title&content_type='+str(contextType),'Title')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_country&content_type='+str(contextType),'Countries')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_director&content_type='+str(contextType),'Directors')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_studio&content_type='+str(contextType),'Studio')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_resolution&content_type='+str(contextType),'Quality (Resolution)')
else:
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(folderID,folderName)
package=package.package(mediaFile,mediaFolder)
spreadsheet = None
# TESTING
if addon_parameters.spreadsheet:
try:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
spreadsheets = service.gSpreadsheet.getSpreadsheetList()
except:
pass
for t in spreadsheets.iterkeys():
if t == 'Movie2':
worksheets = service.gSpreadsheet.getSpreadsheetWorksheets(spreadsheets[t])
for worksheet in worksheets.iterkeys():
if worksheet == 'db':
spreadsheet = worksheets[worksheet]
break
break
# TESTING
if addon_parameters.spreadsheet:
if service.gSpreadsheet is None:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
if action == 'watch':
service.gSpreadsheet.setMediaStatus(service.worksheetID,package, watched=1)
xbmc.executebuiltin("XBMC.Container.Refresh")
elif action == 'queue':
package.folder.id = 'QUEUED'
service.gSpreadsheet.setMediaStatus(service.worksheetID,package)
elif action == 'genre' or action == 'year' or action == 'title' or action == 'country' or action == 'director' or action == 'studio' or action == 'recentstarted' or 'library' in action or action == 'queued':
if action == 'genre':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, genre=title)
elif action == 'year':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, year=title)
elif action == 'title':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, title=title)
elif action == 'resolution':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, resolution=title)
elif action == 'country':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, country=title)
elif action == 'director':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, director=title)
elif action == 'studio':
mediaItems = service.gSpreadsheet.getMovies(spreadsheet, studio=title)
elif action == 'library_title':
mediaItems = service.gSpreadsheet.getTitle(spreadsheet)
elif action == 'library_genre':
mediaItems = service.gSpreadsheet.getGenre(spreadsheet)
elif action == 'library_year':
mediaItems = service.gSpreadsheet.getYear(spreadsheet)
elif action == 'library_country':
mediaItems = service.gSpreadsheet.getCountries(spreadsheet)
elif action == 'library_director':
mediaItems = service.gSpreadsheet.getDirector(spreadsheet)
elif action == 'library_studio':
mediaItems = service.gSpreadsheet.getStudio(spreadsheet)
elif action == 'library_resolution':
mediaItems = service.gSpreadsheet.getResolution(spreadsheet)
#ensure that folder view playback
if contextType == '':
contextType = 'video'
tmdb= TMDB.TMDB(service,addon, user_agent)
if mediaItems:
for item in mediaItems:
if item.file is None:
service.addDirectory(item.folder, contextType=contextType)
else:
# movieID = tmdb.movieSearch(item.file.title,item.file.year)
# tmdb.movieDetails(movieID)
service.addMediaFile(item, contextType=contextType)
service.updateAuthorization(addon)
#dump a list of videos available to play
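# 'main'/'index' modes: build the folder listing for the current account, including the
# aggregate ALL/VIDEO/MUSIC/PHOTO entries, shared and starred items, team drives and the
# search entry, then list the contents of the requested folder.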
elif mode == 'main' or mode == 'index':
folderID = settings.getParameter('folder', False)
folderName = settings.getParameter('foldername', False)
#ensure that folder view playback
if contextType == '':
contextType = 'video'
# display option for all Videos/Music/Photos, across gdrive
#** gdrive specific
if mode == 'main':
if ('gdrive' in addon_parameters.PLUGIN_NAME):
if contentType in (2,4,7):
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=ALL&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+' '+addon.getLocalizedString(30030)+']')
elif contentType == 1:
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=VIDEOMUSIC&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+' '+addon.getLocalizedString(30031)+']')
elif contentType == 0:
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=VIDEO&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+' '+addon.getLocalizedString(30025)+']')
elif contentType == 3:
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=MUSIC&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+' '+addon.getLocalizedString(30094)+']')
elif contentType == 5:
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=PHOTO&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+' '+addon.getLocalizedString(30034)+']')
elif contentType == 6:
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=PHOTOMUSIC&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+' '+addon.getLocalizedString(30032)+']')
folderID = 'root'
if ('gdrive' in addon_parameters.PLUGIN_NAME):
# if (service.protocol != 2):
# kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=STARRED-FILES&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+ ' '+addon.getLocalizedString(30095)+']')
# kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=STARRED-FOLDERS&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+ ' '+addon.getLocalizedString(30096)+']')
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=SHARED&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+ ' '+addon.getLocalizedString(30098)+']')
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder=STARRED-FILESFOLDERS&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30018)+ ' '+addon.getLocalizedString(30097)+']')
teamdrives = service.getTeamDrives();
for drive in teamdrives:
kodi_common.addMenu(PLUGIN_URL+'?mode=index&folder='+str(drive.id)+'&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30200) + ' - ' + str(drive.title)+']')
kodi_common.addMenu(PLUGIN_URL+'?mode=search&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30111)+']')
kodi_common.addMenu(PLUGIN_URL+'?mode=buildstrm2&instance='+str(service.instanceName)+'&content_type='+str(contextType),'<Testing - manual run of change tracking build STRM>')
if addon_parameters.testing_features:
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_dbtest&instance='+str(service.instanceName)+'&action=library_menu&content_type='+str(contextType),'[MOVIES]')
#CLOUD_DB
if 'gdrive' in addon_parameters.PLUGIN_NAME and service.gSpreadsheet is not None:
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_db&action=recentstarted&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30177)+' recently started]')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_db&action=recentwatched&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30177)+' recently watched]')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_db&action=library&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30177)+' library]')
kodi_common.addMenu(PLUGIN_URL+'?mode=cloud_db&action=queued&instance='+str(service.instanceName)+'&content_type='+contextType,'['+addon.getLocalizedString(30177)+' queued]')
##**
# cloudservice - validate service
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052))
xbmc.log(addon.getLocalizedString(30050)+ addon_parameters.PLUGIN_NAME+'-login', xbmc.LOGERROR)
xbmcplugin.endOfDirectory(plugin_handle)
#if encrypted, get everything (encrypted files are reported as application/octet-stream)
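# Two naming schemes are handled below: 'crypto' (password-based, names decrypted
# in-process via encryption.decryptString) and 'encfs' (names decrypted by an external
# encfs mount and matched back to the cloud items by inode/ctime).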
if encfs:
#temporarily force crypto with encfs
settings.setCryptoParameters()
if settings.cryptoPassword != "":
mediaItems = service.getMediaList(folderID,contentType=8)
if mediaItems:
from resources.lib import encryption
encrypt = encryption.encryption(settings.cryptoSalt,settings.cryptoPassword)
if contentType == 9:
mediaList = ['.mp4', '.flv', '.mov', '.webm', '.avi', '.ogg', '.mkv']
elif contentType == 10:
mediaList = ['.mp3', '.flac']
else:# contentType == 11:
mediaList = ['.jpg', '.png']
media_re = re.compile("|".join(mediaList), re.I)
#create the files and folders for decrypting file/folder names
for item in mediaItems:
if item.file is None:
try:
item.folder.displaytitle = encrypt.decryptString(str(item.folder.title))
service.addDirectory(item.folder, contextType=contextType, encfs=True )
except: pass
else:
try:
item.file.displaytitle = encrypt.decryptString(str(item.file.title))
item.file.title = item.file.displaytitle
if contentType < 9 or media_re.search(str(item.file.title)):
service.addMediaFile(item, contextType=contextType, encfs=True)
except:
pass
else:
settings.setEncfsParameters()
encryptedPath = settings.getParameter('epath', '')
dencryptedPath = settings.getParameter('dpath', '')
encfs_source = settings.encfsSource
encfs_target = settings.encfsTarget
encfs_inode = settings.encfsInode
mediaItems = service.getMediaList(folderID,contentType=8)
if mediaItems:
dirListINodes = {}
fileListINodes = {}
#create the files and folders for decrypting file/folder names
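# Technique: for every cloud item an empty placeholder with the *encrypted* name is
# created under the encfs source; its inode (or ctime as a fallback) is recorded, the
# decrypted view is listed afterwards, and matching inodes reveal which decrypted name
# belongs to which cloud item.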
for item in mediaItems:
if item.file is None:
xbmcvfs.mkdir(encfs_source + str(encryptedPath))
xbmcvfs.mkdir(encfs_source + str(encryptedPath) + str(item.folder.title) + '/' )
if encfs_inode == 0:
dirListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath) + str(item.folder.title)).st_ino()))] = item.folder
else:
dirListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath) + str(item.folder.title)).st_ctime()))] = item.folder
#service.addDirectory(item.folder, contextType=contextType, encfs=True)
else:
xbmcvfs.mkdir(encfs_source + str(encryptedPath))
xbmcvfs.mkdir(encfs_source + str(encryptedPath) + str(item.file.title))
if encfs_inode == 0:
fileListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath)+ str(item.file.title)).st_ino()))] = item
else:
fileListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath) + str(item.file.title)).st_ctime()))] = item
#service.addMediaFile(item, contextType=contextType)
if encfs_inode > 0:
xbmc.sleep(1000)
if contentType == 9:
mediaList = ['.mp4', '.flv', '.mov', '.webm', '.avi', '.ogg', '.mkv']
elif contentType == 10:
mediaList = ['.mp3', '.flac']
else:# contentType == 11:
mediaList = ['.jpg', '.png']
media_re = re.compile("|".join(mediaList), re.I)
#examine the decrypted file/folder names: files for playback, dirs for navigation
dirs, files = xbmcvfs.listdir(encfs_target + str(dencryptedPath) )
for dir in dirs:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPath) + dir).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPath) + dir).st_ctime())
#we found a directory
if index in dirListINodes.keys():
xbmcvfs.rmdir(encfs_target + str(dencryptedPath) + dir)
# dirTitle = dir + ' [' +dirListINodes[index].title+ ']'
encryptedDir = dirListINodes[index].title
dirListINodes[index].displaytitle = dir + ' [' +dirListINodes[index].title+ ']'
service.addDirectory(dirListINodes[index], contextType=contextType, encfs=True, dpath=str(dencryptedPath) + str(dir) + '/', epath=str(encryptedPath) + str(encryptedDir) + '/' )
#we found a file
elif index in fileListINodes.keys():
xbmcvfs.rmdir(encfs_target + str(dencryptedPath) + dir)
fileListINodes[index].file.decryptedTitle = dir
if contentType < 9 or media_re.search(str(dir)):
service.addMediaFile(fileListINodes[index], contextType=contextType, encfs=True, dpath=str(dencryptedPath) + str(dir), epath=str(encryptedPath) )
# file is already downloaded
for file in files:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPath) + file).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPath) + file).st_ctime())
if index in fileListINodes.keys():
fileListINodes[index].file.decryptedTitle = file
if contentType < 9 or media_re.search(str(file)):
service.addMediaFile(fileListINodes[index], contextType=contextType, encfs=True, dpath=str(dencryptedPath) + str(file), epath=str(encryptedPath) )
#xbmc.executebuiltin("XBMC.Container.Refresh")
else:
path = settings.getParameter('epath', '')
# real folder
if folderID != '':
mediaItems = service.getMediaList(folderID,contentType=contentType)
if addon_parameters.spreadsheet and service.cloudResume == '2':
if service.gSpreadsheet is None:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
if service.worksheetID != '':
service.gSpreadsheet.updateMediaPackageList(service.worksheetID, folderID, mediaItems)
if mediaItems:
for item in sorted(mediaItems):
if item.file is None:
service.addDirectory(item.folder, contextType=contextType, epath=str(path)+ '/' + str(item.folder.title) + '/')
else:
service.addMediaFile(item, contextType=contextType)
# virtual folder; exists in spreadsheet only
# not in use
#elif folderName != '':
service.updateAuthorization(addon)
# NOT IN USE
#** testing - gdrive
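# 'kiosk' mode (experimental): plays TV-show style schedules whose episode lists are
# stored in a Google spreadsheet, feeding them to the custom gPlayer for continuous playback.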
elif mode == 'kiosk':
spreadshetModule = settings.getSetting('library', False)
if spreadshetModule:
gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
service.gSpreadsheet = gSpreadsheet
spreadsheets = service.getSpreadsheetList()
channels = []
for title in spreadsheets.iterkeys():
if title == 'TVShows':
worksheets = gSpreadsheet.getSpreadsheetWorksheets(spreadsheets[title])
if 0:
import time
hour = time.strftime("%H")
minute = time.strftime("%M")
weekDay = time.strftime("%w")
month = time.strftime("%m")
day = time.strftime("%d")
for worksheet in worksheets.iterkeys():
if worksheet == 'schedule':
channels = gSpreadsheet.getChannels(worksheets[worksheet])
ret = xbmcgui.Dialog().select(addon.getLocalizedString(30112), channels)
shows = gSpreadsheet.getShows(worksheets[worksheet] ,channels[ret])
showList = []
for show in shows:
showList.append(shows[show][6])
ret = xbmcgui.Dialog().select(addon.getLocalizedString(30112), showList)
for worksheet in worksheets.iterkeys():
if worksheet == 'data':
episodes = gSpreadsheet.getVideo(worksheets[worksheet] ,showList[ret])
#player = gPlayer.gPlayer()
#player.setService(service)
player.setContent(episodes)
player.setWorksheet(worksheets['data'])
player.next()
while not player.isExit:
xbmc.sleep(5000)
else:
for worksheet in worksheets.iterkeys():
if worksheet == 'db':
episodes = gSpreadsheet.getMedia(worksheets[worksheet], service.getRootID())
#player = gPlayer.gPlayer()
#player.setService(service)
# player.setContent(episodes)
player.setWorksheet(worksheets['db'])
player.PlayStream('plugin://plugin.video.'+addon_parameters.PLUGIN_NAME+'-testing/?mode=video&instance='+str(service.instanceName)+'&title='+episodes[0][3], None,episodes[0][7],episodes[0][2])
#player.next()
while not player.isExit:
player.saveTime()
xbmc.sleep(5000)
##** not in use
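# 'photo' mode: download a single picture (optionally through the encfs cache) and hand it
# to Kodi, either via ShowPicture for encfs content or as a resolved URL with auth headers.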
elif mode == 'photo':
title = settings.getParameter('title',0)
title = re.sub('/', '_', title) #remap / from titles (google photos)
docid = settings.getParameter('filename')
folder = settings.getParameter('folder',0)
encfs = settings.getParameter('encfs', False)
if encfs:
settings.setEncfsParameters()
encryptedPath = settings.getParameter('epath', '')
dencryptedPath = settings.getParameter('dpath', '')
encfs_source = settings.encfsSource
encfs_target = settings.encfsTarget
encfs_inode = settings.encfsInode
# don't redownload if present already
if (not xbmcvfs.exists(str(encfs_source) + str(encryptedPath) +str(title))):
url = service.getDownloadURL(docid)
service.downloadGeneralFile(url, str(encfs_source) + str(encryptedPath) +str(title))
xbmc.executebuiltin("XBMC.ShowPicture(\""+str(encfs_target) + str(dencryptedPath)+"\")")
#item = xbmcgui.ListItem(path=str(encfs_target) + str(dencryptedPath))
#xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
path = settings.getSetting('photo_folder')
#workaround for this issue: https://github.com/xbmc/xbmc/pull/8531
if not xbmcvfs.exists(path) and not os.path.exists(path):
path = ''
while path == '':
path = xbmcgui.Dialog().browse(0,addon.getLocalizedString(30038), 'files','',False,False,'')
#workaround for this issue: https://github.com/xbmc/xbmc/pull/8531
if not xbmcvfs.exists(path) and not os.path.exists(path):
path = ''
else:
addon.setSetting('photo_folder', path)
if (not xbmcvfs.exists(str(path) + '/'+str(folder) + '/')):
xbmcvfs.mkdir(str(path) + '/'+str(folder))
# try:
# xbmcvfs.rmdir(str(path) + '/'+str(folder)+'/'+str(title))
# except:
# pass
# don't redownload if present already
if (not xbmcvfs.exists(str(path) + '/'+str(folder)+'/'+str(title))):
url = service.getDownloadURL(docid)
service.downloadPicture(url, str(path) + '/'+str(folder) + '/'+str(title))
#xbmc.executebuiltin("XBMC.ShowPicture("+str(path) + '/'+str(folder) + '/'+str(title)+")")
#item = xbmcgui.ListItem(path=str(path) + '/'+str(folder) + '/'+str(title))
url = service.getDownloadURL(docid)
item = xbmcgui.ListItem(path=url + '|' + service.getHeadersEncoded())
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
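# 'downloadfolder' mode: bulk-download every file of the selected folder, either into the
# encfs source directory (encrypted names) or into the configured local photo folder.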
elif mode == 'downloadfolder':
title = settings.getParameter('title')
folderID = settings.getParameter('folder')
folderName = settings.getParameter('foldername')
encfs = settings.getParameter('encfs', False)
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052))
xbmc.log(addon.getLocalizedString(30050)+ addon_parameters.PLUGIN_NAME + '-login',xbmc.LOGERROR)
xbmcplugin.endOfDirectory(plugin_handle)
if encfs:
settings.setEncfsParameters()
encryptedPath = settings.getParameter('epath', '')
dencryptedPath = settings.getParameter('dpath', '')
encfs_source = settings.encfsSource
encfs_target = settings.encfsTarget
encfs_inode = settings.encfsInode
else:
path = settings.getParameter('epath', '/')
if encfs:
mediaItems = service.getMediaList(folderName=folderID, contentType=8)
path = str(encfs_source) + str(encryptedPath)
else:
mediaItems = service.getMediaList(folderName=folderID, contentType=contentType)
path = str(settings.getSetting('photo_folder')) + str(path)
if mediaItems:
progress = xbmcgui.DialogProgressBG()
progressBar = len(mediaItems)
progress.create(addon.getLocalizedString(30092), '')
count=0
if not xbmcvfs.exists(path) and not os.path.exists(path):
xbmcvfs.mkdirs(path)
for item in mediaItems:
count = count + 1
if item.file is not None:
progress.update((int)(float(count)/len(mediaItems)*100),addon.getLocalizedString(30092), str(item.file.title))
service.downloadGeneralFile(item.getMediaURL(),str(path) + str(item.file.title) )
# elif item.folder is not None:
# # create path if doesn't exist
# if (not xbmcvfs.exists(str(path) + '/'+str(folder) + '/')):
# xbmcvfs.mkdir(str(path) + '/'+str(folder))
progress.close()
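# 'slideshow' mode: make sure every picture of the folder exists in the local/encfs cache,
# downloading missing ones first, then start a Kodi slideshow on the decrypted/local path.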
elif mode == 'slideshow':
folder = settings.getParameter('folder',0)
title = settings.getParameter('title',0)
encfs = settings.getParameter('encfs', False)
if encfs:
settings.setEncfsParameters()
encfs_source = settings.encfsSource
encfs_target = settings.encfsTarget
encfs_inode = settings.encfsInode
if (not xbmcvfs.exists(str(encfs_target) + '/'+str(folder) + '/')):
xbmcvfs.mkdir(str(encfs_target) + '/'+str(folder))
folderINode = ''
if encfs_inode == 0:
folderINode = str(xbmcvfs.Stat(encfs_target + '/' + str(folder)).st_ino())
else:
folderINode = str(xbmcvfs.Stat(encfs_target + '/' + str(folder)).st_ctime())
mediaItems = service.getMediaList(folderName=folder, contentType=8)
if mediaItems:
dirs, filesx = xbmcvfs.listdir(encfs_source)
for dir in dirs:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_source + '/' + dir).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_source + '/' + dir).st_ctime())
if index == folderINode:
progress = xbmcgui.DialogProgressBG()
progress.create(addon.getLocalizedString(30035), 'Preparing list...')
count=0
for item in mediaItems:
if item.file is not None:
count = count + 1;
progress.update((int)(float(count)/len(mediaItems)*100),addon.getLocalizedString(30035), item.file.title)
if (not xbmcvfs.exists(str(encfs_source) + '/'+str(dir)+'/'+str(item.file.title))):
service.downloadGeneralFile(item.mediaurl.url,str(encfs_source) + '/'+str(dir)+ '/'+str(item.file.title))
if encfs_inode > 0:
xbmc.sleep(100)
progress.close()
xbmc.executebuiltin("XBMC.SlideShow(\""+str(encfs_target) + '/'+str(folder)+"/\")")
elif 0:
path = settings.getSetting('photo_folder')
#workaround for this issue: https://github.com/xbmc/xbmc/pull/8531
if not xbmcvfs.exists(path) and not os.path.exists(path):
path = ''
while path == '':
path = xbmcgui.Dialog().browse(0,addon.getLocalizedString(30038), 'files','',False,False,'')
#workaround for this issue: https://github.com/xbmc/xbmc/pull/8531
if not xbmcvfs.exists(path) and not os.path.exists(path):
path = ''
else:
addon.setSetting('photo_folder', path)
# create path if it doesn't exist
if (not xbmcvfs.exists(str(path) + '/'+str(folder) + '/')):
xbmcvfs.mkdir(str(path) + '/'+str(folder))
mediaItems = service.getMediaList(folderName=folder, contentType=5)
if mediaItems:
progress = xbmcgui.DialogProgressBG()
progress.create(addon.getLocalizedString(30035), 'Preparing list...')
count=0
for item in mediaItems:
if item.file is not None:
count = count + 1;
progress.update((int)(float(count)/len(mediaItems)*100),addon.getLocalizedString(30035), item.file.title)
service.downloadGeneralFile(item.mediaurl.url,str(path) + '/'+str(folder)+ '/'+item.file.title)
#xbmc.executebuiltin("XBMC.SlideShow("+str(path) + '/'+str(folder)+"/)")
progress.close()
xbmc.executebuiltin("XBMC.SlideShow(\""+str(path) + '/'+str(folder)+"/\")")
#else:
# xbmc.executebuiltin("XBMC.SlideShow("+str(path) + '/'+str(folder)+"/)")
###
# for video files
# force stream - play a video given its url
###
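# 'streamurl' mode: resolve a public URL into its available streams, either prompt for
# quality or take the first option, then resolve the chosen stream back to Kodi with
# the required auth headers attached.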
elif mode == 'streamurl':
url = settings.getParameter('url',0)
title = settings.getParameter('title')
promptQuality = settings.getSetting('prompt_quality', True)
mediaURLs = service.getPublicStream(url)
options = []
if mediaURLs:
mediaURLs = sorted(mediaURLs)
for mediaURL in mediaURLs:
options.append(mediaURL.qualityDesc)
if promptQuality:
ret = xbmcgui.Dialog().select(addon.getLocalizedString(30033), options)
else:
ret = 0
playbackURL = mediaURLs[ret].url
if (playbackURL == ''):
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30020),addon.getLocalizedString(30021))
xbmc.log(addon.getAddonInfo('name') + ': ' + addon.getLocalizedString(20021), xbmc.LOGERROR)
else:
# if invoked in .strm or as a direct-video (don't prompt for quality)
item = xbmcgui.ListItem(path=playbackURL+ '|' + service.getHeadersEncoded())
item.setInfo( type="Video", infoLabels={ "Title": mediaURLs[ret].title , "Plot" : mediaURLs[ret].title } )
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30020),addon.getLocalizedString(30021))
xbmc.log(addon.getAddonInfo('name') + ': ' + addon.getLocalizedString(20021), xbmc.LOGERROR)
###
# for video files - playback of video
# force stream - play a video given its url
###
#
# legacy (deprecated) - memorycachevideo [given title]
# legacy (deprecated) - play [given title]
# legacy (deprecated) - playvideo [given title]
# legacy (deprecated) - streamvideo [given title]
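# Main playback entry point ('video'/'audio'/'search' plus several legacy aliases):
# resolves the file ID (directly, via spreadsheet lookup for STRM files, or by search),
# then branches into encfs/crypto playback, streamer-assisted playback, cache/download
# playback or plain URL resolution, keeping the plugin alive while the custom player runs.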
elif mode == 'audio' or mode == 'video' or mode == 'search' or mode == 'play' or mode == 'memorycachevideo' or mode == 'playvideo' or mode == 'streamvideo':
title = settings.getParameter('title') #file title
filename = settings.getParameter('filename') #file ID
folderID = settings.getParameter('folder') #folder ID
spreadsheetSTRM = settings.getParameter('spreadsheet')
sheetSTRM = settings.getParameter('sheet')
year = settings.getParameter('year')
if sheetSTRM != None and sheetSTRM != '':
if service.gSpreadsheet is None:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
try:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
spreadsheets = service.gSpreadsheet.getSpreadsheetList()
except:
pass
spreadsheet = None
for t in spreadsheets.iterkeys():
if t == 'Movies':
worksheets = service.gSpreadsheet.getSpreadsheetWorksheets(spreadsheets[t])
for worksheet in worksheets.iterkeys():
if worksheet == 'db':
spreadsheet = worksheets[worksheet]
break
break
if spreadsheet != None:
filename = service.gSpreadsheet.getSTRMplaybackMovie(spreadsheet, title, year)
else:
filename = service.gSpreadsheet.getSTRMplaybackMovie('https://spreadsheets.google.com/feeds/list/'+spreadsheetSTRM+'/'+sheetSTRM+'/private/full', title, year)
if folderID == 'False':
folderID = 'SEARCH'
if mode != 'audio':
settings.setVideoParameters()
seek = 0
if settings.seek:
dialog = xbmcgui.Dialog()
seek = dialog.numeric(2, 'Time to seek to', '00:00')
for r in re.finditer('(\d+)\:(\d+)' ,seek, re.DOTALL):
seekHours, seekMins = r.groups()
seek = int(seekMins) + (int(seekHours)*60)
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052))
xbmc.log(addon.getLocalizedString(30050)+ addon_parameters.PLUGIN_NAME + '-login', xbmc.LOGERROR)
xbmcplugin.endOfDirectory(plugin_handle)
#settings.setCacheParameters()
if mode == 'memorycachevideo':
settings.play = True
settings.download = True
elif mode == 'playvideo':
settings.play = False
settings.download = False
settings.playOriginal = True
if settings.cache:
settings.download = False
settings.play = False
encfs = settings.getParameter('encfs', False)
#testing
player = gPlayer.gPlayer()
player.setService(service)
resolvedPlayback = True
startPlayback = False
toExit = False
#package = None
if encfs:
#temporarily force crypto with encfs
settings.setCryptoParameters()
if settings.cryptoPassword != "":
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(folderID,'')
(mediaURLs,package) = service.getPlaybackCall(package=package.package(mediaFile,mediaFolder), title=title, contentType=8)
#override title
package.file.title = title
#(mediaURLs,package) = service.getPlaybackCall(None,title=title)
mediaURL = mediaURLs[0]
#mediaURL.url = mediaURL.url +'|' + service.getHeadersEncoded()
#print "mediaURLDD = " + mediaURL.url
# use streamer if defined
useStreamer = False
if service is not None and service.settings.streamer:
# test streamer
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from resources.lib import streamer
import urllib, urllib2
from SocketServer import ThreadingMixIn
import threading
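# Probe for the background streamer: binding the configured port only succeeds when the
# streamer service is NOT already running (normal playback is then used); a failed bind
# means the service owns the port and playback can be routed through it.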
try:
server = streamer.MyHTTPServer(('', service.settings.streamPort), streamer.myStreamer)
server.setAccount(service, '')
#if we make it here, the streamer was not already running as a service; abort and fall back to normal playback, otherwise we would lock
except:
useStreamer = True
if useStreamer:
url = 'http://localhost:' + str(service.settings.streamPort) + '/crypto_playurl'
req = urllib2.Request(url, 'url=' + mediaURL.url)
print "mediaURL = "+mediaURL.url
try:
response = urllib2.urlopen(req)
response.close()
except urllib2.URLError, e:
xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
item = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
thumbnailImage=package.file.thumbnail, path='http://localhost:' + str(service.settings.streamPort) + '/play')
item.setPath('http://localhost:' + str(service.settings.streamPort) + '/play')
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
## contribution by dabinn
# handle situation where playback is skipped to next file, wait for new source to load
if player.isPlaying():
xbmc.sleep(100)
startPlayback = False
# need to seek?
if seek > 0:
player.PlayStream(mediaURL.url, item, seek, startPlayback=startPlayback, package=package)
elif float(package.file.cloudResume) > 0:
player.PlayStream(mediaURL.url, item, package.file.cloudResume, startPlayback=startPlayback, package=package)
elif float(package.file.resume) > 0:
player.PlayStream(mediaURL.url, item, package.file.resume, startPlayback=startPlayback, package=package)
else:
player.PlayStream(mediaURL.url, item, 0, startPlayback=startPlayback, package=package)
# must occur after playback started (resolve or startPlayback in player)
# load captions
if 0 and (settings.srt or settings.cc) and (service.protocol == 2 or service.protocol == 3):
while not (player.isPlaying()):
xbmc.sleep(1000)
files = cache.getSRT(service)
for file in files:
if file != '':
try:
#file = file.decode('unicode-escape')
file = file.encode('utf-8')
except:
pass
player.setSubtitles(file)
xbmc.sleep(100)
# keep the plugin alive for as long as it is the source of playback; otherwise the player object closes
while not player.isExit:
player.saveTime()
xbmc.sleep(5000)
else:
settings.setEncfsParameters()
encryptedPath = settings.getParameter('epath', '')
dencryptedPath = settings.getParameter('dpath', '')
encfs_source = settings.encfsSource
encfs_target = settings.encfsTarget
encfs_inode = settings.encfsInode
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(folderID,'')
(mediaURLs,package) = service.getPlaybackCall(package=package.package(mediaFile,mediaFolder), title=title, contentType=8)
#(mediaURLs,package) = service.getPlaybackCall(None,title=title)
mediaURL = mediaURLs[0]
playbackTarget = encfs_target + dencryptedPath
item = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
thumbnailImage=package.file.thumbnail, path=playbackTarget)
#item.setInfo( type="Video", infoLabels={ "Title": package.file.title , "Plot" : package.file.title } )
# right-click or integrated player (no opening stream dialog...)
if contextType == '':
# for STRM (force resolve) -- resolve-only
if settings.username != '':
resolvedPlayback = True
startPlayback = False
else:
startPlayback = True
# resolve for an opening stream dialog
else:
resolvedPlayback=True
# download if not already cached
# if (not xbmcvfs.exists(str(encfs_source) + encryptedPath +str(title))):
url = service.getDownloadURL(filename)
## check for SRT
# use folderID, look for files with srt/sub
mediaItems = service.getMediaList(folderID,contentType=8)
encfsSubTitles = []
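# Look for companion subtitles: placeholders are created for every sibling item, the
# decrypted names are matched by inode/ctime, and any .srt/.sub hits are downloaded so
# they can be attached to the player once playback starts.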
if mediaItems:
dirListINodes = {}
fileListINodes = {}
#create the files and folders for decrypting file/folder names
for itemx in mediaItems:
if itemx.file is None:
xbmcvfs.mkdir(encfs_source + str(encryptedPath))
xbmcvfs.mkdir(encfs_source + str(encryptedPath) + str(itemx.folder.title) + '/' )
if encfs_inode == 0:
dirListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath) + str(itemx.folder.title)).st_ino()))] = itemx.folder
else:
dirListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath) + str(itemx.folder.title)).st_ctime()))] = itemx.folder
#service.addDirectory(item.folder, contextType=contextType, encfs=True)
else:
xbmcvfs.mkdir(encfs_source + str(encryptedPath))
xbmcvfs.mkdir(encfs_source + str(encryptedPath) + str(itemx.file.title))
if encfs_inode == 0:
fileListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath)+ str(itemx.file.title)).st_ino()))] = itemx
else:
fileListINodes[(str(xbmcvfs.Stat(encfs_source + str(encryptedPath) + str(itemx.file.title)).st_ctime()))] = itemx
#service.addMediaFile(itemx, contextType=contextType)
if encfs_inode > 0:
xbmc.sleep(1000)
mediaList = ['.sub', '.srt']
media_re = re.compile("|".join(mediaList), re.I)
# encfs -- extract path
extrapulatedPath = re.compile('(.*?)/[^/]+$')
dencryptedPathWithoutFilename = extrapulatedPath.match(dencryptedPath)
if dencryptedPathWithoutFilename is None:
dencryptedPathWithoutFilename = ''
else:
dencryptedPathWithoutFilename = dencryptedPathWithoutFilename.group(1) + '/'
#examine the decrypted file/folder names: files for playback, dirs for navigation
dirs, files = xbmcvfs.listdir(encfs_target + str(dencryptedPathWithoutFilename) )
for dir in dirs:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPathWithoutFilename) + dir).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPathWithoutFilename) + dir).st_ctime())
#we found a file
if index in fileListINodes.keys():
xbmcvfs.rmdir(encfs_target + str(dencryptedPathWithoutFilename) + dir)
fileListINodes[index].file.decryptedTitle = dir
if media_re.search(str(dir)):
#we found a subtitle
service.downloadGeneralFile(fileListINodes[index].mediaurl.url, str(encfs_source) + str(encryptedPath) +str(fileListINodes[index].file.title))
# str(encfs_target) + str(dencryptedPathWithoutFilename) + str(fileListINodes[index].file.decryptedTitle)
encfsSubTitles.append(str(encfs_target) + str(dencryptedPathWithoutFilename) + str(fileListINodes[index].file.decryptedTitle))
# file is already downloaded
for file in files:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPathWithoutFilename) + file).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_target + str(dencryptedPathWithoutFilename) + file).st_ctime())
if index in fileListINodes.keys():
fileListINodes[index].file.decryptedTitle = file
if media_re.search(str(file)):
#we found a subtitle
# service.addMediaFile(fileListINodes[index], contextType=contextType, encfs=True, dpath=str(dencryptedPath) + str(file), epath=str(encryptedPath) )
# service.downloadGeneralFile(fileListINodes[index], package, playbackURL=playbackTarget, folderName=str(encfs_source) + encryptedPath + str(fileListINodes[index].file.title))
# service.downloadGeneralFile(fileListINodes[index].mediaurl.url, str(encfs_source) + str(encryptedPath) +str(title))
encfsSubTitles.append(str(encfs_target) + str(dencryptedPathWithoutFilename) + str(fileListINodes[index].file.decryptedTitle))
if settings.encfsStream or settings.encfsCacheSingle:
## calculate the decrypted placeholder name for the single cache file (encfs.mp4)
#create an encfs.mp4 placeholder in the decrypted view so its encrypted counterpart can be located
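# With encfsStream/encfsCacheSingle the download is funneled through this single placeholder:
# its encrypted counterpart is found by inode/ctime and downloadEncfsFile(2) writes into it
# while the player reads the decrypted side.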
fileListINodes = {}
#workaround for this issue: https://github.com/xbmc/xbmc/pull/8531
if not xbmcvfs.exists(encfs_target + 'encfs.mp4') and not os.path.exists(encfs_target + 'encfs.mp4'):
xbmcvfs.mkdir(encfs_target + 'encfs.mp4')
if encfs_inode == 0:
fileListINodes[(str(xbmcvfs.Stat(encfs_target + 'encfs.mp4').st_ino()))] = item
else:
fileListINodes[(str(xbmcvfs.Stat(encfs_target + 'encfs.mp4').st_ctime()))] = item
if encfs_inode > 0:
xbmc.sleep(1000)
dirs, files = xbmcvfs.listdir(encfs_source)
for dir in dirs:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_source + str(dir)).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_source + str(dir)).st_ctime())
#we found a file
if index in fileListINodes.keys():
xbmcvfs.rmdir(encfs_source + str(dir))
addon.setSetting('encfs_last', str(encryptedPath) +str(title))
if settings.encfsExp:
service.downloadEncfsFile2(mediaURL, package, playbackURL=encfs_target + 'encfs.mp4', folderName=str(encfs_source) + str(dir), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
else:
service.downloadEncfsFile(mediaURL, package, playbackURL=encfs_target + 'encfs.mp4', folderName=str(encfs_source) + str(dir), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
#already downloaded (partial or full)
for file in files:
index = ''
if encfs_inode == 0:
index = str(xbmcvfs.Stat(encfs_source + str(file)).st_ino())
else:
index = str(xbmcvfs.Stat(encfs_source + str(file)).st_ctime())
#we found a file
if index in fileListINodes.keys():
#resume
if settings.encfsLast == str(encryptedPath) +str(title):
if settings.encfsExp:
service.downloadEncfsFile2(mediaURL, package, playbackURL=encfs_target + 'encfs.mp4', force=False,folderName=str(encfs_source) + str(file), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
else:
service.downloadEncfsFile(mediaURL, package, playbackURL=encfs_target + 'encfs.mp4', force=False,folderName=str(encfs_source) + str(file), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
#new file
else:
addon.setSetting('encfs_last', str(encryptedPath) +str(title))
if settings.encfsExp:
service.downloadEncfsFile2(mediaURL, package, playbackURL=encfs_target + 'encfs.mp4', force=True, folderName=str(encfs_source) + str(file), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
else:
service.downloadEncfsFile(mediaURL, package, playbackURL=encfs_target + 'encfs.mp4', force=True, folderName=str(encfs_source) + str(file), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
else:
#service.downloadEncfsFile2(mediaURL, package, playbackURL=playbackTarget, folderName=str(encfs_source) + encryptedPath +str(title), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
service.downloadEncfsFile(mediaURL, package, playbackURL=playbackTarget, folderName=str(encfs_source) + encryptedPath +str(title), playback=resolvedPlayback,item=item, player=player, srt=encfsSubTitles)
#should already be playing by this point, so don't restart it
startPlayback = False
#exists; resolve for an opening stream dialog
# elif resolvedPlayback:
# xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
# need to seek?
#if seek > 0:
# player.PlayStream(playbackTarget, item, seek, startPlayback=startPlayback, package=package)
#elif float(package.file.resume) > 0:
# player.PlayStream(playbackTarget, item, package.file.resume, startPlayback=startPlayback, package=package)
#else:
# player.PlayStream(playbackTarget, item, 0, startPlayback=startPlayback, package=package)
#loop until finished
while not player.isExit:
player.saveTime()
xbmc.sleep(5000)
elif mode == 'search' and contextType != '':
if title == '':
try:
dialog = xbmcgui.Dialog()
title = dialog.input(addon.getLocalizedString(30110), type=xbmcgui.INPUT_ALPHANUM)
except:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30100))
title = 'test'
mediaItems = service.getMediaList(title=title, contentType=contentType)
resolvedPlayback = False
startPlayback = False
options = []
urls = []
if mediaItems:
for item in mediaItems:
if item.file is None:
service.addDirectory( item.folder, contextType=contextType)
else:
service.addMediaFile(item, contextType=contextType)
# non-encfs
else:
# file ID provided
#if we don't have the docid, search for the video for playback
if (filename != '' and mode == 'audio'):
mediaFile = file.file(filename, title, '', service.MEDIA_TYPE_MUSIC, '','')
mediaFolder = folder.folder(folderID,'')
(mediaURLs,package) = service.getPlaybackCall(package=package.package(mediaFile,mediaFolder))
elif filename != '':
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(folderID,'')
(mediaURLs,package) = service.getPlaybackCall(package=package.package(mediaFile,mediaFolder))
# search
elif mode == 'search' and contextType == '':
if title == '':
try:
dialog = xbmcgui.Dialog()
title = dialog.input(addon.getLocalizedString(30110), type=xbmcgui.INPUT_ALPHANUM)
except:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30100))
title = 'test'
mediaItems = service.getMediaList(title=title, contentType=contentType)
resolvedPlayback = False
startPlayback = False
options = []
urls = []
if mediaItems:
for item in mediaItems:
if item.file is None:
service.addDirectory( item.folder, contextType=contextType)
else:
options.append(item.file.title)
urls.append(service.addMediaFile(item, contextType=contextType))
#search from STRM
if contextType == '':
ret = xbmcgui.Dialog().select(addon.getLocalizedString(30112), options)
playbackPath = urls[ret]
item = xbmcgui.ListItem(path=playbackPath+'|' + service.getHeadersEncoded())
item.setInfo( type="Video", infoLabels={ "Title": options[ret] , "Plot" : options[ret] } )
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
# playback of entire folder?
# folder only
elif folderID != '' and title == '':
mediaItems = service.getMediaList(folderName=folderID, contentType=contentType)
if mediaItems:
player.setMedia(mediaItems)
player.playList(service)
resolvedPlayback = False
toExit = True
# title provided
else:
(mediaURLs,package) = service.getPlaybackCall(None,title=title)
#ensure there is something to play
if package is not None:
# right-click - download (download only + force)
if not seek > 0 and not (settings.download and not settings.play):
# TESTING
if addon_parameters.spreadsheet and service.cloudResume == '2':
if service.worksheetID == '':
try:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
spreadsheets = service.gSpreadsheet.getSpreadsheetList()
except:
pass
for title in spreadsheets.iterkeys():
if title == 'CLOUD_DB':
worksheets = service.gSpreadsheet.getSpreadsheetWorksheets(spreadsheets[title])
for worksheet in worksheets.iterkeys():
if worksheet == 'db':
service.worksheetID = worksheets[worksheet]
addon.setSetting(instanceName + '_spreadsheet', service.worksheetID)
break
break
# TESTING
if addon_parameters.spreadsheet and service.cloudResume == '2':
if service.gSpreadsheet is None:
service.gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
media = service.gSpreadsheet.updateMediaPackage(service.worksheetID, package)
if package.file.commands != '':
exp = re.compile('([^\|]+):([^\|]+)\|?', re.IGNORECASE)
for cmd in exp.finditer(package.file.commands):
if cmd.group(1) == 'seek':
seek = cmd.group(2)
elif cmd.group(1) == 'title':
package.file.title = cmd.group(2)
elif cmd.group(1) == 'resume':
package.file.resume = cmd.group(2)
elif cmd.group(1) == 'original':
if cmd.group(2).lower() == 'true':
settings.playOriginal = True
else:
settings.playOriginal = False
elif cmd.group(1) == 'promptquality':
if cmd.group(2).lower() == 'true':
settings.promptQuality = True
else:
settings.promptQuality = False
item = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
thumbnailImage=package.file.thumbnail)
item.setInfo( type="Video", infoLabels={ "Title": package.file.title , "Plot" : package.file.title } )
originalURL = ''
if mode != 'audio':
cache = cache.cache(package)
service.cache = cache
package.file.thumbnail = cache.setThumbnail(service)
# SRTURL = ''
srtpath = ''
if settings.srt and (service.protocol == 2 or service.protocol == 3):
cache.setSRT(service)
# download closed-captions
if settings.cc and (service.protocol == 2 or service.protocol == 3):
cache.setCC(service)
mediaURL = service.getMediaSelection(mediaURLs, folderID, filename)
#mediaURL.url = mediaURL.url +'|' + service.getHeadersEncoded()
# if not seek > 0 and package.file.resume > 0 and not settings.cloudResumePrompt:
# returnPrompt = xbmcgui.Dialog().yesno(addon.getLocalizedString(30000), addon.getLocalizedString(30176), str(int(float(package.file.resume)/360)) + ':'+ str(int(float(package.file.resume)/60)) + ':' + str(int(float(package.file.resume)%60)))
# if not returnPrompt:
# package.file.resume = 0
###
#right-menu context OR STRM
##
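# Decide how to hand the media to Kodi: STRM/context-menu invocations resolve the URL only,
# download options fetch the file first (optionally playing while caching), and everything
# else resolves the direct stream URL, applying cloud/local resume positions when known.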
if contextType == '':
# right-click - download (download only + force)
if not mediaURL.offline and settings.download and not settings.play:
# service.downloadMediaFile('',playbackPath, str(title)+'.'+ str(playbackQuality), folderID, filename, fileSize, force=True)
service.downloadMediaFile(mediaURL, item, package, force=True, playback=service.PLAYBACK_NONE)
resolvedPlayback = False
startPlayback = False
# right-click - play + cache (download and play)
elif not mediaURL.offline and settings.download and settings.play:
# service.downloadMediaFile(int(sys.argv[1]), playbackPath, str(title)+'.'+ str(playbackQuality), folderID, filename, fileSize)
service.downloadMediaFile(mediaURL, item, package, playback=service.PLAYBACK_PLAYER, player=player)
resolvedPlayback = False
# STRM (force resolve) -- resolve-only
elif settings.username != '' or settings.strm:
startPlayback = False
resolvedPlayback = True
if not seek > 0 and package.file.cloudResume > 0 and not settings.cloudResumePrompt:
returnPrompt = xbmcgui.Dialog().yesno(addon.getLocalizedString(30000), addon.getLocalizedString(30176), str(int(float(package.file.cloudResume)/360)) + ':'+ str(int(float(package.file.cloudResume)/60)) + ':' + str(int(float(package.file.cloudResume)%60)))
if not returnPrompt:
package.file.resume = 0
else:
package.file.resume = package.file.cloudResume
item.setProperty('isResumable', '1')
item.setProperty('ResumeTime', str(package.file.resume))
item.setProperty('TotalTime', str(package.file.duration))
# right-click - play original / SRT / CC / Start At
elif settings.playOriginal or settings.srt or settings.cc or settings.seek:
startPlayback = True
resolvedPlayback = False
#### not in use
elif 0 and settings.resume:
spreadshetModule = settings.getSetting('library', False)
spreadshetName = settings.getSetting('library_filename', 'TVShows')
media = {}
if spreadshetModule:
try:
gSpreadsheet = gSpreadsheets.gSpreadsheets(service,addon, user_agent)
service.gSpreadsheet = gSpreadsheet
spreadsheets = gSpreadsheet.getSpreadsheetList()
except:
spreadshetModule = False
if spreadshetModule:
for title in spreadsheets.iterkeys():
if title == spreadshetName:
worksheets = gSpreadsheet.getSpreadsheetWorksheets(spreadsheets[title])
for worksheet in worksheets.iterkeys():
if worksheet == 'db':
media = gSpreadsheet.getMedia(worksheets[worksheet], fileID=package.file.id)
item = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
thumbnailImage=package.file.thumbnail)
item.setInfo( type="Video", infoLabels={ "Title": package.file.title , "Plot" : package.file.title } )
player.setWorksheet(worksheets['db'])
if len(media) == 0:
player.PlayStream(mediaURL.url, item, 0, package)
else:
player.PlayStream(mediaURL.url, item,media[0][7],package)
while not player.isExit:
player.saveTime()
xbmc.sleep(5000)
#offline
elif mediaURL.offline:
resolvedPlayback = True
# left-click - always cache (download and play)
elif not mediaURL.offline and settings.download and settings.play:
service.downloadMediaFile(mediaURL, item, package, player=player)
resolvedPlayback = False
else:
resolvedPlayback = True
else:
cache = cache.cache(package)
service.cache = cache
(localResolutions,localFiles) = service.cache.getFiles(service)
if len(localFiles) > 0:
mediaURL = mediaurl.mediaurl(str(localFiles[0]), 'offline', 0, 0)
mediaURL.offline = True
else:
mediaURL = mediaURLs[0]
if not settings.download:
mediaURL.url = mediaURL.url +'|' + service.getHeadersEncoded()
resolvedPlayback = True
###
#right-menu context or STRM
##
if contextType == '':
#download - only, no playback
if not mediaURL.offline and settings.download and not settings.play:
service.downloadMediaFile(mediaURL, item, package, force=True, playback=service.PLAYBACK_NONE)
resolvedPlayback = False
# for STRM (force resolve) -- resolve-only
elif settings.username != '':
startPlayback = False
#download & playback
elif not mediaURL.offline and settings.download and settings.play:
service.downloadMediaFile(mediaURL, item, package, playback=service.PLAYBACK_PLAYER, player=player)
resolvedPlayback = False
else:
startPlayback = True
# from within pictures mode, music won't be playable, force
#direct playback from within plugin
elif contextType == 'image' and settings.cache:
item = xbmcgui.ListItem(path=str(playbackPath))
# local, not remote. "Music" is ok
item.setInfo( type="Music", infoLabels={ "Title": title } )
player.play(mediaURL.url, item)
resolvedPlayback = False
# from within pictures mode, music won't be playable, force
#direct playback from within plugin
elif contextType == 'image':
item = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
thumbnailImage=package.file.thumbnail, path=mediaURL.url)
# for unknown reasons, remote music tagged as type "Music" errors out when played back from the Music window; this does not happen when it is labeled "Video"
item.setInfo( type="Video", infoLabels={ "Title": title } )
player.play(mediaURL.url, item)
resolvedPlayback = False
#download and play
elif settings.download and settings.play:
service.downloadMediaFile(mediaURL, item, package, player=player)
resolvedPlayback = False
if float(package.file.cloudResume) > 0 or float(package.file.resume) > 0:
options = []
options.append('Resume from ' + str(int(float(package.file.resume))/60).zfill(2) +':' + str(int(float(package.file.resume))%60).zfill(2) )
options.append('Start from begining')
ret = xbmcgui.Dialog().select(addon.getLocalizedString(30176), options)
if ret == 1:
package.file.resume = 0
if resolvedPlayback:
# use streamer if defined
# streamer
useStreamer = False
if service is not None and service.settings.streamer:
# test streamer
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from resources.lib import streamer
import urllib, urllib2
from SocketServer import ThreadingMixIn
import threading
try:
server = streamer.MyHTTPServer(('', service.settings.streamPort), streamer.myStreamer)
server.setAccount(service, '')
#if we make it here, the streamer was not already running as a service; abort and fall back to normal playback, otherwise we would lock
except:
useStreamer = True
if useStreamer and service is not None and service.settings.streamer:
url = 'http://localhost:' + str(service.settings.streamPort) + '/playurl'
req = urllib2.Request(url, 'url=' + mediaURL.url)
try:
response = urllib2.urlopen(req)
response_data = response.read()
response.close()
except urllib2.URLError, e:
xbmc.log(self.addon.getAddonInfo('name') + ': ' + str(e), xbmc.LOGERROR)
item.setPath('http://localhost:' + str(service.settings.streamPort) + '/play')
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
# regular playback
item.setPath(mediaURL.url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
## contribution by dabinn
# handle situation where playback is skipped to next file, wait for new source to load
if player.isPlaying():
xbmc.sleep(100)
# need to seek?
if seek > 0:
player.PlayStream(mediaURL.url, item, seek, startPlayback=startPlayback, package=package)
elif float(package.file.cloudResume) > 0:
player.PlayStream(mediaURL.url, item, package.file.cloudResume, startPlayback=startPlayback, package=package)
elif float(package.file.resume) > 0:
player.PlayStream(mediaURL.url, item, package.file.resume, startPlayback=startPlayback, package=package)
else:
player.PlayStream(mediaURL.url, item, 0, startPlayback=startPlayback, package=package)
# must occur after playback started (resolve or startPlayback in player)
# load captions
if (settings.srt or settings.cc) and (service.protocol == 2 or service.protocol == 3):
while not (player.isPlaying()):
xbmc.sleep(1000)
files = cache.getSRT(service)
for file in files:
if file != '':
try:
#file = file.decode('unicode-escape')
file = file.encode('utf-8')
except:
pass
player.setSubtitles(file)
xbmc.sleep(100)
# keep the plugin alive for as long as it is the source of playback; otherwise the player object closes
while not player.isExit:
player.saveTime()
xbmc.sleep(5000)
xbmcplugin.endOfDirectory(plugin_handle)
# must load after all other handlers (becomes blocking)
# streamer
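# When the streamer is enabled this plugin instance itself becomes a blocking local HTTP
# proxy: it optionally maps Kodi library paths to episode/movie IDs for playback reporting
# and then serves requests until the server is shut down.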
if service is not None and service.settings.streamer:
localTVDB = {}
localMOVIEDB = {}
#load data structure containing TV and Movies from KODI
if (settings.getSetting('local_db')):
result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": { "sort": {"method":"lastplayed"}, "filter": {"field": "title", "operator": "isnot", "value":"1"}, "properties": [ "file"]}, "id": "1"}')
for match in re.finditer('"episodeid":(\d+)\,"file"\:"([^\"]+)"', result):#, re.S):
localTVDB[match.group(2)] = match.group(1)
result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "sort": {"method":"lastplayed"}, "filter": {"field": "title", "operator": "isnot", "value":"1"}, "properties": [ "file"]}, "id": "1"}')
for match in re.finditer('"file":"([^\"]+)","label":"[^\"]+","movieid":(\d+)', result):#, re.S):
localMOVIEDB[match.group(1)] = match.group(2)
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from resources.lib import streamer
import urllib, urllib2
from SocketServer import ThreadingMixIn
import threading
try:
server = streamer.MyHTTPServer(('', service.settings.streamPort), streamer.myStreamer)
server.setAccount(service, '')
if (settings.getSetting('local_db')):
server.setTVDB(localTVDB)
server.setTVDB(localMOVIEDB)
print "ENABLED STREAMER \n\n\n"
while server.ready:
server.handle_request()
server.socket.close()
except: pass
#automation - create strm files
if 0 and service is not None and instanceName is not None and settings.strm:
import time
currentDate = time.strftime("%Y%m%d")
if addon.getSetting(instanceName+'_changedate') == '' or int(addon.getSetting(instanceName+'_changedate')) < int(currentDate):
try:
path = settings.getSetting('strm_path')
except:
pass
if path != '':
try:
pDialog = xbmcgui.DialogProgressBG()
pDialog.create(addon.getLocalizedString(30000), 'Building STRMs...')
except:
pass
#service = gdrive_api2.gdrive(PLUGIN_URL,addon,instanceName, user_agent, settings)
try:
addon.setSetting(instanceName + '_changedate', currentDate)
service.buildSTRM2(path, contentType=contentType, pDialog=pDialog)
except:
pass
try:
pDialog.update(100)
pDialog.close()
except:
pass
# player = gPlayer.gPlayer()
# player.play(playbackURL+'|' + service.getHeadersEncoded(), item)
# while not (player.isPlaying()):
# xbmc.sleep(1)
# player.seekTime(1000)
# w = tvWindow.tvWindow("tvWindow.xml",addon.getAddonInfo('path'),"Default")
# w.setPlayer(player)
# w.doModal()
# player.seekTime(1000)
# w = tvWindow.tvWindow("tvWindow.xml",addon.getAddonInfo('path'),"Default")
# w.setPlayer(player)
# w.doModal()
# xbmc.executebuiltin("XBMC.PlayMedia("+str(playbackPath)+'|' + service.getHeadersEncoded()+")")
#media = gSpreadsheet.setMediaStatus(worksheets[worksheet], package, watched=2, resume=2)
#item = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
# thumbnailImage=package.file.thumbnail)
#item.setInfo( type="Video", infoLabels={ "Title": package.file.title , "Plot" : package.file.title } )
#player = gPlayer.gPlayer()
#player.setService(service)
#player.setWorksheet(worksheets['db'])
#if len(media) == 0:
# player.PlayStream(mediaURL.url, item, 0, package)
#else:
# player.PlayStream(mediaURL.url, item,media[0][7],package)
#while not player.isExit:
# player.saveTime()
# xbmc.sleep(5000)
| gpl-2.0 | -7,872,480,484,363,564,000 | 43.931135 | 283 | 0.560778 | false |
Azure/azure-sdk-for-python | sdk/serialconsole/azure-mgmt-serialconsole/azure/mgmt/serialconsole/_microsoft_serial_console_client.py | 1 | 4023 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import MicrosoftSerialConsoleClientConfiguration
from .operations import MicrosoftSerialConsoleClientOperationsMixin
from .operations import SerialPortsOperations
from . import models
class MicrosoftSerialConsoleClient(MicrosoftSerialConsoleClientOperationsMixin):
"""The Azure Serial Console allows you to access the serial console of a Virtual Machine or VM scale set instance.
:ivar serial_ports: SerialPortsOperations operations
:vartype serial_ports: azure.mgmt.serialconsole.operations.SerialPortsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription ID which uniquely identifies the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call requiring it.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = MicrosoftSerialConsoleClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.serial_ports = SerialPortsOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> MicrosoftSerialConsoleClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| mit | -5,325,386,730,990,529,000 | 44.202247 | 182 | 0.673627 | false |
RedTea/gaedav | pyxml/dom/html/HTMLImageElement.py | 4 | 3892 | ########################################################################
#
# File Name: HTMLImageElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from pyxml.dom import Node
from pyxml.dom.html.HTMLElement import HTMLElement
class HTMLImageElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="IMG"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_lowSrc(self):
return self.getAttribute("LOWSRC")
def _set_lowSrc(self, value):
self.setAttribute("LOWSRC", value)
def _get_name(self):
return self.getAttribute("NAME")
def _set_name(self, value):
self.setAttribute("NAME", value)
def _get_align(self):
return string.capitalize(self.getAttribute("ALIGN"))
def _set_align(self, value):
self.setAttribute("ALIGN", value)
def _get_alt(self):
return self.getAttribute("ALT")
def _set_alt(self, value):
self.setAttribute("ALT", value)
def _get_border(self):
return self.getAttribute("BORDER")
def _set_border(self, value):
self.setAttribute("BORDER", value)
def _get_height(self):
return self.getAttribute("HEIGHT")
def _set_height(self, value):
self.setAttribute("HEIGHT", value)
def _get_hspace(self):
return self.getAttribute("HSPACE")
def _set_hspace(self, value):
self.setAttribute("HSPACE", value)
def _get_isMap(self):
return self.hasAttribute("ISMAP")
def _set_isMap(self, value):
if value:
self.setAttribute("ISMAP", "ISMAP")
else:
self.removeAttribute("ISMAP")
def _get_longDesc(self):
return self.getAttribute("LONGDESC")
def _set_longDesc(self, value):
self.setAttribute("LONGDESC", value)
def _get_src(self):
return self.getAttribute("SRC")
def _set_src(self, value):
self.setAttribute("SRC", value)
def _get_useMap(self):
return self.getAttribute("USEMAP")
def _set_useMap(self, value):
self.setAttribute("USEMAP", value)
def _get_vspace(self):
return self.getAttribute("VSPACE")
def _set_vspace(self, value):
self.setAttribute("VSPACE", value)
def _get_width(self):
return self.getAttribute("WIDTH")
def _set_width(self, value):
self.setAttribute("WIDTH", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"lowSrc" : _get_lowSrc,
"name" : _get_name,
"align" : _get_align,
"alt" : _get_alt,
"border" : _get_border,
"height" : _get_height,
"hspace" : _get_hspace,
"isMap" : _get_isMap,
"longDesc" : _get_longDesc,
"src" : _get_src,
"useMap" : _get_useMap,
"vspace" : _get_vspace,
"width" : _get_width
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"lowSrc" : _set_lowSrc,
"name" : _set_name,
"align" : _set_align,
"alt" : _set_alt,
"border" : _set_border,
"height" : _set_height,
"hspace" : _set_hspace,
"isMap" : _set_isMap,
"longDesc" : _set_longDesc,
"src" : _set_src,
"useMap" : _set_useMap,
"vspace" : _set_vspace,
"width" : _set_width
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| lgpl-2.1 | 8,082,525,727,885,236,000 | 25.657534 | 77 | 0.581963 | false |
BorgERP/borg-erp-6of3 | addons/point_of_sale/wizard/pos_payment.py | 1 | 4106 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import osv, fields
from tools.translate import _
import pos_box_entries
import netsvc
class pos_make_payment(osv.osv_memory):
_name = 'pos.make.payment'
_description = 'Point of Sale Payment'
def check(self, cr, uid, ids, context=None):
"""Check the order:
        if the order is not paid: continue the payment,
        if the order is paid: print the ticket.
"""
context = context or {}
order_obj = self.pool.get('pos.order')
obj_partner = self.pool.get('res.partner')
active_id = context and context.get('active_id', False)
order = order_obj.browse(cr, uid, active_id, context=context)
amount = order.amount_total - order.amount_paid
data = self.read(cr, uid, ids, context=context)[0]
# this is probably a problem of osv_memory as it's not compatible with normal OSV's
#data['journal'] = data['journal'][0]
if amount != 0.0:
order_obj.add_payment(cr, uid, active_id, data, context=context)
if order_obj.test_paid(cr, uid, [active_id]):
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'pos.order', active_id, 'paid', cr)
return self.print_report(cr, uid, ids, context=context)
return self.launch_payment(cr, uid, ids, context=context)
def launch_payment(self, cr, uid, ids, context=None):
return {
'name': _('Payment'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.make.payment',
'view_id': False,
'target': 'new',
'views': False,
'type': 'ir.actions.act_window',
}
def print_report(self, cr, uid, ids, context=None):
active_id = context.get('active_id', [])
datas = {'ids' : [active_id]}
return {
'type': 'ir.actions.report.xml',
'report_name': 'pos.receipt',
'datas': datas,
'context': context,
}
def _default_journal(self, cr, uid, context=None):
res = pos_box_entries.get_journal(self, cr, uid, context=context)
return len(res)>1 and res[1][0] or False
def _default_amount(self, cr, uid, context=None):
order_obj = self.pool.get('pos.order')
active_id = context and context.get('active_id', False)
if active_id:
order = order_obj.browse(cr, uid, active_id, context=context)
return order.amount_total - order.amount_paid
return False
_columns = {
'journal': fields.selection(pos_box_entries.get_journal, "Payment Mode", required=True),
'amount': fields.float('Amount', digits=(16,2), required= True),
'payment_name': fields.char('Payment Reference', size=32),
'payment_date': fields.date('Payment Date', required=True),
}
_defaults = {
'payment_date': time.strftime('%Y-%m-%d %H:%M:%S'),
'amount': _default_amount,
'journal': _default_journal
}
pos_make_payment()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,585,044,777,944,473,000 | 37.018519 | 96 | 0.587433 | false |
flarno11/teslarent | teslarent/manage_views.py | 1 | 8703 | import base64
import logging
import uuid
import datetime
from json import encoder
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, HttpResponseForbidden
from django.http.response import Http404, JsonResponse
from django.shortcuts import redirect, render
from jsonview.decorators import json_view
from teslarent.forms import CredentialsForm, RentalForm
from teslarent.management.commands.rental_start_end import BackgroundTask
from teslarent.models import *
from teslarent.teslaapi import teslaapi
encoder.FLOAT_REPR = lambda o: format(o, '.2f') # monkey patching https://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
log = logging.getLogger('manage')
@json_view
def ping(request):
t = BackgroundTask.Instance()
t.ensure_thread_running()
return JsonResponse({'initialized_at': t.initialized_at})
def each_context(request, title="Title"):
return {
'title': title,
'site_title': "Tesla Rental Admin",
'site_header': "Tesla Rental Admin",
'has_permission': admin.site.has_permission(request),
}
def check_basic_auth(request):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2 and auth[0].lower() == "basic":
return base64.b64decode(auth[1]).decode('utf-8').split(':')
return None, None
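# Illustrative scrape sketch (not part of the original module; URL and username
# are assumed). When METRICS_SECRET is set, the /metrics endpoint expects HTTP
# Basic auth where only the password is compared against the secret:
#
#   curl -u prometheus:$METRICS_SECRET https://example.com/metrics
#
# check_basic_auth() returns (username, password); the username only needs to be
# non-empty, it is not compared against anything.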
def metrics(request):
if settings.METRICS_SECRET:
username, password = check_basic_auth(request)
if not username or not password or password != settings.METRICS_SECRET:
return HttpResponseForbidden()
content = []
for vehicle in Vehicle.objects.all():
content.append('vehicle{{id="{}", vehicle_id="{}", mobile_enabled="{}"}} 1'.format(vehicle.tesla_id, vehicle.vehicle_id, int(vehicle.mobile_enabled is True)))
latest_vehicle_data_any = VehicleData.objects.filter(vehicle=vehicle).order_by('-created_at').first()
if not latest_vehicle_data_any:
continue
latest_vehicle_data_online = VehicleData.objects.filter(vehicle=vehicle).filter(data__state='online').order_by('-created_at').first()
content.append('vehicle_updated_at{vehicle="' + str(vehicle.vehicle_id) + '"} ' + str(latest_vehicle_data_any.created_at.timestamp()))
content.append('vehicle_offline{vehicle="' + str(vehicle.vehicle_id) + '"} ' + ('1' if latest_vehicle_data_any.is_offline else '0'))
if latest_vehicle_data_online:
content.append('vehicle_last_online_at{vehicle="' + str(vehicle.vehicle_id) + '"} ' + str(latest_vehicle_data_online.created_at.timestamp()))
latest_vehicle_data_locked = VehicleData.objects.filter(vehicle=vehicle).filter(data__vehicle_state__locked=True).order_by('-created_at').first()
latest_vehicle_data_unlocked = VehicleData.objects.filter(vehicle=vehicle).filter(data__vehicle_state__locked=False).order_by('-created_at').first()
if latest_vehicle_data_locked and latest_vehicle_data_unlocked:
if latest_vehicle_data_unlocked.created_at > latest_vehicle_data_locked.created_at:
content.append('vehicle_locked{vehicle="' + str(vehicle.vehicle_id) + '"} ' + str(latest_vehicle_data_locked.created_at.timestamp()))
else:
content.append('vehicle_locked{vehicle="' + str(vehicle.vehicle_id) + '"} ' + str(timezone.now().timestamp()))
for credential in Credentials.objects.all():
content.append('token_expires_at{id="' + str(credential.email) + '"} ' + str(credential.token_expires_at.timestamp()))
t = BackgroundTask.Instance()
t.ensure_thread_running()
content.append('background_task_initialized_at ' + str(t.initialized_at.timestamp()))
content.append('')
return HttpResponse("\n".join(content), content_type='text/plain')
def sum_non_null(func, iterable):
return sum(filter(None, map(func, iterable)))
@staff_member_required
def index(request):
now = timezone.now()
one_day_ago = now - datetime.timedelta(days=1)
one_day_from_now = now + datetime.timedelta(days=1)
rentals = Rental.objects.all().order_by('start')
earnings_total_price_netto = 0
earnings_total_price_charging = 0
earnings_total_distance_driven = 0
for r in rentals:
if r.price_netto and r.distance_driven: # only sum up if a price and distance is set
earnings_total_price_netto += r.price_netto
if r.price_charging:
earnings_total_price_netto -= r.price_charging
earnings_total_price_charging += r.price_charging
earnings_total_distance_driven += r.distance_driven
totals = {
'distance_driven_all': sum_non_null(lambda r: r.distance_driven, rentals),
'distance_driven_paid': earnings_total_distance_driven,
'price_brutto': sum_non_null(lambda r: r.price_brutto, rentals),
'price_netto': sum_non_null(lambda r: r.price_netto, rentals),
'price_charging_all': round(sum_non_null(lambda r: r.price_charging, rentals), 2),
'price_charging_paid': round(earnings_total_price_charging, 2),
'earnings_per_km': round(earnings_total_price_netto/earnings_total_distance_driven, 2),
}
vehicles = Vehicle.objects.all()
for vehicle in vehicles:
vehicle.d = VehicleData.objects.filter(vehicle=vehicle)\
.filter(data__charge_state__battery_level__isnull=False)\
.order_by('-created_at').first()
context = dict(
each_context(request, title="Manage Rentals"),
debug=bool(settings.DEBUG),
active_rentals=Rental.objects.filter(start__lt=one_day_from_now, end__gt=one_day_ago).order_by('start'),
rentals=rentals,
totals=totals,
credentials=Credentials.objects.all(),
vehicles=vehicles,
has_any_vehicle=len(vehicles) > 0,
has_active_vehicle=any([v.is_active for v in vehicles]),
)
return render(request, 'manage.html', context)
@staff_member_required
def add_credentials(request):
if request.method == "POST":
form = CredentialsForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
c = Credentials.objects.filter(email=email).first()
if not c:
c = Credentials(email=email)
teslaapi.login_and_save_credentials(c, form.cleaned_data['password'])
del form.cleaned_data['password']
teslaapi.load_vehicles(c)
return redirect('./')
else:
form = CredentialsForm()
context = dict(
each_context(request, title="Add Credentials"),
form=form,
)
return render(request, 'add_credentials.html', context)
@staff_member_required
def delete_credentials(request, credentials_id):
c = Credentials.objects.get(id=credentials_id)
if request.method == "POST":
c.delete()
return redirect('/manage/')
context = dict(
each_context(request, title="Delete Credentials"),
credentials=c,
)
return render(request, 'delete_credentials.html', context)
@staff_member_required
def add_rental(request):
vehicles = Vehicle.get_all_active_vehicles()
if len(vehicles) == 0:
# TODO show error message to user
log.warning("Cannot add rental, there is no active vehicle")
return redirect('/manage/')
vehicle = vehicles[0] if len(vehicles) == 1 else None
start = timezone.now().replace(minute=0, second=0, microsecond=0) + datetime.timedelta(hours=1)
end = start + datetime.timedelta(days=1)
initial = Rental(start=start, end=end, vehicle=vehicle, code=str(uuid.uuid4()))
return add_or_edit_rental(request, rental=initial)
@staff_member_required
def edit_rental(request, rental_id):
return add_or_edit_rental(request, rental=Rental.objects.get(id=rental_id))
@staff_member_required
def delete_rental(request, rental_id):
r = Rental.objects.get(id=rental_id)
if request.method == "POST":
r.delete()
BackgroundTask.Instance().ensure_thread_running()
return redirect('/manage/')
def add_or_edit_rental(request, rental):
form = RentalForm(request.POST or None, instance=rental)
if request.method == "POST":
if form.is_valid():
form.save()
BackgroundTask.Instance().ensure_thread_running()
return redirect('/manage/')
context = dict(
each_context(request, title="Add/edit Rental"),
form=form,
)
return render(request, 'edit_rental.html', context)
| apache-2.0 | 1,031,027,588,890,160,100 | 37.171053 | 166 | 0.667011 | false |
JoaquimPatriarca/senpy-for-gis | gasp/pgsql/q/time.py | 1 | 7149 | """
Analyse time references in PGTABLES
"""
def ID_rows_with_temporal_proximity_by_entities(conParam, table, entity_field,
day_field, hour_field, hour_decimal_field,
time_tolerance, outXlsPath):
"""
Retrieve rows from one pgtable with some temporal proximity
Table structure should be
entity | day | hour | hour_decimal
0 | 2018-01-02 | 5 | 5,10
0 | 2018-01-03 | 4 | 4,15
0 | 2018-01-02 | 5 | 5,12
0 | 2018-01-02 | 5 | 5,8
1 | 2018-01-02 | 4 | 4,10
1 | 2018-01-02 | 5 | 5,12
1 | 2018-01-02 | 4 | 4,20
1 | 2018-01-02 | 4 | 4,12
1 | 2018-01-02 | 4 | 4,6
For a time_tolerance of 5 minutes, the output table will have
the rows with a temporal difference inside/bellow that time tolerance
entity_field could be more than one field
    This method only identifies whether one entity, for one day, has rows
    very close to each other in time.
    It is not a good strategy for large tables; for large tables, SQL-based
    methods are needed.
"""
import pandas
from gasp import goToList
from gasp.frompsql import sql_query
from gasp.pgsql.fields import get_columns_type
from gasp.toxls import df_to_xls
entity_field = goToList(entity_field)
COLS = entity_field + [day_field, hour_field]
COLS_TYPE = get_columns_type(conParam, table)
# TIME TOLERANCE IN HOURS
TIME_TOLERANCE = time_tolerance / 60.0
def thereIsRowsSameTimeInt(row):
whr = []
for c in COLS:
if COLS_TYPE[c] == str:
whr.append("{}='{}'".format(c, row[c]))
else:
whr.append("{}={}".format(c, row[c]))
hourRows = sql_query(conParam,
"SELECT {} FROM {} WHERE {}".format(
hour_decimal_field, table,
" AND ".join(whr)
)
)
for i in range(len(hourRows)):
for e in range(i+1, len(hourRows)):
dif = abs(hourRows[i][0] - hourRows[e][0])
if dif < TIME_TOLERANCE:
break
if dif < TIME_TOLERANCE:
break
if dif < TIME_TOLERANCE:
row['time_difference'] = 1
else:
row['time_difference'] = 0
return row
# Count entity occourrences for one day and hour
countsByEntityTime = pandas.DataFrame(sql_query(
conParam,
("SELECT {scols}, conta FROM "
"(SELECT {scols}, COUNT({ent}) AS conta FROM {tbl} "
"GROUP BY {scols}) AS foo WHERE conta > 1").format(
scols = ', '.join(COLS),
ent = entity_field[0],
tbl = table
)
), columns=COLS + ['conta'])
# For each row in the last count, When count is > 1
# Check time difference between rows for one day and hour
countsByEntityTime = countsByEntityTime.apply(
lambda x: thereIsRowsSameTimeInt(x), axis=1
)
df_to_xls(countsByEntityTime, outXlsPath)
return outXlsPath
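# Illustrative usage sketch (not part of the original module; the table and field
# names are assumed):
#
#   ID_rows_with_temporal_proximity_by_entities(
#       conParam, "gps_tracks", "entity", "day", "hour", "hour_decimal",
#       time_tolerance=5, outXlsPath="/tmp/close_rows.xls")
#
# With a 5 minute tolerance, every entity/day/hour group containing two rows whose
# decimal-hour values differ by less than 5/60 is flagged with time_difference=1
# in the resulting spreadsheet.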
def del_rows_by_temporal_proximity(conpsql, table, entity_fields,
day_field, hour_field, hour_decimal,
minute_field, second_field,
time_tolerance, outresult, exclusionRows=None):
"""
Exclude rows from one pgtable within some temporal interval from the
previous row.
Table structure should be
entity | day | hour | minute | seconds | hour_decimal
0 | 2018-01-02 | 5 | X | X | 5,10
0 | 2018-01-03 | 4 | X | X | 4,15
0 | 2018-01-02 | 5 | X | X | 5,12
0 | 2018-01-02 | 5 | X | X | 5,8
1 | 2018-01-02 | 4 | X | X | 4,10
1 | 2018-01-02 | 5 | X | X | 5,12
1 | 2018-01-02 | 4 | X | X | 4,20
1 | 2018-01-02 | 4 | X | X | 4,12
1 | 2018-01-02 | 4 | X | X | 4,6
"""
from gasp import goToList
from gasp.pgsql.tables.w import create_table_by_query
from gasp.pgsql.fields import get_columns_name
entity_fields = goToList(entity_fields)
if not entity_fields:
raise ValueError("entity_fields value is not valid!")
if exclusionRows:
# Get Rows deleted in table
sql = (
"SELECT *, ({hourDec} - previous_hour) AS deltatime FROM ("
"SELECT *, {lag_entity}, "
"LAG({hourDec}) OVER(PARTITION BY "
"{entityCols}, {dayF} ORDER BY "
"{entityCols}, {dayF}, {hourF}, {minutesF}, {secondsF}"
") AS previous_hour "
"FROM {mtable} ORDER BY {entityCols}, {dayF}, "
"{hourF}, {minutesF}, {secondsF}"
") AS w_previous_tbl "
"WHERE previous_hour IS NOT NULL AND "
"({hourDec} - previous_hour) < {tol} / 60.0"
).format(
hourDec=hour_decimal,
lag_entity = ", ".join([
"LAG({cl}) OVER(PARTITION BY {ent}, {d} ORDER BY {ent}, {d}, {h}, {m}, {s}) AS prev_{cl}".format(
cl=c, ent=", ".join(entity_fields),
d=day_field, h=hour_field, m=minute_field, s=second_field
) for c in entity_fields]),
entityCols=", ".join(entity_fields), dayF=day_field,
hourF=hour_field, minutesF=minute_field, secondsF=second_field,
mtable=table, tol=str(time_tolerance)
)
create_table_by_query(conpsql, exclusionRows, sql)
# Get rows outside the given time tolerance
sql = (
"SELECT *, ({hourDec} - previous_hour) AS deltatime FROM ("
"SELECT *, {lag_entity}, "
"LAG({hourDec}) OVER(PARTITION BY {entityCols}, {dayF} ORDER BY "
"{entityCols}, {dayF}, {hourF}, {minutesF}, "
"{secondsF}) AS previous_hour "
"FROM {mtable} ORDER BY {entityCols}, {dayF}, {hourF}, "
"{minutesF}, {secondsF}"
") AS w_previous_tbl "
"WHERE ({hourDec} - previous_hour) IS NULL OR "
"({hourDec} - previous_hour) > {tol} / 60.0"
).format(
hourDec=hour_decimal,
lag_entity=", ".join([
"LAG({cl}) OVER(PARTITION BY {ent}, {d} ORDER BY {ent}, {d}, {h}, {m}, {s}) AS prev_{cl}".format(
cl=c, ent=", ".join(entity_fields), d=day_field,
h=hour_field, m=minute_field, s=second_field
) for c in entity_fields]),
entityCols=", ".join(entity_fields), dayF=day_field, hourF=hour_field,
minutesF=minute_field, secondsF=second_field,
mtable=table, tol=str(time_tolerance)
)
create_table_by_query(conpsql, outresult, sql)
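# Illustrative usage sketch (not part of the original module; table and field
# names are assumed). Rows less than 10 minutes after the previous row of the
# same entity/day go to "discarded_rows"; the remaining rows go to "clean_rows":
#
#   del_rows_by_temporal_proximity(
#       conpsql, "gps_tracks", "entity", "day", "hour", "hour_decimal",
#       "minutes", "seconds", 10, "clean_rows", exclusionRows="discarded_rows")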
| gpl-3.0 | 5,071,065,966,288,899,000 | 37.026596 | 113 | 0.504966 | false |
allanderek/drunken-octo-avenger | app/sgftools.py | 2 | 3309 | import re
class ParseError(Exception):
pass
class SgfTree(object):
# at the moment just a wrapper around 'nodes', a list of
# dictionaries representing SGF tags. This wrapper exists in case
# we want to add sub-trees, navigation methods etc. later, in
# which case I suggest following the general shape of the objects
# used by the Javascript library at
# https://github.com/neagle/smartgame
def __init__(self, nodes=None):
if nodes is None:
nodes = []
self.nodes = nodes
self.main_line = nodes
def generate(sgf_tree):
nodes = sgf_tree.nodes
if nodes == []:
nodes = [{}]
sgf = '('
for node in nodes:
sgf += ';'
for tag, values in node.items():
if not values:
continue
sgf += tag
for value in values:
sgf += '[' + value + ']'
sgf += ')'
return sgf
def parse(sgf):
d = {}
d['rest'] = sgf
def accept(char):
return accept_re('\\' + char)
def accept_re(pattern):
"""If pattern matches at current point in input, advance and return.
If the pattern contains a group, return the content of the
group. Otherwise, return the entire match.
Either way, advance the current point over the entire match.
If pattern does not match, do nothing and return None.
"""
regexp = re.compile(pattern)
match = regexp.match(d['rest'])
if match:
whole = match.group()
groups = match.groups()
if len(groups) > 0:
result = groups[0]
else:
result = whole
d['rest'] = d['rest'][len(whole):]
return result
else:
return None
def expect(char):
if not accept(char):
raise ParseError("expected '{}' at '|' sign in '{}|{}'".format(
char, sgf[:len(sgf)-len(d['rest'])], d['rest']))
def accept_whitespace():
accept_re(r"[ \t\n]*")
def sequence():
expect('(')
nodes_ = nodes()
expect(')')
return SgfTree(nodes_)
def nodes():
result = []
accept_whitespace()
while accept(';'):
result.append(node_body())
accept_whitespace()
return result
def node_body():
result = {}
while True:
tag = accept_re(r"[A-Z]+")
if not tag:
break
values = tag_values()
result[tag] = values
accept_whitespace()
return result
def tag_values():
result = []
while True:
value = accept_re(r"\[([^]]*)\]")
if value is None:
break
result.append(value)
return result
result = sequence()
if result.nodes == [{}]:
return SgfTree([])
else:
return result
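# Illustrative round-trip sketch (not part of the original module):
#
#   tree = parse("(;FF[4]SZ[19];B[pd];W[dp])")
#   tree.nodes[1]   -> {'B': ['pd']}
#   generate(tree)  -> "(;FF[4]SZ[19];B[pd];W[dp])"
#
# (Tag order within a node is not guaranteed on regeneration, because each node
# is stored as a plain dict.)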
_ord_a = ord('a')
def encode_coord(x, y):
return "{}{}".format(chr(x + _ord_a), chr(y + _ord_a))
def decode_coord(chars):
try:
x = ord(chars[0]) - _ord_a
y = ord(chars[1]) - _ord_a
except IndexError:
raise ValueError("not enough digits in encoded coord: '{}'".format(
chars))
return x, y
| cc0-1.0 | -6,763,316,814,104,656,000 | 24.453846 | 76 | 0.508613 | false |
mjg/PyX-svn | test/functional/test_axis.py | 1 | 3080 | #!/usr/bin/env python
import sys; sys.path[:0] = ["../.."]
import math
from pyx import *
from pyx.graph.axis.parter import linear as linparter
from pyx.graph.axis.painter import regular, ticklength, rotatetext
from pyx.graph.axis.texter import rational, default
from pyx.graph.axis.axis import lin, pathaxis
c = canvas.canvas()
lintest = {"title": "axis title", "min": 0, "max": 1, "parter": linparter(["0.25", "0.1/0.8"])}
c.insert(pathaxis(path.path(path.moveto(0, 0), path.lineto(0, 8)),
lin(**lintest),
direction=-1))
c.insert(pathaxis(path.path(path.moveto(1, 0), path.lineto(1, 8)),
lin(**lintest)))
c.insert(pathaxis(path.path(path.moveto(5, 0), path.lineto(5, 8)),
lin(painter=regular(labelattrs=[trafo.rotate(45)], titleattrs=[trafo.rotate(45)]), **lintest),
direction=-1))
c.insert(pathaxis(path.path(path.moveto(8, 0), path.lineto(8, 8)),
lin(painter=regular(labelattrs=[trafo.rotate(45), text.halign.right], titleattrs=[trafo.rotate(-45)]), **lintest),
direction=-1))
c.insert(pathaxis(path.path(path.moveto(11, 0), path.lineto(11, 8)),
lin(painter=regular(tickattrs=[color.rgb.red], innerticklength=0, outerticklength=ticklength.normal), **lintest),
direction=-1))
c.insert(pathaxis(path.path(path.moveto(12, 0), path.lineto(12, 8)),
lin(painter=regular(tickattrs=[attr.changelist([None, color.rgb.green])]), **lintest)))
c.insert(pathaxis(path.path(path.moveto(16, 0), path.lineto(16, 8)),
lin(texter=default(), **lintest),
direction=-1))
c.insert(pathaxis(path.path(path.moveto(18, 0), path.lineto(18, 8)),
lin(texter=rational(), **lintest),
direction=-1))
lintest = {"title": "axis title", "min": -2*math.pi, "max": 0, "divisor": math.pi, "parter": linparter(["0.25"])}
c.insert(pathaxis(path.path(path.moveto(0, 11), path.lineto(8, 11)),
lin(texter=rational(suffix="\pi"), **lintest)))
lintest = {"title": "axis title", "min": 0, "max": 2*math.pi, "divisor": math.pi, "parter": linparter(["0.5"])}
c.insert(pathaxis(path.path(path.moveto(10, 11), path.lineto(18, 11)),
lin(texter=rational(numsuffix="\pi", over="%s/%s"), **lintest)))
lintest = {"min": 0, "max": 2*math.pi, "divisor": math.pi, "parter": linparter(["0.125"])}
c.insert(pathaxis(path.circle(4, 17, 4),
lin(texter=rational(suffix="\pi"), **lintest)))
lintest = {"min": 0, "max": 2*math.pi, "divisor": math.pi/180, "parter": linparter(["30"])}
c.insert(pathaxis(path.circle(14, 17, 4),
lin(painter=regular(labeldirection=rotatetext.parallel), **lintest)))
c.writeEPSfile("test_axis", page_paperformat=document.paperformat.A4)
c.writePDFfile("test_axis", page_paperformat=document.paperformat.A4)
c.writeSVGfile("test_axis", page_paperformat=document.paperformat.A4)
| gpl-2.0 | 7,655,938,381,475,660,000 | 60.6 | 138 | 0.600649 | false |
mbauskar/frappe | frappe/data_migration/doctype/data_migration_connector/connectors/postgres.py | 3 | 1430 | from __future__ import unicode_literals
import frappe, psycopg2
from .base import BaseConnection
class PostGresConnection(BaseConnection):
def __init__(self, properties):
self.__dict__.update(properties)
self._connector = psycopg2.connect("host='{0}' dbname='{1}' user='{2}' password='{3}'".format(self.hostname,
self.database_name, self.username, self.password))
self.cursor = self._connector.cursor()
def get_objects(self, object_type, condition, selection):
if not condition:
condition = ''
else:
condition = ' WHERE ' + condition
self.cursor.execute('SELECT {0} FROM {1}{2}'.format(selection, object_type, condition))
raw_data = self.cursor.fetchall()
data = []
for r in raw_data:
row_dict = frappe._dict({})
for i, value in enumerate(r):
row_dict[self.cursor.description[i][0]] = value
data.append(row_dict)
return data
def get_join_objects(self, object_type, field, primary_key):
"""
		The first line of field.formula lists the tables that need to be joined to fetch an item.
		The subsequent lines contain the one-to-one key mappings across those tables.
"""
condition = ""
key_mapping = field.formula.split('\n')
obj_type = key_mapping[0]
selection = field.source_fieldname
for d in key_mapping[1:]:
condition += d + ' AND '
condition += str(object_type) + ".id=" + str(primary_key)
return self.get_objects(obj_type, condition, selection)
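	# Illustrative formula sketch (not part of the original connector; table and
	# column names are assumed). For a mapped field backed by joined tables,
	# field.formula could look like:
	#
	#   order_lines, products
	#   order_lines.product_id=products.id
	#
	# get_join_objects() appends "<object_type>.id=<primary_key>" to the join
	# condition and selects field.source_fieldname from those tables.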
| mit | 1,917,267,790,391,735,000 | 32.255814 | 110 | 0.690909 | false |
parheliamm/mosquitto-1.3.4 | test/lib/02-unsubscribe.py | 19 | 1712 | #!/usr/bin/env python
# Test whether a client sends a correct UNSUBSCRIBE packet.
import inspect
import os
import subprocess
import socket
import sys
import time
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("unsubscribe-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
unsubscribe_packet = mosq_test.gen_unsubscribe(mid, "unsubscribe/test")
unsuback_packet = mosq_test.gen_unsuback(mid)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = subprocess.Popen(client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "unsubscribe", unsubscribe_packet):
conn.send(unsuback_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| bsd-3-clause | -8,659,736,514,398,265,000 | 25.338462 | 129 | 0.691589 | false |
MrCrawdaddy/humans | humans/urls.py | 1 | 1330 | """humans URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.core.urlresolvers import reverse_lazy
urlpatterns = [
url(r'^accounts/login/$', auth_views.login, {'template_name': 'documentaries/login.html'}, name='login'),
url(r'^accounts/logout/$', auth_views.logout, {'next_page': reverse_lazy('documentaries:index')},
name='logout'),
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^comments/', include('comments.urls')),
url(r'^documentaries/', include('documentaries.urls')),
    url(r'^profiles/', include('profiles.urls')),
url(r'^admin/', admin.site.urls),
]
| mit | -3,313,737,812,417,281,000 | 41.903226 | 109 | 0.693985 | false |
shashisp/junction | junction/urls.py | 2 | 3334 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Third Party Stuff
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import RedirectView, TemplateView
from rest_framework import routers
from junction.schedule import views as schedule_views
from junction.conferences import views as conference_views
router = routers.DefaultRouter()
router.register('conferences', conference_views.ConferenceView)
router.register('venues', conference_views.VenueView)
router.register('rooms', conference_views.RoomView)
router.register('schedules', schedule_views.ScheduleView)
'''
Root url routering file.
You should put the url config in their respective app putting only a
reference to them here.
'''
urlpatterns = patterns(
'',
# Django Admin
url(r'^nimda/', include(admin.site.urls)),
# Third Party Stuff
url(r'^accounts/', include('allauth.urls')),
url('^markdown/', include('django_markdown.urls')),
# Proposals related
url(r'^(?P<conference_slug>[\w-]+)/proposals/', include('junction.proposals.urls')),
url(r'^(?P<conference_slug>[\w-]+)/dashboard/reviewers/',
'junction.proposals.dashboard.reviewer_comments_dashboard',
name='proposal-reviewers-dashboard'),
url(r'^(?P<conference_slug>[\w-]+)/dashboard/',
'junction.proposals.dashboard.proposals_dashboard', name='proposal-dashboard'),
url(r'^api/v1/', include(router.urls)),
# User Dashboard
url(r'^profiles/', include('junction.profiles.urls', namespace="profiles")),
# Schedule related
url(r'^(?P<conference_slug>[\w-]+)/schedule/',
include('junction.schedule.urls')),
# Static Pages. TODO: to be refactored
url(r'^speakers/$', TemplateView.as_view(template_name='static-content/speakers.html',), name='speakers-static'),
url(r'^schedule/$', TemplateView.as_view(template_name='static-content/schedule.html',), name='schedule-static'),
url(r'^venue/$', TemplateView.as_view(template_name='static-content/venue.html',), name='venue-static'),
url(r'^sponsors/$', TemplateView.as_view(template_name='static-content/sponsors.html',), name='sponsors-static'),
url(r'^blog/$', TemplateView.as_view(template_name='static-content/blog-archive.html',), name='blog-archive'),
url(r'^coc/$', TemplateView.as_view(template_name='static-content/coc.html',), name='coc-static'),
url(r'^faq/$', TemplateView.as_view(template_name='static-content/faq.html',), name='faq-static'),
# Proposals as conference home page. TODO: Needs to be enhanced
url(r'^(?P<conference_slug>[\w-]+)--/',
RedirectView.as_view(pattern_name="proposals-list"),
name='conference-detail'),
# add at the last for minor performance gain
url(r'^', include('junction.pages.urls', namespace='pages')),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^400/$', 'django.views.defaults.bad_request'), # noqa
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
| mit | 140,312,243,866,703,200 | 40.675 | 117 | 0.695561 | false |
kslundberg/pants | src/python/pants/binaries/binary_util.py | 7 | 11585 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import posixpath
import subprocess
from contextlib import closing, contextmanager
import six.moves.urllib.error as urllib_error
import six.moves.urllib.request as urllib_request
from twitter.common.collections import OrderedSet
from pants.base.exceptions import TaskError
from pants.option.custom_types import dict_option, list_option
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_file
from pants.util.dirutil import chmod_plus_x, safe_delete, safe_open
from pants.util.osutil import get_os_id
_DEFAULT_PATH_BY_ID = {
('linux', 'x86_64'): ['linux', 'x86_64'],
('linux', 'amd64'): ['linux', 'x86_64'],
('linux', 'i386'): ['linux', 'i386'],
('linux', 'i686'): ['linux', 'i386'],
('darwin', '9'): ['mac', '10.5'],
('darwin', '10'): ['mac', '10.6'],
('darwin', '11'): ['mac', '10.7'],
('darwin', '12'): ['mac', '10.8'],
('darwin', '13'): ['mac', '10.9'],
('darwin', '14'): ['mac', '10.10'],
('darwin', '15'): ['mac', '10.11'],
}
logger = logging.getLogger(__name__)
class BinaryUtil(object):
"""Wraps utility methods for finding binary executables."""
class Factory(Subsystem):
options_scope = 'binaries'
@classmethod
def register_options(cls, register):
register('--baseurls', type=list_option, advanced=True,
default=['https://dl.bintray.com/pantsbuild/bin/build-support'],
help='List of urls from which binary tools are downloaded. Urls are searched in '
'order until the requested path is found.')
register('--fetch-timeout-secs', type=int, default=30, advanced=True,
help='Timeout in seconds for url reads when fetching binary tools from the '
'repos specified by --baseurls')
register("--path-by-id", type=dict_option, advanced=True,
help='Maps output of uname for a machine to a binary search path. e.g. '
'{ ("darwin", "15"): ["mac", "10.11"]), ("linux", "arm32"): ["linux", "arm32"] }')
@classmethod
def create(cls):
# NB: create is a class method to ~force binary fetch location to be global.
options = cls.global_instance().get_options()
return BinaryUtil(options.baseurls, options.fetch_timeout_secs, options.pants_bootstrapdir,
options.path_by_id)
class MissingMachineInfo(TaskError):
"""Indicates that pants was unable to map this machine's OS to a binary path prefix."""
pass
class BinaryNotFound(TaskError):
def __init__(self, binary, accumulated_errors):
super(BinaryUtil.BinaryNotFound, self).__init__(
'Failed to fetch binary {binary} from any source: ({sources})'
.format(binary=binary, sources=', '.join(accumulated_errors)))
class NoBaseUrlsError(TaskError):
"""Indicates that no urls were specified in pants.ini."""
pass
class MissingBinaryUtilOptionsError(Exception):
"""Internal error. --supportdir and --version must be registered in register_options()"""
pass
def _select_binary_base_path(self, supportdir, version, name, uname_func=None):
"""Calculate the base path.
Exposed for associated unit tests.
:param supportdir: the path used to make a path under --pants_bootstrapdir.
:param version: the version number of the tool used to make a path under --pants-bootstrapdir.
:param name: name of the binary to search for. (e.g 'protoc')
:param uname_func: method to use to emulate os.uname() in testing
:returns: Base path used to select the binary file.
"""
uname_func = uname_func or os.uname
sysname, _, release, _, machine = uname_func()
try:
os_id = get_os_id(uname_func=uname_func)
except KeyError:
os_id = None
if os_id is None:
raise self.MissingMachineInfo("Pants has no binaries for {}".format(sysname))
try:
middle_path = self._path_by_id[os_id]
except KeyError:
raise self.MissingMachineInfo(
"Update --binaries-path-by-id to find binaries for {sysname} {machine} {release}.".format(
sysname=sysname, release=release, machine=machine))
return os.path.join(supportdir, *(middle_path + [version, name]))
def __init__(self, baseurls, timeout_secs, bootstrapdir, path_by_id=None):
"""Creates a BinaryUtil with the given settings to define binary lookup behavior.
This constructor is primarily used for testing. Production code will usually initialize
an instance using the BinaryUtil.Factory.create() method.
:param baseurls: URL prefixes which represent repositories of binaries.
:type baseurls: list of string
:param int timeout_secs: Timeout in seconds for url reads.
:param string bootstrapdir: Directory to use for caching binaries. Uses this directory to
search for binaries in, or download binaries to if needed.
:param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool
directory naming
"""
self._baseurls = baseurls
self._timeout_secs = timeout_secs
self._pants_bootstrapdir = bootstrapdir
self._path_by_id = _DEFAULT_PATH_BY_ID.copy()
if path_by_id:
self._path_by_id.update(path_by_id)
@contextmanager
def _select_binary_stream(self, supportdir, version, name, url_opener=None):
"""Select a binary matching the current os and architecture.
:param string supportdir: The path the `name` binaries are stored under.
:param string version: The version number of the binary to select.
:param string name: The name of the binary to fetch.
:param url_opener: Optional argument used only for testing, to 'pretend' to open urls.
:returns: a 'stream' to download it from a support directory. The returned 'stream' is actually
a lambda function which returns the files binary contents.
:raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
and name could be found for the current platform.
"""
if not self._baseurls:
raise self.NoBaseUrlsError(
'No urls are defined for the --pants-support-baseurls option.')
binary_path = self._select_binary_base_path(supportdir, version, name)
if url_opener is None:
url_opener = lambda u: closing(urllib_request.urlopen(u, timeout=self._timeout_secs))
downloaded_successfully = False
accumulated_errors = []
for baseurl in OrderedSet(self._baseurls): # Wrap in OrderedSet because duplicates are wasteful.
url = posixpath.join(baseurl, binary_path)
logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))
try:
with url_opener(url) as binary:
logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))
downloaded_successfully = True
yield lambda: binary.read()
break
except (IOError, urllib_error.HTTPError, urllib_error.URLError, ValueError) as e:
accumulated_errors.append('Failed to fetch binary from {url}: {error}'
.format(url=url, error=e))
if not downloaded_successfully:
raise self.BinaryNotFound((supportdir, version, name), accumulated_errors)
def select_binary(self, supportdir, version, name):
"""Selects a binary matching the current os and architecture.
:param string supportdir: The path the `name` binaries are stored under.
:param string version: The version number of the binary to select.
:param string name: The name of the binary to fetch.
:raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
and name could be found for the current platform.
"""
# TODO(John Sirois): finish doc of the path structure expected under base_path
binary_path = self._select_binary_base_path(supportdir, version, name)
bootstrap_dir = os.path.realpath(os.path.expanduser(self._pants_bootstrapdir))
bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
if not os.path.exists(bootstrapped_binary_path):
downloadpath = bootstrapped_binary_path + '~'
try:
with self._select_binary_stream(supportdir, version, name) as stream:
with safe_open(downloadpath, 'wb') as bootstrapped_binary:
bootstrapped_binary.write(stream())
os.rename(downloadpath, bootstrapped_binary_path)
chmod_plus_x(bootstrapped_binary_path)
finally:
safe_delete(downloadpath)
logger.debug('Selected {binary} binary bootstrapped to: {path}'
.format(binary=name, path=bootstrapped_binary_path))
return bootstrapped_binary_path
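# Illustrative usage sketch (not part of the original module; the supportdir,
# version and binary name are assumed):
#
#   binary_util = BinaryUtil.Factory.create()
#   protoc = binary_util.select_binary('bin/protobuf', '2.4.1', 'protoc')
#   # `protoc` is now the path to a cached, executable binary under
#   # --pants-bootstrapdir, downloaded from --binaries-baseurls if needed.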
@contextmanager
def safe_args(args,
options,
max_args=None,
argfile=None,
delimiter='\n',
quoter=None,
delete=True):
"""Yields args if there are less than a limit otherwise writes args to an argfile and yields an
argument list with one argument formed from the path of the argfile.
:param args: The args to work with.
:param OptionValueContainer options: scoped options object for this task
:param max_args: The maximum number of args to let though without writing an argfile. If not
specified then the maximum will be loaded from the --max-subprocess-args option.
:param argfile: The file to write args to when there are too many; defaults to a temporary file.
:param delimiter: The delimiter to insert between args written to the argfile, defaults to '\n'
:param quoter: A function that can take the argfile path and return a single argument value;
defaults to: <code>lambda f: '@' + f<code>
:param delete: If True deletes any arg files created upon exit from this context; defaults to
True.
"""
max_args = max_args or options.max_subprocess_args
if len(args) > max_args:
def create_argfile(f):
f.write(delimiter.join(args))
f.close()
return [quoter(f.name) if quoter else '@{}'.format(f.name)]
if argfile:
try:
with safe_open(argfile, 'w') as fp:
yield create_argfile(fp)
finally:
if delete and os.path.exists(argfile):
os.unlink(argfile)
else:
with temporary_file(cleanup=delete) as fp:
yield create_argfile(fp)
else:
yield args
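# Illustrative usage sketch for safe_args (not part of the original module; the
# argument list and option values are assumed):
#
#   with safe_args(classpath_entries, self.get_options(), max_args=100) as args:
#     # `args` is either the original list or ['@/tmp/argfileXYZ'], where the
#     # argfile contains one entry per line.
#     self.run_tool(args)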
def _mac_open(files):
subprocess.call(['open'] + list(files))
def _linux_open(files):
cmd = "xdg-open"
if not _cmd_exists(cmd):
raise TaskError("The program '{}' isn't in your PATH. Please install and re-run this "
"goal.".format(cmd))
for f in list(files):
subprocess.call([cmd, f])
# From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def _cmd_exists(cmd):
return subprocess.call(["/usr/bin/which", cmd], shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
_OPENER_BY_OS = {
'darwin': _mac_open,
'linux': _linux_open
}
def ui_open(*files):
"""Attempts to open the given files using the preferred native viewer or editor."""
if files:
osname = os.uname()[0].lower()
if not osname in _OPENER_BY_OS:
print('Sorry, open currently not supported for ' + osname)
else:
_OPENER_BY_OS[osname](files)
| apache-2.0 | 4,552,434,797,696,347,000 | 40.523297 | 101 | 0.672249 | false |
AeonGames/AeonEngine | tools/blender/addons/io_images/export.py | 1 | 1485 | # Copyright (C) 2017,2019 Rodrigo Jose Hernandez Cordoba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import os
class IMG_OT_exporter(bpy.types.Operator):
'''Saves All Images in a specified directory'''
bl_idname = "export_images.img"
bl_label = "All Images"
directory: bpy.props.StringProperty(subtype='DIR_PATH')
@classmethod
def poll(cls, context):
return len(bpy.data.images) > 0
def execute(self, context):
if len(bpy.data.images) == 0:
return {'CANCELLED'}
for image in bpy.data.images:
filepath = image.filepath
image.filepath = self.directory + os.sep + os.path.basename(filepath)
print("Saving ",image.filepath)
image.save()
image.filepath = filepath
print("Done.")
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {"RUNNING_MODAL"}
| apache-2.0 | 3,983,112,824,774,501,400 | 32.75 | 81 | 0.66734 | false |
mjvakili/ccppabc | ccppabc/code/test_emcee.py | 1 | 2414 | import sys
import numpy as np
import emcee
from emcee.utils import MPIPool
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534
# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(yerr * yerr)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, m_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
def lnlike(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
import scipy.optimize as op
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]
def lnprior(theta):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = 3, 100
pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
#pool = MPIPool()
#if not pool.is_master():
# pool.wait()
# sys.exit(0)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr), threads=15)
f = open("chain.dat", "w")
f.close()
for result in sampler.sample(pos, iterations=600, storechain=False):
#print result
position = result[0]
print position.shape
f = open("chain.dat", "a")
for k in range(position.shape[0]):
output_str = '\t'.join(position[k].astype('str')) + '\n'
f.write(output_str)
f.close()
sampler.run_mcmc(pos, 600)
samples = sampler.chain[:, 100:, :].reshape((-1, ndim))
#pool.close()
#import corner
#fig = corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
# truths=[m_true, b_true, np.log(f_true)])
#fig.savefig("/home/mj/public_html/triangle_final.png")
#
#samples2 = np.loadtxt("chain.dat")
#
#fig = corner.corner(samples2[100:], labels=["$m$", "$b$", "$\ln\,f$"],
# truths=[m_true, b_true, np.log(f_true)])
#
#fig.savefig("/home/mj/public_html/triangle_incremental.png")
| mit | 1,180,406,701,961,943,800 | 26.431818 | 88 | 0.597349 | false |
redhat-openstack/trove | trove/tests/unittests/api/test_versions.py | 3 | 8858 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock
from trove.tests.unittests import trove_testtools
from trove.versions import BaseVersion
from trove.versions import Version
from trove.versions import VersionDataView
from trove.versions import VERSIONS
from trove.versions import VersionsAPI
from trove.versions import VersionsController
from trove.versions import VersionsDataView
BASE_URL = 'http://localhost'
class VersionsControllerTest(trove_testtools.TestCase):
def setUp(self):
super(VersionsControllerTest, self).setUp()
self.controller = VersionsController()
self.assertIsNotNone(self.controller,
"VersionsController instance was None")
def test_index_json(self):
request = Mock()
result = self.controller.index(request)
self.assertIsNotNone(result,
'Result was None')
result._data = Mock()
result._data.data_for_json = \
lambda: {'status': 'CURRENT',
'updated': '2012-08-01T00:00:00Z',
'id': 'v1.0',
'links': [{'href': 'http://localhost/v1.0/',
'rel': 'self'}]}
# can be anything but xml
json_data = result.data("application/json")
self.assertIsNotNone(json_data,
'Result json_data was None')
self.assertEqual('v1.0', json_data['id'],
'Version id is incorrect')
self.assertEqual('CURRENT', json_data['status'],
'Version status is incorrect')
self.assertEqual('2012-08-01T00:00:00Z', json_data['updated'],
'Version updated value is incorrect')
def test_show_json(self):
request = Mock()
request.url_version = '1.0'
result = self.controller.show(request)
self.assertIsNotNone(result,
'Result was None')
json_data = result.data("application/json")
self.assertIsNotNone(json_data, "JSON data was None")
version = json_data.get('version', None)
self.assertIsNotNone(version, "Version was None")
self.assertEqual('CURRENT', version['status'],
"Version status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', version['updated'],
"Version updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', version['id'], "Version id was not 'v1.0'")
class BaseVersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(BaseVersionTestCase, self).setUp()
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.base_version = BaseVersion(id, status, base_url, updated)
self.assertIsNotNone(self.base_version,
'BaseVersion instance was None')
def test_data(self):
data = self.base_version.data()
self.assertIsNotNone(data, 'Base Version data was None')
self.assertTrue(type(data) is dict,
"Base Version data is not a dict")
self.assertEqual('CURRENT', data['status'],
"Data status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
"Data updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', data['id'],
"Data status was not 'v1.0'")
def test_url(self):
url = self.base_version.url()
self.assertIsNotNone(url, 'Url was None')
self.assertEqual('http://localhost/v1.0/', url,
"Base Version url is incorrect")
class VersionTestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionTestCase, self).setUp()
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
def test_url_no_trailing_slash(self):
url = self.version.url()
self.assertIsNotNone(url, 'Version url was None')
self.assertEqual(BASE_URL + '/', url,
'Base url value was incorrect')
def test_url_with_trailing_slash(self):
self.version.base_url = 'http://localhost/'
url = self.version.url()
self.assertEqual(BASE_URL + '/', url,
'Base url value was incorrect')
class VersionDataViewTestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionDataViewTestCase, self).setUp()
# get a version object first
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
# then create an instance of VersionDataView
self.version_data_view = VersionDataView(self.version)
self.assertIsNotNone(self.version_data_view,
'Version Data view instance was None')
def test_data_for_json(self):
json_data = self.version_data_view.data_for_json()
self.assertIsNotNone(json_data, "JSON data was None")
self.assertTrue(type(json_data) is dict,
"JSON version data is not a dict")
self.assertIsNotNone(json_data.get('version'),
"Dict json_data has no key 'version'")
data = json_data['version']
self.assertIsNotNone(data, "JSON data version was None")
self.assertEqual('CURRENT', data['status'],
"Data status was not 'CURRENT'")
self.assertEqual('2012-08-01T00:00:00Z', data['updated'],
"Data updated was not '2012-08-01T00:00:00Z'")
self.assertEqual('v1.0', data['id'],
"Data status was not 'v1.0'")
class VersionsDataViewTestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionsDataViewTestCase, self).setUp()
# get a version object, put it in a list
self.versions = []
id = VERSIONS['1.0']['id']
status = VERSIONS['1.0']['status']
base_url = BASE_URL
updated = VERSIONS['1.0']['updated']
self.version = Version(id, status, base_url, updated)
self.assertIsNotNone(self.version,
'Version instance was None')
self.versions.append(self.version)
# then create an instance of VersionsDataView
self.versions_data_view = VersionsDataView(self.versions)
self.assertIsNotNone(self.versions_data_view,
'Versions Data view instance was None')
def test_data_for_json(self):
json_data = self.versions_data_view.data_for_json()
self.assertIsNotNone(json_data, "JSON data was None")
self.assertTrue(type(json_data) is dict,
"JSON versions data is not a dict")
self.assertIsNotNone(json_data.get('versions', None),
"Dict json_data has no key 'versions'")
versions = json_data['versions']
self.assertIsNotNone(versions, "Versions was None")
self.assertEqual(1, len(versions), "Versions length != 1")
# explode the version object
versions_data = [v.data() for v in self.versions]
d1 = versions_data.pop()
d2 = versions.pop()
self.assertEqual(d1['id'], d2['id'],
"Version ids are not equal")
class VersionAPITestCase(trove_testtools.TestCase):
def setUp(self):
super(VersionAPITestCase, self).setUp()
def test_instance(self):
self.versions_api = VersionsAPI()
self.assertIsNotNone(self.versions_api,
"VersionsAPI instance was None")
| apache-2.0 | 4,118,092,789,275,633,700 | 37.181034 | 78 | 0.591556 | false |
Ryanglambert/pybrain | docs/tutorials/networks.py | 25 | 4239 | from __future__ import print_function
############################################################################
# PyBrain Tutorial "Networks, Modules, Connections"
#
# Author: Tom Schaul, [email protected]
############################################################################
__author__ = 'Tom Schaul, [email protected]'
from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection, RecurrentNetwork
from pybrain.tools.shortcuts import buildNetwork
""" This tutorial will attempt to guide you for using one of PyBrain's most basic structural elements:
Networks, and with them Modules and Connections.
Let us start with a simple example, building a multi-layer-perceptron (MLP).
First we make a new network object: """
n = FeedForwardNetwork()
""" Next, we're constructing the input, hidden and output layers. """
inLayer = LinearLayer(2)
hiddenLayer = SigmoidLayer(3)
outLayer = LinearLayer(1)
""" (Note that we could also have used a hidden layer of type TanhLayer, LinearLayer, etc.)
Let's add them to the network: """
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)
""" We still need to explicitly determine how they should be connected. For this we use the most
common connection type, which produces a full connectivity between two layers (or Modules, in general):
the 'FullConnection'. """
in2hidden = FullConnection(inLayer, hiddenLayer)
hidden2out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in2hidden)
n.addConnection(hidden2out)
""" All the elements are in place now, so we can do the final step that makes our MLP usable,
which is to call the 'sortModules()' method. """
n.sortModules()
""" Let's see what we did. """
print(n)
""" One way of using the network is to call its 'activate()' method with an input to be transformed. """
print(n.activate([1, 2]))
""" We can access the trainable parameters (weights) of a connection directly, or read
all weights of the network at once. """
print(hidden2out.params)
print(n.params)
""" The former are the last slice of the latter. """
print(n.params[-3:] == hidden2out.params)
""" Ok, after having covered the basics, let's move on to some additional concepts.
First of all, we encourage you to name all modules, or connections you create, because that gives you
more readable printouts, and a very concise way of accessing them.
We now build an equivalent network to the one before, but with a more concise syntax:
"""
n2 = RecurrentNetwork(name='net2')
n2.addInputModule(LinearLayer(2, name='in'))
n2.addModule(SigmoidLayer(3, name='h'))
n2.addOutputModule(LinearLayer(1, name='out'))
n2.addConnection(FullConnection(n2['in'], n2['h'], name='c1'))
n2.addConnection(FullConnection(n2['h'], n2['out'], name='c2'))
n2.sortModules()
""" Printouts look more concise and readable: """
print(n2)
""" There is an even quicker way to build networks though, as long as their structure is nothing
more fancy than a stack of fully connected layers: """
n3 = buildNetwork(2, 3, 1, bias=False)
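# Illustrative aside (assumption, not executed here): buildNetwork from
# pybrain.tools.shortcuts also accepts keyword arguments such as
# hiddenclass=TanhLayer or recurrent=True to customize the generated stack.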
""" Recurrent networks are working in the same way, except that the recurrent connections
need to be explicitly declared upon construction.
We can modify our existing network 'net2' and add a recurrent connection on the hidden layer: """
n2.addRecurrentConnection(FullConnection(n2['h'], n2['h'], name='rec'))
""" After every structural modification, if we want ot use the network, we call 'sortModules()' again"""
n2.sortModules()
print(n2)
""" As the network is now recurrent, successive activations produce different outputs: """
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]))
""" The 'reset()' method re-initializes the network, and with it sets the recurrent
activations to zero, so now we get the same results: """
n2.reset()
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]), end=' ')
print(n2.activate([1, 2]))
""" This is already a good coverage of the basics, but if you're an advanced user
you might want to find out about the possibilities of nesting networks within
others, using weight-sharing, and more exotic types of networks, connections
and modules... but that goes beyond the scope of this tutorial.
"""
| bsd-3-clause | -4,781,165,897,439,055,000 | 33.745902 | 109 | 0.713612 | false |
verdverm/pypge | experiments/01_baseline/theann.py | 1 | 3775 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
import data as DATA
import tensorflow.python.platform
import tensorflow as tf
def run_explicit_loop():
for problem in DATA.explicit_problems:
prob,target = problem.split(";")
df = DATA.read_datafile("explicit", prob)
cols = [col for col in df.columns if not (col == target or col == "T" or (len(col)>2 and col[:2] == "D_"))]
ins = df[cols].as_matrix()
outs = df[target].values
print("\n\n", prob, target, ins.shape, outs.shape, "\n=======================\n")
ann_model(ins,outs, 24,24)
for problem in DATA.diffeq_problems:
prob,target = problem.split(";")
df = DATA.read_datafile("diffeq", prob)
cols = [col for col in df.columns if not (col == target or col == "T" or (len(col)>2 and col[:2] == "D_"))]
ins = df[cols].as_matrix()
outs = df[target].values
print("\n\n", prob, target, ins.shape, outs.shape, "\n=======================\n")
ann_model(ins,outs, 24,24)
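# note: despite its name, run_explicit_loop() also walks DATA.diffeq_problems
# in its second loop, fitting the same ANN model to the differential-equation data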
def print_model(name, regr, ins, outs):
print(" ",name,"\n--------------")
# The mean square error
yhat = regr.predict(ins)
print(" Residual: %g" % np.mean((yhat - outs) ** 2))
# Explained variance score: 1 is perfect prediction
r2 = r2_score(outs, yhat)
print(' R2: %g' % r2)
print()
# Create model
def multilayer_perceptron(_X, _weights, _biases):
layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1'])) #Hidden layer with RELU activation
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2'])) #Hidden layer with RELU activation
return tf.matmul(layer_2, _weights['out']) + _biases['out']
# layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, _weights['h3']), _biases['b3'])) #Hidden layer with RELU activation
# return tf.matmul(layer_3, _weights['out']) + _biases['out']
def ann_model(ins, outs, n_hidden_1, n_hidden_2):
outs = outs.reshape(len(outs),1)
n_input = len(ins[0])
n_samples = len(outs)
# tf Graph input
x = tf.placeholder("float", [n_samples, n_input])
y = tf.placeholder("float", [n_samples, 1])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1)),
'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2], stddev=0.1)),
'out': tf.Variable(tf.truncated_normal([n_hidden_2, 1], stddev=0.1))
# 'h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3], stddev=0.1)),
# 'out': tf.Variable(tf.truncated_normal([n_hidden_3, 1], stddev=0.1))
}
biases = {
'b1': tf.Variable(tf.constant(0.1, shape=[n_hidden_1])),
'b2': tf.Variable(tf.constant(0.1, shape=[n_hidden_2])),
# 'b3': tf.Variable(tf.constant(0.1, shape=[n_hidden_3])),
'out': tf.Variable(tf.constant(0.1, shape=[1]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
## use mean sqr error for cost function
cost = (tf.pow(y-pred, 2))
accuracy = tf.reduce_mean(cost)
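# note: despite the name, 'accuracy' is the mean squared error over the
# training data; it is the quantity minimized by the optimizer below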
# construct an optimizer to minimize cost and fit the data
train_op = tf.train.AdamOptimizer(0.01).minimize(accuracy)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
NUM_EPOCH = 2501
DISPLAY = 100
a_sum = 0
for epoch in range(NUM_EPOCH):
sess.run(train_op, feed_dict={x: ins, y: outs})
# c = sess.run(cost, feed_dict={x: ins, y: outs})
if epoch % DISPLAY == 0:
a = sess.run(accuracy, feed_dict={x: ins, y: outs})
print(epoch, a)
a = sess.run(accuracy, feed_dict={x: ins, y: outs})
print("Final: ", a)
y_hat = sess.run(pred, feed_dict={x: ins, y: outs})
r2 = r2_score(outs, y_hat)
print("R2: ", r2)
run_explicit_loop()
| mit | 5,630,535,016,137,693,000 | 28.960317 | 120 | 0.62702 | false |
freeflightsim/ffs-app-engine | ffs-cal.appspot.com/atom/core.py | 18 | 20432 | #!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
STRING_ENCODING = 'utf-8'
class XmlElement(object):
"""Represents an element node in an XML document.
The text member is a UTF-8 encoded str or unicode.
"""
_qname = None
_other_elements = None
_other_attributes = None
# The rule set contains mappings for XML qnames to child members and the
# appropriate member classes.
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
attribute with the qname 'att1' but version 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
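# Illustrative sketch (hypothetical classes, not part of this module) of the
# member conventions listed above:
#
#   class Child(XmlElement):
#       _qname = '{http://example.com/ns}child'
#
#   class Parent(XmlElement):
#       _qname = '{http://example.com/ns}parent'
#       label = 'label'      # XML attribute with qname 'label'
#       child = Child        # single child element
#       entries = [Child]    # repeating child element, collected in a list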
def _get_rules(cls, version):
"""Initializes the _rule_set for the class which is used when parsing XML.
This method is used internally for parsing and generating XML for an
XmlElement. It is not recommended that you call this method directly.
Returns:
A tuple containing the XML parsing rules for the appropriate version.
The tuple looks like:
(qname, {sub_element_qname: (member_name, member_class, repeating), ..},
{attribute_qname: member_name})
To give a couple of concrete example, the atom.data.Control _get_rules
with version of 2 will return:
('{http://www.w3.org/2007/app}control',
{'{http://www.w3.org/2007/app}draft': ('draft',
<class 'atom.data.Draft'>,
False)},
{})
Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
('{http://schemas.google.com/g/2005}feedLink',
{'{http://www.w3.org/2005/Atom}feed': ('feed',
<class 'gdata.data.GDFeed'>,
False)},
{'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
'rel': 'rel'})
"""
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
# runs a bit slower than hard coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
# This member points to a single occurrence element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
# FindExtensions and FindChildren are provided for backwards compatibility
# to the atom.AtomBase class.
# However, FindExtensions may return more results than the v1 atom.AtomBase
# method does, because get_elements searches both the expected children
# and the unexpected "other elements". The old AtomBase.FindExtensions
# method searched only "other elements" AKA extension_elements.
FindExtensions = get_elements
FindChildren = get_elements
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
if isinstance(attribute_def, (list, tuple)):
attribute_def = attribute_def[0]
member = getattr(self, attribute_def)
# TODO: ensure this hasn't broken existing behavior.
#member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1], version))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1], version))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement,
version))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
def _attach_members(self, tree, version=1, encoding=None):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
other_attributes and other_elements are also added as children
of this tree.
version: int Ignored in this method but used by VersionedElement.
encoding: str (optional)
"""
qname, elements, attributes = self.__class__._get_rules(version)
encoding = encoding or STRING_ENCODING
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
# I'm not sure if unicode can be used in the attribute name, so for now
# we assume the encoding is correct for the attribute name.
if not isinstance(value, unicode):
value = value.decode(encoding)
tree.attrib[key] = value
if self.text:
if isinstance(self.text, unicode):
tree.text = self.text
else:
tree.text = self.text.decode(encoding)
def to_string(self, version=1, encoding=None):
"""Converts this object to XML."""
return ElementTree.tostring(self._to_tree(version, encoding))
ToString = to_string
def __str__(self):
return self.to_string()
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def __get_extension_elements(self):
return self._other_elements
def __set_extension_elements(self, elements):
self._other_elements = elements
extension_elements = property(__get_extension_elements,
__set_extension_elements,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def __get_extension_attributes(self):
return self._other_attributes
def __set_extension_attributes(self, attributes):
self._other_attributes = attributes
extension_attributes = property(__get_extension_attributes,
__set_extension_attributes,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
return qname[qname.find('}')+1:]
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if self._qname[0].startswith('{'):
self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
else:
self._qname[0] = tag
else:
if self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if namespace:
self._qname[0] = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname[0] = self._get_tag(1)
else:
if namespace:
self._qname = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname = self._get_tag(1)
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
tag: string
namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
def parse(xml_string, target_class=None, version=1, encoding=None):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass. If None is specified, the
XmlElement class is used.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
encoding: str (optional) The character encoding of the bytes in the
xml_string. Default is 'UTF-8'.
"""
if target_class is None:
target_class = XmlElement
if isinstance(xml_string, unicode):
if encoding is None:
xml_string = xml_string.encode(STRING_ENCODING)
else:
xml_string = xml_string.encode(encoding)
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
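# Illustrative usage sketch (assumed input document):
#   element = parse('<foo xmlns="http://example.com/ns">hi</foo>')
#   element.text    # 'hi'
#   element._qname  # '{http://example.com/ns}foo'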
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == _get_qname(target_class, version):
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
| gpl-2.0 | 1,320,230,897,144,665,300 | 37.262172 | 79 | 0.652702 | false |
xuxiao19910803/edx | lms/djangoapps/ccx/views.py | 9 | 19058 | """
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from contextlib import contextmanager
from copy import deepcopy
from cStringIO import StringIO
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from courseware.grades import iterate_grades_for
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole
from instructor.offline_gradecalc import student_grades
from instructor.views.api import _split_input_list
from instructor.views.tools import get_student_from_identifier
from .models import CustomCourseForEdX, CcxMembership
from .overrides import (
clear_override_for_ccx,
get_override_for_ccx,
override_field_for_ccx,
)
from .utils import (
enroll_email,
unenroll_email,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
"""
View decorator which enforces that the user have the CCX coach role on the
given course and goes ahead and translates the course_id from the Django
route into a course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
course_key = ccx.course_id
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(
_('You must be a CCX Coach to access this view.'))
course = get_course_by_id(course_key, depth=None)
# if there is a ccx, we must validate that it is the ccx for this coach
if ccx is not None:
coach_ccx = get_ccx_for_coach(course, request.user)
if coach_ccx is None or coach_ccx.id != ccx.id:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
# right now, we can only have one ccx per user and course
# so, if no ccx is passed in, we can safely redirect to it
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, ccx.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CcxMembership.objects.filter(ccx=ccx)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, ccx.id) # pylint: disable=no-member
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
clear_override_for_ccx(ccx, block, 'start')
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
clear_override_for_ccx(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
if children:
override_fields(block, children, graded, earliest)
return earliest
graded = {}
earliest = override_fields(course, json.loads(request.body), graded)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section['min_count']:
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
def validate_date(year, month, day, hour, minute):
"""
avoid corrupting db if bad dates come in
"""
valid = True
if year < 0:
valid = False
if month < 1 or month > 12:
valid = False
if day < 1 or day > 31:
valid = False
if hour < 0 or hour > 23:
valid = False
if minute < 0 or minute > 59:
valid = False
return valid
def parse_date(datestring):
"""
Generate a UTC datetime.datetime object from a string of the form
'YYYY-MM-DD HH:MM'. If string is empty or `None`, returns `None`.
"""
if datestring:
date, time = datestring.split(' ')
year, month, day = map(int, date.split('-'))
hour, minute = map(int, time.split(':'))
if validate_date(year, month, day, hour, minute):
return datetime.datetime(
year, month, day, hour, minute, tzinfo=pytz.UTC)
return None
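# e.g. parse_date('2015-06-01 13:30') returns
# datetime.datetime(2015, 6, 1, 13, 30, tzinfo=pytz.UTC), while parse_date('')
# and parse_date(None) both return None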
def get_ccx_for_coach(course, coach):
"""
Looks to see if user is coach of a CCX for this course. Returns the CCX or
None.
"""
ccxs = CustomCourseForEdX.objects.filter(
course_id=course.id,
coach=coach
)
# XXX: In the future, it would be nice to support more than one ccx per
# coach per course. This is a place where that might happen.
if ccxs.exists():
return ccxs[0]
return None
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
Recursive generator function which yields CCX schedule nodes.
We convert dates to string to get them ready for use by the js date
widgets, which use text inputs.
"""
for child in node.get_children():
start = get_override_for_ccx(ccx, child, 'start', None)
if start:
start = str(start)[:-9]
due = get_override_for_ccx(ccx, child, 'due', None)
if due:
due = str(due)[:-9]
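# the [:-9] slices above trim the trailing ':SS+00:00' from the UTC datetime
# string, leaving the 'YYYY-MM-DD HH:MM' form that parse_date() accepts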
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
get json representation of ccx schedule
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
auto_enroll = 'auto-enroll' in request.POST
email_students = 'email-students' in request.POST
for identifier in identifiers:
user = None
email = None
try:
user = get_student_from_identifier(identifier)
except User.DoesNotExist:
email = identifier
else:
email = user.email
try:
validate_email(email)
if action == 'Enroll':
enroll_email(
ccx,
email,
auto_enroll=auto_enroll,
email_students=email_students
)
if action == "Unenroll":
unenroll_email(ccx, email, email_students=email_students)
except ValidationError:
log.info('Invalid user name or email when trying to invite students: %s', email)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
user = email = None
try:
user = get_student_from_identifier(student_id)
except User.DoesNotExist:
email = student_id
else:
email = user.email
try:
validate_email(email)
if action == 'add':
# by decree, no emails sent to students added this way
# by decree, any students added this way are auto_enrolled
enroll_email(ccx, email, auto_enroll=True, email_students=False)
elif action == 'revoke':
unenroll_email(ccx, email, email_students=False)
except ValidationError:
log.info('Invalid user name or email when trying to enroll student: %s', email)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
@contextmanager
def ccx_course(ccx_locator):
"""Create a context in which the course identified by course_locator exists
"""
course = get_course_by_id(ccx_locator)
yield course
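# used by the gradebook and CSV views below as:
# with ccx_course(ccx_key) as course: ...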
def prep_course_for_grading(course, request):
"""Set up course module for overrides to function properly"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
course._field_data_cache = {} # pylint: disable=protected-access
course.set_grading_policy(course.grading_policy)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
ccxmembership__ccx=ccx,
ccxmembership__active=1
).order_by('username').select_related("profile")
student_info = [
{
'username': student.username,
'id': student.id,
'email': student.email,
'grade_summary': student_grades(student, request, course),
'realname': student.profile.name,
}
for student in enrolled_students
]
return render_to_response('courseware/gradebook.html', {
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
ccxmembership__ccx=ccx,
ccxmembership__active=1
).order_by('username').select_related("profile")
grades = iterate_grades_for(course, enrolled_students)
header = None
rows = []
for student, gradeset, __ in grades:
if gradeset:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in gradeset[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
gradeset['percent']] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
return HttpResponse(buf.getvalue(), content_type='text/plain')
| agpl-3.0 | 5,619,142,379,369,631,000 | 32.376532 | 92 | 0.61082 | false |
alchemistry/alchemlyb | src/alchemlyb/tests/test_ti_estimators.py | 1 | 6592 | """Tests for all TI-based estimators in ``alchemlyb``.
"""
import pytest
import pandas as pd
import alchemlyb
from alchemlyb.parsing import gmx, amber, gomc
from alchemlyb.estimators import TI
import alchemtest.gmx
import alchemtest.amber
import alchemtest.gomc
from alchemtest.gmx import load_benzene
from alchemlyb.parsing.gmx import extract_dHdl
def gmx_benzene_coul_dHdl():
dataset = alchemtest.gmx.load_benzene()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['Coulomb']])
return dHdl
def gmx_benzene_vdw_dHdl():
dataset = alchemtest.gmx.load_benzene()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['VDW']])
return dHdl
def gmx_expanded_ensemble_case_1_dHdl():
dataset = alchemtest.gmx.load_expanded_ensemble_case_1()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['AllStates']])
return dHdl
def gmx_expanded_ensemble_case_2_dHdl():
dataset = alchemtest.gmx.load_expanded_ensemble_case_2()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['AllStates']])
return dHdl
def gmx_expanded_ensemble_case_3_dHdl():
dataset = alchemtest.gmx.load_expanded_ensemble_case_3()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['AllStates']])
return dHdl
def gmx_water_particle_with_total_energy_dHdl():
dataset = alchemtest.gmx.load_water_particle_with_total_energy()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['AllStates']])
return dHdl
def gmx_water_particle_with_potential_energy_dHdl():
dataset = alchemtest.gmx.load_water_particle_with_potential_energy()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['AllStates']])
return dHdl
def gmx_water_particle_without_energy_dHdl():
dataset = alchemtest.gmx.load_water_particle_without_energy()
dHdl = alchemlyb.concat([gmx.extract_dHdl(filename, T=300)
for filename in dataset['data']['AllStates']])
return dHdl
def amber_simplesolvated_charge_dHdl():
dataset = alchemtest.amber.load_simplesolvated()
dHdl = alchemlyb.concat([amber.extract_dHdl(filename, T=300)
for filename in dataset['data']['charge']])
return dHdl
def amber_simplesolvated_vdw_dHdl():
dataset = alchemtest.amber.load_simplesolvated()
dHdl = alchemlyb.concat([amber.extract_dHdl(filename, T=300)
for filename in dataset['data']['vdw']])
return dHdl
def gomc_benzene_dHdl():
dataset = alchemtest.gomc.load_benzene()
dHdl = alchemlyb.concat([gomc.extract_dHdl(filename, T=298)
for filename in dataset['data']])
return dHdl
class TIestimatorMixin:
def test_get_delta_f(self, X_delta_f):
dHdl, E, dE = X_delta_f
est = self.cls().fit(dHdl)
delta_f = est.delta_f_.iloc[0, -1]
d_delta_f = est.d_delta_f_.iloc[0, -1]
assert E == pytest.approx(delta_f, rel=1e-3)
assert dE == pytest.approx(d_delta_f, rel=1e-3)
class TestTI(TIestimatorMixin):
"""Tests for TI.
"""
cls = TI
T = 300
kT_amber = amber.k_b * T
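# each fixture params entry is (dHdl loader, expected free energy difference,
# expected error estimate), both expressed in kT; the AMBER reference values
# are divided by kT_amber to bring them into kT units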
@pytest.fixture(scope="class",
params = [(gmx_benzene_coul_dHdl, 3.089, 0.02157),
(gmx_benzene_vdw_dHdl, -3.056, 0.04863),
(gmx_expanded_ensemble_case_1_dHdl, 76.220, 0.15568),
(gmx_expanded_ensemble_case_2_dHdl, 76.247, 0.15889),
(gmx_expanded_ensemble_case_3_dHdl, 76.387, 0.12532),
(gmx_water_particle_with_total_energy_dHdl, -11.696, 0.091775),
(gmx_water_particle_with_potential_energy_dHdl, -11.751, 0.091149),
(gmx_water_particle_without_energy_dHdl, -11.687, 0.091604),
(amber_simplesolvated_charge_dHdl, -60.114/kT_amber, 0.08186/kT_amber),
(amber_simplesolvated_vdw_dHdl, 3.824/kT_amber, 0.13254/kT_amber),
])
def X_delta_f(self, request):
get_dHdl, E, dE = request.param
return get_dHdl(), E, dE
def test_TI_separate_dhdl_multiple_column():
dHdl = gomc_benzene_dHdl()
estimator = TI().fit(dHdl)
assert all([isinstance(dhdl, pd.Series) for dhdl in estimator.separate_dhdl()])
assert sorted([len(dhdl) for dhdl in estimator.separate_dhdl()]) == [8, 16]
def test_TI_separate_dhdl_single_column():
dHdl = gmx_benzene_coul_dHdl()
estimator = TI().fit(dHdl)
assert all([isinstance(dhdl, pd.Series) for dhdl in estimator.separate_dhdl()])
assert [len(dhdl) for dhdl in estimator.separate_dhdl()] == [5, ]
def test_TI_separate_dhdl_no_pertubed():
'''Test the case where two lambdas are present and one is not perturbed.'''
dHdl = gmx_benzene_coul_dHdl()
dHdl.insert(1, 'bound-lambda', [1.0, ] * len(dHdl))
dHdl.insert(1, 'bound', [1.0, ] * len(dHdl))
dHdl.set_index('bound-lambda', append=True, inplace=True)
estimator = TI().fit(dHdl)
assert all([isinstance(dhdl, pd.Series) for dhdl in estimator.separate_dhdl()])
assert [len(dhdl) for dhdl in estimator.separate_dhdl()] == [5, ]
class Test_Units():
'''Test the units.'''
@staticmethod
@pytest.fixture(scope='class')
def dhdl():
bz = load_benzene().data
dHdl_coul = alchemlyb.concat(
[extract_dHdl(xvg, T=300) for xvg in bz['Coulomb']])
dHdl_coul.attrs = extract_dHdl(load_benzene().data['Coulomb'][0], T=300).attrs
return dHdl_coul
def test_ti(self, dhdl):
ti = TI().fit(dhdl)
assert ti.delta_f_.attrs['temperature'] == 300
assert ti.delta_f_.attrs['energy_unit'] == 'kT'
assert ti.d_delta_f_.attrs['temperature'] == 300
assert ti.d_delta_f_.attrs['energy_unit'] == 'kT'
assert ti.dhdl.attrs['temperature'] == 300
assert ti.dhdl.attrs['energy_unit'] == 'kT'
def test_ti_separate_dhdl(self, dhdl):
ti = TI().fit(dhdl)
dhdl_list = ti.separate_dhdl()
for dhdl in dhdl_list:
assert dhdl.attrs['temperature'] == 300
assert dhdl.attrs['energy_unit'] == 'kT'
| bsd-3-clause | 2,148,707,599,273,751,000 | 33.694737 | 101 | 0.613774 | false |
3bot/3bot | threebot/forms.py | 1 | 14674 | from django import forms
from django.forms.models import modelformset_factory
from django.utils.safestring import mark_safe
from organizations.models import Organization
from organizations.utils import create_organization
from threebot.utils import get_possible_parameters, get_possible_worker, get_curr_org
from threebot.utils import get_preset_param, get_preset_worker, get_possible_owners, get_possible_lists, get_preset_list
from threebot.models import Worker
from threebot.models import Workflow
from threebot.models import Task
from threebot.models import UserParameter, OrganizationParameter, ParameterList
from threebot.utils import order_workflow_tasks
class UserParameterCreateForm(forms.ModelForm):
next = forms.CharField(required=False, widget=forms.HiddenInput())
class Meta:
model = UserParameter
fields = ['data_type', 'name', 'value', 'owner', ]
widgets = {
'data_type': forms.Select(attrs={'class': 'form-control', }),
'name': forms.TextInput(attrs={'class': 'form-control', }),
'value': forms.TextInput(attrs={'class': 'form-control', }),
'owner': forms.HiddenInput(),
}
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
super(UserParameterCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].initial = user
class UserParameterChangeForm(UserParameterCreateForm):
class Meta(UserParameterCreateForm.Meta):
exclude = ('owner',)
class OrganizationParameterCreateForm(forms.ModelForm):
class Meta:
model = OrganizationParameter
fields = ['data_type', 'name', 'value', 'owner', ]
widgets = {
'data_type': forms.Select(attrs={'class': 'form-control', }),
'name': forms.TextInput(attrs={'class': 'form-control', }),
'value': forms.TextInput(attrs={'class': 'form-control', }),
'owner': forms.HiddenInput(),
}
def __init__(self, *args, **kwargs):
org = kwargs.pop('org')
super(OrganizationParameterCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].initial = org
class OrganizationParameterChangeForm(OrganizationParameterCreateForm):
class Meta(OrganizationParameterCreateForm.Meta):
exclude = ('owner',)
def make_organization_parameter_formset(org, extra=3):
"""
This is a workaround for passing custom parameters (the parameter owner in our case)
to a ModelFormset and is based on this stackoverflow answer:
http://stackoverflow.com/a/1376616
"""
class _OrganizationParameterCreateForm(forms.ModelForm):
remove_from_list = forms.BooleanField(required=False)
class Meta:
model = OrganizationParameter
fields = ['data_type', 'name', 'value', 'owner', ]
widgets = {
'data_type': forms.Select(attrs={'class': 'form-control', }),
'name': forms.TextInput(attrs={'class': 'form-control', }),
'value': forms.TextInput(attrs={'class': 'form-control', }),
'owner': forms.HiddenInput(),
}
def __init__(self, *args, **kwargs):
# self.org = org
super(_OrganizationParameterCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].initial = org
return modelformset_factory(OrganizationParameter, form=_OrganizationParameterCreateForm, extra=extra, can_delete=True)
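# Typical usage sketch in a view (assumed, not part of this module):
#   FormSet = make_organization_parameter_formset(org)
#   formset = FormSet(request.POST or None,
#                     queryset=OrganizationParameter.objects.filter(owner=org))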
def make_user_parameter_formset(user, extra=3):
"""
This is a workaround for passing custom parameters (the parameter owner in our case)
to a ModelFormset and is based on this stackoverflow answer:
http://stackoverflow.com/a/1376616
"""
class _UserParameterCreateForm(forms.ModelForm):
class Meta:
model = UserParameter
fields = ['data_type', 'name', 'value', 'owner', ]
widgets = {
'data_type': forms.Select(attrs={'class': 'form-control', }),
'name': forms.TextInput(attrs={'class': 'form-control', }),
'value': forms.TextInput(attrs={'class': 'form-control', }),
'owner': forms.HiddenInput(),
}
def __init__(self, *args, **kwargs):
# user = kwargs.pop('user')
super(_UserParameterCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].initial = user
return modelformset_factory(UserParameter, form=_UserParameterCreateForm, extra=extra, can_delete=True)
class ParameterListSelectForm(forms.Form):
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
workflow = kwargs.pop('workflow')
self.workflow = workflow
super(ParameterListSelectForm, self).__init__(*args, **kwargs)
possible_lists = [('', '--Choose a Parameter List--')]
possible_lists += get_possible_lists(request, workflow)
preset_list_id = get_preset_list(request, workflow, id=True)
self.fields['parameter_list'] = forms.ChoiceField(
label="Parameter List",
choices=possible_lists,
initial=preset_list_id,
widget=forms.Select(attrs={'class': 'form-control', }),
)
self.fields['parameter_list'].empty_label = None
def clean_parameter_list(self, *args, **kwargs):
list_id = self.cleaned_data['parameter_list']
parameter_list = ParameterList.objects.get(id=list_id)
workflow_tasks = order_workflow_tasks(self.workflow)
# make a list of ::all_required_inputs, with name and data type
all_required_inputs = []
for wf_task in workflow_tasks:
for name, data_type in wf_task.task.required_inputs.iteritems():
if name not in all_required_inputs:
all_required_inputs.append((name, data_type))
# make a list of ::all_parameters from the list, with name and data type
all_parameters = []
for parameter in parameter_list.parameters.all():
all_parameters.append((parameter.name, parameter.data_type))
# if any required input is missing from ::all_parameters, the list is
# not valid for this workflow, so raise a form validation error
if not all(e in all_parameters for e in all_required_inputs):
error_message = "Not all required inputs found in ParameterList."
error_message += "<br><br><strong>Required inputs by the Tasks are</strong>:"
for param in all_required_inputs:
error_message += "<br>" + str(param[0]) + ":" + str(param[1])
error_message += "<br><br><strong>List contains</strong>:"
for param in all_parameters:
error_message += "<br>" + str(param[0]) + ":" + str(param[1])
raise forms.ValidationError(mark_safe(error_message))
return list_id
class WorkerSelectForm(forms.Form):
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
workflow = kwargs.pop('workflow')
super(WorkerSelectForm, self).__init__(*args, **kwargs)
possible_workers = get_possible_worker(request, as_list=True)
preset_worker = get_preset_worker(request, workflow, flat=True)
self.fields['worker'] = forms.MultipleChoiceField(
required=True,
label="Worker",
choices=possible_workers,
initial=preset_worker,
widget=forms.SelectMultiple(attrs={'class': 'form-control', }),
)
self.fields['worker'].empty_label = None
# workaround for displaying a message to the user
# if no worker is accessible while initializing the form
if len(possible_workers) <= 0:
self.cleaned_data = {}
msg = "No Worker found. Please configure a Worker first."
self.add_error('worker', msg)
def clean(self):
cleaned_data = super(WorkerSelectForm, self).clean()
worker_ids = cleaned_data.get('worker', [])
workers = Worker.objects.filter(id__in=worker_ids)
for worker in workers:
if not worker.is_accessible:
raise forms.ValidationError("worker not accessible")
# Always return the full collection of cleaned data.
        # This changed in Django 1.7 but raises errors in Django <1.7.
        # We want to support both, so we return cleaned_data explicitly.
return cleaned_data
class WorkerForm(forms.ModelForm):
"""Base Worker Form"""
class Meta:
model = Worker
fields = ['title', 'ip', 'port', 'muted', 'secret_key', 'pre_task', 'post_task', ]
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control', }),
'ip': forms.TextInput(attrs={'class': 'form-control', }),
'addr': forms.TextInput(attrs={'class': 'form-control', 'rows': '5', }),
'port': forms.TextInput(attrs={'class': 'form-control', }),
'secret_key': forms.TextInput(attrs={'class': 'form-control', }),
'pre_task': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
'post_task': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
}
def __init__(self, *args, **kwargs):
super(WorkerForm, self).__init__(*args, **kwargs)
class WorkerCreateForm(WorkerForm):
"""Form to create a Worker"""
class Meta(WorkerForm.Meta):
fields = ['owner', ] + WorkerForm.Meta.fields
exclude = ['muted']
WorkerForm.Meta.widgets['owner'] = forms.Select(attrs={'class': 'form-control'})
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
super(WorkerCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].queryset = get_possible_owners(request)
self.fields['owner'].initial = get_curr_org(request)
class WorkerChangeForm(WorkerForm):
"""Form to edit a Worker"""
pass
class TaskForm(forms.ModelForm):
"""Base Task Form"""
class Meta:
model = Task
fields = ['title', 'desc', 'template', 'changelog', 'is_readonly', ]
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control', }),
'desc': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
'template': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', 'style': 'font-family:monospace;'}),
'changelog': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
'is_readonly': forms.CheckboxInput(attrs={}),
}
def __init__(self, *args, **kwargs):
super(TaskForm, self).__init__(*args, **kwargs)
class TaskCreateForm(TaskForm):
"""Form to create a Task"""
class Meta(TaskForm.Meta):
fields = ['owner', ] + TaskForm.Meta.fields
TaskForm.Meta.widgets['owner'] = forms.Select(attrs={'class': 'form-control'})
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
super(TaskCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].queryset = get_possible_owners(request)
self.fields['owner'].initial = get_curr_org(request)
class TaskChangeForm(TaskForm):
"""Form to edit a Task"""
pass
class TaskImportForm(forms.Form):
"""Form to import a Task"""
task_json = forms.FileField()
class WorkflowForm(forms.ModelForm):
"""Base Workflow Form"""
class Meta:
model = Workflow
fields = ['title', 'desc', 'pre_task', 'post_task', ]
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control', }),
'desc': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
'pre_task': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
'post_task': forms.Textarea(attrs={'class': 'form-control', 'rows': '5', }),
}
def __init__(self, *args, **kwargs):
super(WorkflowForm, self).__init__(*args, **kwargs)
class WorkflowCreateForm(WorkflowForm):
"""Form to create a Workflow"""
class Meta(WorkflowForm.Meta):
fields = ['owner', ] + WorkflowForm.Meta.fields
WorkflowForm.Meta.widgets['owner'] = forms.Select(attrs={'class': 'form-control'})
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
super(WorkflowCreateForm, self).__init__(*args, **kwargs)
self.fields['owner'].queryset = get_possible_owners(request)
self.fields['owner'].initial = get_curr_org(request)
class WorkflowChangeForm(WorkflowForm):
"""Form to edit a Workflow"""
pass
class WorkflowReorderForm(forms.Form):
workflow_id = forms.CharField(max_length=30, widget=forms.HiddenInput())
order = forms.CharField(widget=forms.HiddenInput())
class TaskParameterForm(forms.Form):
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
extra = kwargs.pop('extra')
workflow_task = kwargs.pop('workflow_task')
super(TaskParameterForm, self).__init__(*args, **kwargs)
for name, data_type in extra.iteritems():
possible_parameters = get_possible_parameters(request, workflow_task, data_type)
preset_parameter = get_preset_param(request, workflow_task, name, data_type)
self.fields['wt_task_%i.%s.%s' % (workflow_task.id, data_type, name)] = forms.ChoiceField(
label="%s (%s)" % (name, data_type),
choices=possible_parameters,
initial=preset_parameter,
widget=forms.Select(attrs={'class': 'form-control', }),
)
# each param gets a hidden field for saving prompted data
self.fields['prompt_wt_task_%i.%s.%s' % (workflow_task.id, data_type, name)] = forms.CharField(widget=forms.HiddenInput(), required=False, )
class OrganizationCreateForm(forms.ModelForm):
"""
Form class for creating a new organization, complete with new owner, including a
User instance, OrganizationUser instance, and OrganizationOwner instance.
"""
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
super(OrganizationCreateForm, self).__init__(*args, **kwargs)
class Meta:
model = Organization
exclude = ('users', 'is_active',)
def save(self, **kwargs):
"""
Create the organization, then get the user, then make the owner.
"""
user = self.request.user
return create_organization(user, self.cleaned_data['name'], self.cleaned_data['slug'], is_active=True)
| bsd-3-clause | -5,820,275,743,983,912,000 | 39.535912 | 152 | 0.612853 | false |
Intel-tensorflow/tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic_v1_no_variable_lifting.py | 6 | 2020 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/basic_v1_no_variable_lifting | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Verify that the tf.versions attribute exists. It is difficult to enforce
# contents, since the version numbers change over time. The conversion logic
# itself is verified in the common graphdef converter, so here just assert
# it is being invoked.
# CHECK: module
# CHECK-NOT: tf_saved_model.global_tensor
def Test():
x = tf.constant([[1.0], [1.0], [1.0]])
y = tf.compat.v1.get_variable(
name='y',
shape=(1, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
r = tf.matmul(x, y)
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r)
return {
'key': (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_function'))
}, None, None
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test, lift_variables=False)
| apache-2.0 | 6,024,694,492,854,849,000 | 34.438596 | 80 | 0.683663 | false |
krafczyk/spack | var/spack/repos/builtin/packages/icet/package.py | 2 | 2017 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Icet(CMakePackage):
"""The Image Composition Engine for Tiles (IceT) is a high-performance
sort-last parallel rendering library."""
homepage = "http://icet.sandia.gov"
url = "https://gitlab.kitware.com/api/v4/projects/icet%2Ficet/repository/archive.tar.bz2?sha=IceT-2.1.1"
git = "https://gitlab.kitware.com/icet/icet.git"
version('develop', branch='master')
version('2.1.1', '4f971c51105a64937460d482adca2a6c')
depends_on('mpi')
def cmake_args(self):
return ['-DICET_USE_OPENGL:BOOL=OFF']
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
"""Work-around for ill-placed CMake modules"""
spack_env.prepend_path('CMAKE_PREFIX_PATH', self.prefix.lib)
| lgpl-2.1 | 2,029,701,255,037,552,600 | 42.847826 | 113 | 0.672286 | false |
joshrabinowitz/bitcoin | test/functional/p2p_timeouts.py | 3 | 2305 | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various net timeouts.
- Create three bitcoind nodes:
no_verack_node - we never send a verack in response to their version
no_version_node - we never send a version (only a ping)
no_send_node - we never send any P2P message.
- Start all three nodes
- Wait 1 second
- Assert that we're connected
- Send a ping to no_verack_node and no_version_node
- Wait 30 seconds
- Assert that we're still connected
- Send a ping to no_verack_node and no_version_node
- Wait 31 seconds
- Assert that we're no longer connected (timeout to receive version/verack is 60 seconds)
"""
from time import sleep
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TestP2PConn(P2PInterface):
def on_version(self, message):
# Don't send a verack in response
pass
class TimeoutsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
# Setup the p2p connections
no_verack_node = self.nodes[0].add_p2p_connection(TestP2PConn())
no_version_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False)
no_send_node = self.nodes[0].add_p2p_connection(TestP2PConn(), send_version=False)
sleep(1)
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(30)
assert "version" in no_verack_node.last_message
assert no_verack_node.is_connected
assert no_version_node.is_connected
assert no_send_node.is_connected
no_verack_node.send_message(msg_ping())
no_version_node.send_message(msg_ping())
sleep(31)
assert not no_verack_node.is_connected
assert not no_version_node.is_connected
assert not no_send_node.is_connected
if __name__ == '__main__':
TimeoutsTest().main()
| mit | 821,442,777,463,884,500 | 30.575342 | 93 | 0.689805 | false |
inkerra/cinder | cinder/openstack/common/processutils.py | 2 | 9178 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = "Unexpected error while running command."
if exit_code is None:
exit_code = '-'
message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
% (description, cmd, exit_code, stdout, stderr))
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
    :type process_input: string
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
program exits with one of these code.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
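    Usage sketch (the command and flags shown are illustrative only):
        out, err = execute('ls', '-l', '/tmp',
                           run_as_root=True, root_helper='sudo')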
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=('Command requested root, but did not specify a root '
'helper.'))
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
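    Usage sketch (the command shown is illustrative only):
        out, err = trycmd('df', '-h', discard_warnings=True)
        if err:
            pass  # a non-empty err means the command failed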
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
| apache-2.0 | 5,934,840,926,830,299,000 | 36.308943 | 79 | 0.574853 | false |
gunnarleffler/instapost | tdg_or_wa_avgs/cwms_read.py | 1 | 8073 | # -*- coding: utf-8 -*-
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
def reindex(df, start_date, end_date, freq):
date = pd.date_range(start = datetime(*start_date), end = datetime(*end_date), freq = freq)
date = [pd.Timestamp(x) for x in date]
df = df.reindex(date)
df.index.rename('date', inplace = True)
return df
def get_frequency(index):
"""
Args:
index: a pd.core.indexes.datetimes.DatetimeIndex from a timeseries
Returns:
freq: a string value of either a daily, hourly, minutely, or secondly
Offset Alias with the appropriate multiple.
This is not very robust, and returns False if it is not able to
easily determine the frequency
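    Example (illustrative): an index spaced every 60 minutes yields '1H',
    and an index spaced every 15 minutes yields '15min'.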
"""
seconds = index.to_series().diff().median().total_seconds()
minutes = seconds/60
hours = minutes/60
days = hours/24
if days>=1 and days%int(days) == 0:
freq = str(int(days))+'D'
elif hours>=1 and hours%int(hours) == 0:
freq = str(int(hours))+'H'
elif minutes>=1 and minutes%int(minutes) == 0:
freq = str(int(minutes))+'min'
elif seconds>=1 and seconds%int(seconds) == 0:
freq = str(int(seconds))+'S'
else:
freq = False
return freq
def time_window_url(paths, public=True, lookback = 7, start_date = False, end_date = False, timezone = 'PST'):
"""
helper function for cwms_read
Arguments:
        paths -- cwms data path string, or list of path strings,
        public -- boolean,
        lookback -- number of days back from today, or False to use a
                    start_date/end_date time window instead
start_date -- date integer tuple format (YYYY, m, d)
end_date -- date integer tuple format (YYYY, m, d)
timezone -- optional keyword argument if time zone is specified.
Defaults to 'PST' if nothing set
Returns:
url -- url string of CWMS data webservice for the specified
data path and time window
"""
if isinstance(paths, list):
path = '%22%2C%22'.join(paths)
else: path = paths
if public:
url = r'http://pweb.crohms.org/dd/common/web_service/webexec/getjson?timezone=TIMEZONE_&query=%5B%22PATH%22%5D&'
else:
url = r'http://nwp-wmlocal2.nwp.usace.army.mil/common/web_service/webexec/getjson?timezone=TIMEZONE_&query=%5B%22PATH%22%5D&'
url = url.replace('PATH', path).replace('TIMEZONE_', timezone)
if lookback:
time = 'backward=' + str(lookback) + 'd'
url = url + time
else:
url = url + 'startdate=START_MONTH%2FSTART_DAY%2FSTART_YEAR+00%3A00&enddate=END_MONTH%2FEND_DAY%2FEND_YEAR+00%3A00'
sy,sm,sd = start_date
start_date = datetime(sy,sm,sd)
ey,em,ed = end_date
end_date = datetime(ey,em,ed)
url = url.replace('START_MONTH', str(start_date.month)).replace('START_DAY', str(start_date.day)).replace('START_YEAR', str(start_date.year))
url = url.replace('END_MONTH', str(end_date.month)).replace('END_DAY', str(end_date.day)).replace('END_YEAR', str(end_date.year))
return url
def get_cwms(path, public = True, fill = True, **kwargs):
"""
A function to parse CWMS json data from webservice into a pandas dataframe
Positional Arguments:
paths -- single string or list of string of CWMS data paths, example: 'TDDO.Temp-Water.Inst.1Hour.0.GOES-REV'
Keyword Arguments:
The web service can either get a lookback, which is just a number of
    days from the current day, or a time window. Two keyword arguments are
    needed for a time window: start_date and end_date. The timezone can also
    be set.
lookback -- The number of days from current day to grab data.
(int or str)
example: 7
start_date -- The start of a time window (tuple) formatted
(year, month, day)
example: (2017, 3, 22)
end_date -- The end of a time window (tuple) formatted
(year, month, day)
example: (2017, 3, 22)
timezone -- "PST", "PDT", "MST", "MDT", "GMT"
Returns:
df -- A pandas dataframe with metadata from the webservice is returned.
           Metadata is stored in df.__dict__['metadata'] and is used in
           some of the plotting functions. The metadata is easily lost if a df
is copied or transformed in some way. It may be best to export the
metadata if it is needed. meta = df.__dict__['metadata']
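    Example (illustrative, reusing the sample path and lookback shown above):
        df = get_cwms('TDDO.Temp-Water.Inst.1Hour.0.GOES-REV', lookback=7)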
"""
try:
lookback = kwargs['lookback']
start_date = False
end_date = False
except:
lookback = False
start_date = kwargs['start_date']
end_date = kwargs['end_date']
try:timezone = kwargs['timezone']
except: timezone = 'PST'
url = time_window_url(path,start_date=start_date, end_date=end_date, lookback = lookback, public=public,timezone = timezone)
r = requests.get(url)
json_data = json.loads(r.text)
df_list = []
meta = {}
if not isinstance(path, list):
path = [path]
for site in path:
s = site.split('.')[0]
try:
data = json_data[s]
except KeyError:
print('No data for %s' % site)
continue
lat = data['coordinates']['latitude']
long = data['coordinates']['longitude']
tz_offset = data['tz_offset']
tz = data['timezone']
for path, vals in data['timeseries'].items():
column_name = '_'.join(path.split('.')[:2])
column_name = '_'.join(column_name.split('-'))
try:path_data = vals['values']
except KeyError:
print('No data for %s' % site)
continue
date = [val[0] for val in path_data]
values = [val[1] for val in path_data]
df= pd.DataFrame({'date': date, column_name: values})
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace = True)
df_list.append(df)
vals.pop('values', None)
vals.update({'path':path, 'lat':lat,'long':long, 'tz_offset':tz_offset, 'timezone':tz})
meta.update({column_name:vals})
df = pd.concat(df_list, axis = 1)
if fill:
freq = get_frequency(df.index)
if not freq:
print('Unable to determine frequency, returning data frame unfilled')
else:
if lookback:
end = datetime.now()
start = end - timedelta(days=lookback)
start_date = (start.year,start.month,start.day)
end_date = (end.year,end.month,end.day)
df = df.pipe(reindex, start_date, end_date, freq)
df.__dict__['metadata'] = meta
return df
def catalog():
"""
    Requests the CWMS catalog.  Returns a large dict that is not easy to
    wade through; it is easier to go to a dataquery site to find
    what you are looking for: http://www.nwd-wc.usace.army.mil/dd/common/dataquery/www/
Arguments:
Returns: dict
"""
url = r'http://www.nwd-wc.usace.army.mil/dd/common/web_service/webexec/getjson?catalog=%5B%5D'
r = requests.get(url)
return json.loads(r.text)
def site_catalog(site):
"""
Returns a dictionary of CWMS data paths for a particular site
Arguments:
site -- cwms site name, example TDDO
Returns:
json.loads(r.text) -- dictionary of available site data
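    Example (illustrative, using the sample site name above):
        data = site_catalog('TDDO')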
"""
url = r'http://www.nwd-wc.usace.army.mil/dd/common/web_service/webexec/getjson?tscatalog=%5B%22SITE%22%5D'
url = url.replace('SITE', site.upper())
r = requests.get(url)
return json.loads(r.text)
| mit | 4,054,802,065,479,725,600 | 34.253275 | 149 | 0.560263 | false |
codefisher/codefisher_apps | site_crawler/migrations/0001_initial.py | 1 | 1310 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CrawledPage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.TextField()),
('title', models.CharField(max_length=200, null=True, blank=True)),
('size', models.IntegerField()),
('status', models.IntegerField()),
('parents', models.TextField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CrawlProcess',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('crawler', models.CharField(max_length=50, choices=[(b'site_crawler', b'Site Crawler')])),
('deny', models.TextField()),
],
options={
},
bases=(models.Model,),
),
]
| mit | -2,904,308,545,585,652,700 | 32.589744 | 114 | 0.512977 | false |
bnaul/scikit-learn | sklearn/linear_model/__init__.py | 12 | 2872 | """
The :mod:`sklearn.linear_model` module implements a variety of linear models.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from ._base import LinearRegression
from ._bayes import BayesianRidge, ARDRegression
from ._least_angle import (Lars, LassoLars, lars_path, lars_path_gram, LarsCV,
LassoLarsCV, LassoLarsIC)
from ._coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from ._glm import (PoissonRegressor,
GammaRegressor, TweedieRegressor)
from ._huber import HuberRegressor
from ._sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from ._stochastic_gradient import SGDClassifier, SGDRegressor
from ._ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from ._logistic import LogisticRegression, LogisticRegressionCV
from ._omp import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV)
from ._passive_aggressive import PassiveAggressiveClassifier
from ._passive_aggressive import PassiveAggressiveRegressor
from ._perceptron import Perceptron
from ._ransac import RANSACRegressor
from ._theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lars_path_gram',
'lasso_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor',
'PoissonRegressor',
'GammaRegressor',
'TweedieRegressor']
| bsd-3-clause | -3,305,525,107,772,083,000 | 34.9 | 78 | 0.601671 | false |
gajim/python-nbxmpp | nbxmpp/modules/eme.py | 1 | 1740 | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import EMEData
from nbxmpp.modules.base import BaseModule
class EME(BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_eme,
ns=Namespace.EME,
priority=40)
]
def _process_eme(self, _client, stanza, properties):
encryption = stanza.getTag('encryption', namespace=Namespace.EME)
if encryption is None:
return
name = encryption.getAttr('name')
namespace = encryption.getAttr('namespace')
if namespace is None:
self._log.warning('No namespace on message')
return
properties.eme = EMEData(name=name, namespace=namespace)
self._log.info('Found data: %s', properties.eme)
| gpl-3.0 | 221,082,884,221,363,200 | 35.229167 | 73 | 0.6682 | false |
alunem/bioman | fasta/bmn-FastaRandDivides.py | 1 | 1085 | #!/usr/bin/env python
import string
import sys
import os
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from StringIO import StringIO
import random
import tempfile
fastafile = sys.argv[1]
#fastafile = "/bank/fasta/Roth/E1.454.fasta"
randomYorN = sys.argv[2]
nbPart=int(sys.argv[3])
#nbPart=int(5)
seqs=[]
nbSeqs=0
handle = open(fastafile)
for seq_record in SeqIO.parse(handle, "fasta"):
seqs.append(seq_record)
nbSeqs+=1
handle.close()
if randomYorN.lower()=="yes":
random.shuffle(seqs)
elif randomYorN.lower()!="no":
print("The second argument should be yes or no. Do you want to randomize the sequences before dividing the fasta file ?")
#### write the sequences into the divided output fasta files
nbSeqsbyfile=nbSeqs/nbPart
modulo=nbSeqs%nbPart
iteSeqs=0
for i in range(0,nbPart-1):
handleout = open("part"+str(i+1)+"."+fastafile, "w")
SeqIO.write(seqs[iteSeqs:iteSeqs+nbSeqsbyfile], handleout, "fasta")
iteSeqs+=nbSeqsbyfile
handleout = open("part."+str(nbPart)+"."+fastafile, "w")
SeqIO.write(seqs[iteSeqs:nbSeqs], handleout, "fasta")
| gpl-2.0 | -8,486,432,452,396,793,000 | 21.142857 | 122 | 0.738249 | false |
infinit/drake | src/drake/flex.py | 1 | 5564 | # Copyright (C) 2011-2016, Quentin "mefyl" Hocquet
#
# This software is provided "as is" without warranty of any kind,
# either expressed or implied, including but not limited to the
# implied warranties of fitness for a particular purpose.
#
# See the LICENSE file for more information.
import drake
import sys
class SDK(drake.Configuration):
"""Configuration for the Flex SDK library."""
def __init__(self, prefix = None):
"""Find and create a configuration for the Flex SDK.
prefix -- Where to find the Flex SDK, should contain bin/acompc.
"""
# Compute the search path.
if prefix is None:
test = [drake.Path('/usr'), drake.Path('/usr/local')]
else:
test = [drake.Path(prefix)]
self.__prefix = self._search_all('bin/acompc', test)[0]
self.__options = []
self.__external_library_path = []
def add_option(self, option):
self.__options.append(option)
def flags(self):
def mkpath(path):
if not path.absolute():
path = drake.srctree() / path
return str(path)
options = ''.join(map(lambda o: ' %s' % o, self.__options))
external_libraries = ''
if self.__external_library_path:
external_libraries = ':'.join(map(mkpath, self.__external_library_path))
external_libraries = ' -external-library-path+=%s' % external_libraries
return options + external_libraries
def add_external_library_path(self, path):
path = drake.Path(path)
self.__external_library_path.append(path)
def acompc(self):
return self.__prefix / 'bin/acompc'
def mxmlc(self):
return self.__prefix / 'bin/mxmlc'
def __repr__(self):
return 'Flex(prefix = %s)' % repr(self.__prefix)
class Source(drake.Node):
pass
drake.Node.extensions['as'] = Source
class Library(drake.Node):
def __init__(self, name, sources, flex_sdk):
self.__sources = sources
self.__sdk = flex_sdk
drake.Node.__init__(self, name)
LibraryBuilder(sources, self, flex_sdk)
drake.Node.extensions['swc'] = Library
class LibraryBuilder(drake.Builder):
def __init__(self, sources, target, sdk):
self.__sources = list(filter(lambda s: not isinstance(s, Library), sources))
self.__libraries = list(filter(lambda s: isinstance(s, Library), sources))
self.__target = target
self.__sdk = sdk
drake.Builder.__init__(self, sources, [target])
def execute(self):
# Options
options = ' -compiler.locale -compiler.optimize -static-link-runtime-shared-libraries -compiler.debug'
# Sources
sources = ' '.join(map(lambda src: str(src.path()), self.__sources))
# Libraries
libraries = ''
if self.__libraries:
libraries = ' -external-library-path+=%s' % ':'.join(map(lambda n: str(n.path().dirname()), self.__libraries))
# Output
output = str(self.__target.path())
# Command
return self.cmd('Flex %s' % self.__target, '%s%s %s -include-sources %s%s -output %s', str(self.__sdk.acompc()), self.__sdk.flags(), options, sources, libraries, output)
# _local/flex_sdk/bin/acompc -external-library-path+=modules/bindings/flash/UObjectFlash/libs/ -compiler.locale -compiler.optimize -static-link-runtime-shared-libraries -compiler.debug -keep-as3-metadata Urbi -include-sources modules/bindings/flash/UObjectFlash/com/ -output /tmp/UObjeftFlash.swc
class Application(drake.Node):
def __init__(self, name, config, source_path, source, sources, flex_sdk):
source_path = drake.Path(source_path)
self.__config = config
self.__source = source
self.__source_path = source_path
self.__sources = sources
self.__sdk = flex_sdk
drake.Node.__init__(self, name)
ApplicationBuilder(sources, self, config, source, source_path, flex_sdk)
drake.Node.extensions['swf'] = Application
class ApplicationBuilder(drake.Builder):
def __init__(self, sources, target, config, source, source_path, flex_sdk):
self.__config = config
self.__source_path = source_path
self.__source = source
self.__sources = list(filter(lambda s: not isinstance(s, Library), sources))
self.__libraries = list(filter(lambda s: isinstance(s, Library), sources))
self.__target = target
self.__sdk = flex_sdk
drake.Builder.__init__(self, sources + [config, source], [target])
def execute(self):
# Options
options = ' -compiler.locale -compiler.optimize -static-link-runtime-shared-libraries -compiler.debug'
# Sources
sources = ' '.join(map(lambda src: str(src.path()), self.__sources))
# Libraries
libraries = ''
if self.__libraries:
libraries = ' -library-path+=%s' % ':'.join(map(lambda n: str(n.path().dirname()), self.__libraries))
# Output
output = str(self.__target.path())
# Command
return self.cmd('Flex %s' % self.__target,
'%s%s -load-config %s -compiler.source-path %s -output %s %s',
self.__sdk.mxmlc(),
libraries,
self.__config.path(),
drake.srctree() / self.__source_path,
self.__target.path(),
self.__source.path(),
)
# $PWD/_local/flex_sdk/bin/mxmlc -compiler.source-path submodules/modules/bindings/flash/FlashServer submodules/modules/bindings/flash/FlashServer/FlashServer.mxml -compiler.external-library-path+=_build/submodules/modules/bindings/flash/UObjectFlash:_build/submodules/modules/bindings/flash/UStdLib
| agpl-3.0 | -3,040,151,377,415,750,700 | 35.847682 | 300 | 0.633537 | false |
palaniyappanBala/rekall | rekall-core/rekall/plugins/collectors/ballast.py | 4 | 1600 | # Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
The Rekall Memory Forensics entity layer.
"""
__author__ = "Adam Sindelar <[email protected]>"
import datetime
from rekall.entities import definitions
from rekall.entities import collector
class BallastGenerator(collector.EntityCollector):
"""Generates ballast entities to stress-test the entity system."""
outputs = ["Timestamps", "Named"]
@classmethod
def is_active(cls, session):
return session.GetParameter("generate_ballast") > 0
def collect(self, hint):
for i in range(self.session.GetParameter("generate_ballast")):
yield [
definitions.Named(
name="Ballast entry #%d" % i,
kind="Ballast"),
definitions.Timestamps(
created_at=datetime.datetime.fromtimestamp(0))]
| gpl-2.0 | -4,227,601,355,666,402,300 | 33.042553 | 73 | 0.6975 | false |
edisonlz/fruit | web_project/base/site-packages/util/thrift/thrifttools.py | 1 | 1984 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import thread
import logging
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
import settings
SERVICE_SELECTION = 0
class ThreadMappedPool(dict):
def __new__(cls, master):
return super(ThreadMappedPool, cls).__new__(cls)
def __init__(self, master):
self.master = master
@property
def current_key(self):
return thread.get_ident()
@contextmanager
def reserve(self):
"""Reserve a client.
Creates a new client based on the master client if none exists for the
current thread.
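        Usage sketch (``master_client`` and ``ping`` are illustrative names):
            pool = ThreadMappedPool(master_client)
            with pool.reserve() as client:
                client.ping()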
"""
key = self.current_key
mc = self.pop(key, None)
if mc is None:
mc = self.master.clone()
try:
yield mc
finally:
self[key] = mc
def relinquish(self):
"""Relinquish any reserved client for the current context.
Call this method before exiting a thread if it might potentially use
this pool.
"""
return self.pop(self.current_key, None)
# transport
_trans = None
def transport(reconn=False):
global _trans
if reconn or not (_trans and _trans.isOpen()):
_trans = TSocket.TSocket(**settings.thrift_server)
logging.debug("http://%s:%s" % (settings.thrift_server['host'], str(settings.thrift_server['port'])))
_trans = TTransport.TBufferedTransport(_trans)
_trans.open()
return _trans
def protocol(trans):
return TBinaryProtocol.TBinaryProtocolAccelerated(trans)
def service(sname, protocol):
def deco(fn):
def f(*args, **kwargs):
protocol.writeMessageBegin(sname, SERVICE_SELECTION, 0)
result = fn(*args, **kwargs)
protocol.writeMessageEnd()
return result
return f
return deco
#test
#service_client_map, service_iface_map = register_service_client()
| apache-2.0 | 1,768,033,562,762,747,000 | 24.113924 | 109 | 0.62752 | false |
hyperized/ansible | lib/ansible/modules/network/f5/bigip_irule.py | 38 | 17060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_irule
short_description: Manage iRules across different modules on a BIG-IP
description:
- Manage iRules across different modules on a BIG-IP.
version_added: 2.2
options:
content:
description:
- When used instead of 'src', sets the contents of an iRule directly to
the specified value. This is for simple values, but can be used with
lookup plugins for anything complex or with formatting. Either one
of C(src) or C(content) must be provided.
type: str
module:
description:
- The BIG-IP module to add the iRule to.
type: str
required: True
choices:
- ltm
- gtm
name:
description:
- The name of the iRule.
type: str
required: True
src:
description:
- The iRule file to interpret and upload to the BIG-IP. Either one
of C(src) or C(content) must be provided.
type: path
required: True
state:
description:
- Whether the iRule should exist or not.
type: str
choices:
- present
- absent
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the iRule contained in template irule.tcl to the LTM module
bigip_irule:
content: "{{ lookup('template', 'irule.tcl') }}"
module: ltm
name: MyiRule
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add the iRule contained in static file irule.tcl to the LTM module
bigip_irule:
module: ltm
name: MyiRule
src: irule.tcl
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
module:
description: The module that the iRule was added to
returned: changed and success
type: str
sample: gtm
src:
description: The filename that included the iRule source
returned: changed and success, when provided
type: str
sample: /opt/src/irules/example1.tcl
content:
description: The content of the iRule that was managed
returned: changed and success
type: str
sample: "when LB_FAILED { set wipHost [LB::server addr] }"
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'apiAnonymous': 'content',
}
updatables = [
'content',
]
api_attributes = [
'apiAnonymous',
]
returnables = [
'content', 'src', 'module',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def content(self):
if self._values['content'] is None:
result = self.src_content
else:
result = self._values['content']
return str(result).strip()
@property
def src(self):
if self._values['src'] is None:
return None
return self._values['src']
@property
def src_content(self):
if not os.path.exists(self._values['src']):
raise F5ModuleError(
"The specified 'src' was not found."
)
with open(self._values['src']) as f:
result = f.read()
return result
class Changes(Parameters):
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
self.kwargs = kwargs
def exec_module(self):
if self.module.params['module'] == 'ltm':
manager = self.get_manager('ltm')
elif self.module.params['module'] == 'gtm':
manager = self.get_manager('gtm')
else:
raise F5ModuleError(
"An unknown iRule module type was specified"
)
return manager.exec_module()
def get_manager(self, type):
if type == 'ltm':
return LtmManager(**self.kwargs)
elif type == 'gtm':
return GtmManager(**self.kwargs)
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.have = None
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state in ["present"]:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if not self.want.content and not self.want.src:
raise F5ModuleError(
"Either 'content' or 'src' must be provided"
)
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if not self.exists():
raise F5ModuleError("Failed to create the iRule")
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the iRule")
return True
class LtmManager(BaseManager):
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class GtmManager(BaseManager):
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/rule/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
content=dict(),
src=dict(
type='path',
),
name=dict(required=True),
module=dict(
required=True,
choices=['gtm', 'ltm']
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['content', 'src']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | -6,118,759,176,677,095,000 | 28.566724 | 91 | 0.578312 | false |
stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/unit/cloud/clouds/dimensiondata_test.py | 2 | 4949 | # -*- coding: utf-8 -*-
'''
:codeauthor: `Anthony Shaw <[email protected]>`
tests.unit.cloud.clouds.dimensiondata_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Python libs
from __future__ import absolute_import
import libcloud.security
import platform
import os
# Import Salt Libs
from salt.cloud.clouds import dimensiondata
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
# Global Variables
dimensiondata.__active_provider_name__ = ''
dimensiondata.__opts__ = {
'providers': {
'my-dimensiondata-cloud': {
'dimensiondata': {
'driver': 'dimensiondata',
'region': 'dd-au',
'user_id': 'jon_snow',
'key': 'IKnowNothing'
}
}
}
}
VM_NAME = 'winterfell'
HAS_CERTS = True
ON_SUSE = True if 'SuSE' in platform.dist() else False
ON_MAC = True if 'Darwin' in platform.system() else False
if not os.path.exists('/etc/ssl/certs/YaST-CA.pem') and ON_SUSE:
if os.path.isfile('/etc/ssl/ca-bundle.pem'):
libcloud.security.CA_CERTS_PATH.append('/etc/ssl/ca-bundle.pem')
else:
HAS_CERTS = False
elif ON_MAC:
if os.path.isfile('/opt/local/share/curl/curl-ca-bundle.crt'):
pass # libcloud will already find this file
elif os.path.isfile('/usr/local/etc/openssl/cert.pem'):
pass # libcloud will already find this file
else:
HAS_CERTS = False
class ExtendedTestCase(TestCase):
'''
Extended TestCase class containing additional helper methods.
'''
def assertRaisesWithMessage(self, exc_type, exc_msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
self.assertFail()
except Exception as exc:
self.assertEqual(type(exc), exc_type)
self.assertEqual(exc.message, exc_msg)
@skipIf(not HAS_CERTS, 'Cannot find CA cert bundle')
@skipIf(NO_MOCK, NO_MOCK_REASON)
@patch('salt.cloud.clouds.dimensiondata.__virtual__', MagicMock(return_value='dimensiondata'))
class DimensionDataTestCase(ExtendedTestCase):
'''
Unit TestCase for salt.cloud.clouds.dimensiondata module.
'''
def test_avail_images_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call avail_images
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.avail_images,
call='action'
)
def test_avail_locations_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call avail_locations
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.avail_locations,
call='action'
)
def test_avail_sizes_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call avail_sizes
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.avail_sizes,
call='action'
)
def test_list_nodes_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call list_nodes
with --action or -a.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.list_nodes,
call='action'
)
def test_destroy_call(self):
'''
Tests that a SaltCloudSystemExit is raised when trying to call destroy
with --function or -f.
'''
self.assertRaises(
SaltCloudSystemExit,
dimensiondata.destroy,
name=VM_NAME,
call='function'
)
def test_avail_sizes(self):
'''
        Tests that avail_sizes returns the single default size entry.
'''
sizes = dimensiondata.avail_sizes(call='foo')
self.assertEqual(
len(sizes),
1
)
self.assertEqual(
sizes['default']['name'],
'default'
)
@patch('libcloud.compute.drivers.dimensiondata.DimensionDataNodeDriver.list_nodes', MagicMock(return_value=[]))
def test_list_nodes(self):
nodes = dimensiondata.list_nodes()
self.assertEqual(
nodes,
{}
)
@patch('libcloud.compute.drivers.dimensiondata.DimensionDataNodeDriver.list_locations', MagicMock(return_value=[]))
def test_list_locations(self):
locations = dimensiondata.avail_locations()
self.assertEqual(
locations,
{}
)
if __name__ == '__main__':
from integration import run_tests
run_tests(DimensionDataTestCase, needs_daemon=False)
| apache-2.0 | -612,343,151,610,206,000 | 27.94152 | 119 | 0.599313 | false |
damian1976/openstack-accounting | util/server.py | 1 | 4730 | from .os_data import AccountData
import unicodedata
import re
# 'Server' class
# Contains all server accounting data
class Server(AccountData):
def __init__(self, name):
AccountData.__init__(self)
m_name = unicodedata.normalize('NFD',
re.sub("[\(\[].*?[\)\]]",
"",
name)).\
encode('ascii', 'ignore')
self.__name = m_name.decode('UTF-8')
#self.__name = name
self.__id = ''
self.__state = 'active'
self.__projectId = ''
self.__projectName = ''
def setName(self, name):
self.__name = name
def getName(self):
return self.__name
def setId(self, id):
self.__id = id
def getId(self):
return self.__id
def setState(self, state):
self.__state = state
def getState(self):
return self.__state
def setProjectId(self, id):
self.__projectId = id
def getProjectId(self):
return self.__projectId
def setProjectName(self, name):
self.__projectName = name
def getProjectName(self):
return self.__projectName
def __repr__(self):
return "<Server>"
# Returns String representation for a server object
def __str__(self):
str = "Server name: {0} ({1})\n" \
"\tHours: {2:.2f}\n" \
"\tCPU Hours: {3:.2f}\n" \
"\tCPU Hours cost: {4:.2f}\n" \
"\tRAM GB-Hours: {5:.2f}\n" \
"\tRAM GB-Hours cost: {6:.2f}\n" \
"\tDisk GB-Hours: {7:.2f}\n" \
"\tDisk GB-Hours cost: {8:.2f}\n" \
"\tServer total cost: {9:.2f}\n"
return str.format(self.getName(),
self.getId(),
self.getHrs(),
self.getCPU('hours'),
self.getCPU('cost'),
self.getRAM('hours'),
self.getRAM('cost'),
self.getDisk('hours'),
self.getDisk('cost'),
self.getTotalCost())
# Updates server flavors with STOP. SHELVE statuses from config
def updateHoursAndVolumes(self,
stop_timeframes,
shelve_timeframes,
delete_timeframes,
coeff,
):
if delete_timeframes:
for hours in delete_timeframes:
self.subHrs(hours)
self.subDisk(self.getDisk('value')*hours, 'hours')
self.subCPU(self.getCPU('value')*hours, 'hours')
self.subRAM(self.getRAM('value')*hours, 'hours')
if stop_timeframes and coeff:
try:
for hours in stop_timeframes:
self.subHrs(hours*(1.0 - coeff['stop']))
self.subCPU(self.getCPU('value')*hours*(1.0 - coeff['stop_cpu']), 'hours')
self.subRAM(self.getRAM('value')*hours*(1.0 - coeff['stop_ram']), 'hours')
self.subDisk(self.getDisk('value')*hours*(1.0 - coeff['stop_disk']), 'hours')
except KeyError:
pass
if shelve_timeframes and coeff:
try:
for hours in shelve_timeframes:
self.subHrs(hours*(1.0 - coeff['shelve']))
self.subCPU(self.getCPU('value')*hours*(1.0 - coeff['shelve_cpu']), 'hours')
self.subRAM(self.getRAM('value')*hours*(1.0 - coeff['shelve_ram']), 'hours')
self.subDisk(self.getDisk('value')*hours*(1.0 - coeff['shelve_disk']), 'hours')
except KeyError:
pass
if (self.getHrs() == 0.0):
self.setCPU(0.0, 'hours')
self.setRAM(0.0, 'hours')
self.setDisk(0.0, 'hours')
# Updates server flavors with ACTIVE status coefficients from config
def updateMetricHoursWithActiveStatus(self, coeff):
if (not coeff):
return
self.mulHrs(coeff['active'])
self.mulDisk(coeff['active_disk'], 'hours')
self.mulCPU(coeff['active_cpu'], 'hours')
self.mulRAM(coeff['active_ram'], 'hours')
# Returns total cost for a server
def getTotalCost(self):
try:
self.__totalCost = (max(
self.getCPU('cost'),
self.getRAM('cost')
) + self.getDisk('cost'))
except Exception as e:
print("Error {0}".format(e))
return 0.0
return self.__totalCost
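# Illustrative sketch (not part of the original module): building a Server and
# running the timeframe/coefficient logic above on it.  The coefficient values,
# the single 10-hour stop timeframe and the instance details are sample data
# only, and the underlying AccountData counters are assumed to be filled in
# elsewhere (e.g. by the accounting collector).
def _example_server_accounting():
    """Hedged example: apply stop-state coefficients, then read the total cost."""
    srv = Server("demo-instance (test)")
    srv.setId("1234-abcd")
    srv.setProjectName("demo-project")
    # A stopped instance billed at 50% for CPU/RAM and 100% for disk (assumption).
    coeff = {'stop': 0.5, 'stop_cpu': 0.5, 'stop_ram': 0.5, 'stop_disk': 1.0}
    srv.updateHoursAndVolumes(stop_timeframes=[10.0], shelve_timeframes=[],
                              delete_timeframes=[], coeff=coeff)
    return srv.getTotalCost()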
| gpl-2.0 | 8,854,014,067,219,134,000 | 34.56391 | 99 | 0.47611 | false |
ruslanchasep/bartendro | ui/bartendro/view/ws/drink.py | 4 | 5611 | # -*- coding: utf-8 -*-
import json
from time import sleep
from operator import itemgetter
from bartendro import app, db, mixer
from flask import Flask, request
from flask.ext.login import login_required, current_user
from werkzeug.exceptions import ServiceUnavailable, BadRequest, InternalServerError
from bartendro.model.drink import Drink
from bartendro.model.drink_name import DrinkName
from bartendro.model.booze import Booze
from bartendro.model.drink_booze import DrinkBooze
from bartendro.model.dispenser import Dispenser
from bartendro.error import BartendroBusyError, BartendroBrokenError, BartendroCantPourError, BartendroCurrentSenseError
def ws_make_drink(drink_id):
recipe = {}
for arg in request.args:
disp = int(arg[5:])
recipe[disp] = int(request.args.get(arg))
drink = Drink.query.filter_by(id=int(drink_id)).first()
try:
app.mixer.make_drink(drink, recipe)
except mixer.BartendroCantPourError, err:
raise BadRequest(err)
except mixer.BartendroBrokenError, err:
raise InternalServerError(err)
except mixer.BartendroBusyError, err:
raise ServiceUnavailable(err)
return "ok\n"
@app.route('/ws/drink/<int:drink>')
def ws_drink(drink):
drink_mixer = app.mixer
if app.options.must_login_to_dispense and not current_user.is_authenticated():
return "login required"
return ws_make_drink(drink)
@app.route('/ws/drink/custom')
def ws_custom_drink():
if app.options.must_login_to_dispense and not current_user.is_authenticated():
return "login required"
return ws_make_drink(0)
@app.route('/ws/drink/<int:drink>/available/<int:state>')
def ws_drink_available(drink, state):
if not drink:
db.session.query(Drink).update({'available' : state})
else:
db.session.query(Drink).filter(Drink.id==drink).update({'available' : state})
db.session.flush()
db.session.commit()
return "ok\n"
@app.route('/ws/shots/<int:booze_id>')
def ws_shots(booze_id):
if app.options.must_login_to_dispense and not current_user.is_authenticated():
return "login required"
dispensers = db.session.query(Dispenser).all()
dispenser = None
for d in dispensers:
if d.booze.id == booze_id:
dispenser = d
if not dispenser:
return "this booze is not available"
try:
app.mixer.dispense_shot(dispenser, app.options.shot_size)
except mixer.BartendroCantPourError, err:
raise BadRequest(err)
except mixer.BartendroBrokenError, err:
raise InternalServerError(err)
except mixer.BartendroBusyError, err:
raise ServiceUnavailable(err)
return ""
@app.route('/ws/drink/<int:id>/load')
@login_required
def ws_drink_load(id):
return drink_load(id)
def drink_load(id):
drink = Drink.query.filter_by(id=int(id)).first()
boozes = []
for booze in drink.drink_boozes:
boozes.append((booze.booze_id, booze.value))
drink = {
'id' : id,
'name' : drink.name.name,
'desc' : drink.desc,
'popular' : drink.popular,
'available' : drink.available,
'boozes' : boozes,
'num_boozes' : len(boozes)
}
return json.dumps(drink)
@app.route('/ws/drink/<int:drink>/save', methods=["POST"])
def ws_drink_save(drink):
data = request.json['drink']
id = int(data["id"] or 0)
if id > 0:
drink = Drink.query.filter_by(id=int(id)).first()
else:
id = 0
drink = Drink()
db.session.add(drink)
try:
drink.name.name = data['name']
drink.desc = data['desc']
if data['popular']:
drink.popular = True
else:
drink.popular = False
if data['available']:
drink.available = True
else:
drink.available = False
except ValueError:
raise BadRequest
for selected_booze_id, parts, old_booze_id in data['boozes']:
try:
selected_booze_id = int(selected_booze_id) # this is the id that comes from the most recent selection
old_booze_id = int(old_booze_id) # this id is the id that was previously used by this slot. Used for
# cleaning up or updateing existing entries
parts = int(parts)
except ValueError:
raise BadRequest
# if the parts are set to zero, remove this drink_booze from this drink
if parts == 0:
if old_booze_id != 0:
for i, dbooze in enumerate(drink.drink_boozes):
if dbooze.booze_id == old_booze_id:
db.session.delete(drink.drink_boozes[i])
break
continue
# if there is an old_booze_id, then update the existing entry
if old_booze_id > 0:
for drink_booze in drink.drink_boozes:
if old_booze_id == drink_booze.booze_id:
drink_booze.value = parts
if (selected_booze_id != drink_booze.booze_id):
drink_booze.booze = Booze.query.filter_by(id=selected_booze_id).first()
break
else:
# Create a new drink-booze entry
booze = Booze.query.filter_by(id=selected_booze_id).first()
DrinkBooze(drink, booze, parts, 0)
db.session.commit()
mc = app.mc
mc.delete("top_drinks")
mc.delete("other_drinks")
mc.delete("available_drink_list")
return drink_load(drink.id)
| gpl-2.0 | 8,901,950,534,734,962,000 | 32.201183 | 120 | 0.615933 | false |
yangbh/Hammer | lib/theHarvester/discovery/baidusearch.py | 3 | 1605 | import string
import httplib, sys
import myparser
import re
import time
class search_baidu:
def __init__(self,word,limit,start):
self.word=word.replace(' ', '%20')
self.results=""
self.totalresults=""
self.server="www.baidu.com"
#self.apiserver="api.search.live.net"
self.hostname="www.baidu.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;zh-cn; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity="50"
self.limit=int(limit)
self.bingApi=""
self.counter=start
def do_search(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/s?wd=%40" + self.word + "&rn=100&pn="+ str(self.counter))
h.putheader('Host', self.hostname)
        h.putheader('Cookie', 'H_PS_PSSID=4454_1421_4414_4261_4202_4587; BAIDUID=ABE16F3C528AB718BFDBAAAA76626AC3:SL=0:NR=100:FG=1; BDRCVFR[feWj1Vr5u3D]=mk3SLVN4HKm; sug=3; bdime=0; BD_TMP_CK=true')
        h.putheader('Accept-Language', 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3')
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults+= self.results
def process(self):
while self.counter <= self.limit:
self.do_search()
# print "\tSearching "+ str(self.counter) + " results..."
self.counter+=100
def get_emails(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.emails()
def get_hostnames(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.hostnames()
def get_allhostnames(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.hostnames_all() | gpl-2.0 | -5,663,959,412,377,216,000 | 33.170213 | 190 | 0.708411 | false |
yezyilomo/dorm | dorm/db.py | 1 | 39874 | from flask import Flask
from flaskext.mysql import MySQL
import random
import collections
import hashlib
import copy
## Database configuration information ###################################
app = Flask(__name__)
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = None
app.config['MYSQL_DATABASE_PASSWORD'] = None
app.config['MYSQL_DATABASE_DB'] = None
app.config['MYSQL_DATABASE_HOST'] = None
mysql.init_app(app)
#########################################################################
db__name__=None
db__tables__=[]
def _execute(sql_statement):
"""This is a method for executing sql statements given as string argument
This is for internal use only
"""
connection=mysql.connect()
cursor=connection.cursor()
cursor.execute(sql_statement)
connection.commit()
cursor.close()
connection.close()
if cursor.description is None:
return None
else:
class empty(object):
pass
columns=tuple( [column[0] for column in cursor.description] )
obj=empty()
obj.columns=columns
obj.fetch=cursor.fetchall()
return obj
def execute(sql_statement): ##This method is for further modification, currently it does the same thing as _execute(sql_statement)
"""This is a method for executing sql statements given as string argument
This is meant for external use
"""
result= _execute(sql_statement)
return result
def functions_maker(name):
"""This is a method used to return objects of class table, it allow users to
access database tables through their names
"""
def new_function():
return actual_table(name)
return new_function
def configure(**data):
"""This is a method for configuring a database to be used,
    It generally accepts four specified arguments, which are
db_user, db_password, db_name and db_host, it's called as
db.configure( db_user='your_value',db_name='your_value',db_host='your_value',db_password='your_value' )
in 'database/config.py' file
"""
app.config['MYSQL_DATABASE_USER'] = data['db_user']
app.config['MYSQL_DATABASE_PASSWORD'] = data['db_password']
app.config['MYSQL_DATABASE_DB'] = data['db_name']
app.config['MYSQL_DATABASE_HOST'] = data['db_host']
global db__name__
db__name__ = data['db_name']
all_tables=_execute("show tables").fetch
global db__tables__
for table_name in all_tables:
globals().update({ table_name[0] : functions_maker(table_name[0]) })
db__tables__.append(table_name[0])
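# Illustrative sketch (not part of the library): after configure() runs, every
# table in the database is exposed as a module-level callable returning an
# actual_table object, so callers can write db.users().get() and similar.
# The credentials and the "users" table below are placeholders only.
def _example_configure_and_query():
    """Hedged example of wiring the module to a database and reading one table."""
    configure(db_user='app', db_password='secret',
              db_name='app_db', db_host='localhost')
    users = globals()['users']()     # equivalent to db.users() from calling code
    return users.get()               # tuple of record objects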
def hash(string):
"""This is a method which is used to hash information(eg passwords) for
    security purposes; it uses the sha3 algorithm to hash, and it adds some characters
to hashed string for increasing security
"""
hashed=hashlib.sha3_512(string.encode()).hexdigest()
additional_security="^dorm@ilo@yezy^#$%!flaskapp^"
return hashed+additional_security
def drop_tb_with_foreign_key_check(table):
"""This is a method which is used in droping database tables with argument
as a table name to be dropped
"""
sql_statement="drop table "+table
_execute(sql_statement)
def drop_tb_without_foreign_key_check(table):
"""This is a method which is used in droping database tables with argument
as a table name to be dropped
"""
sql_statement="SET FOREIGN_KEY_CHECKS = 0; drop table if exists "+ table +" ; SET FOREIGN_KEY_CHECKS = 1;"
_execute(sql_statement)
def truncate_tb_with_foreign_key_check(table):
"""This is a method which is used in truncating database tables with argument
as a table name to be truncated
"""
sql_statement="truncate table "+ table
_execute(sql_statement)
def truncate_tb_without_foreign_key_check(table):
"""This is a method which is used in truncating database tables with argument
as a table name to be truncated
"""
sql_statement="SET FOREIGN_KEY_CHECKS = 0; truncate table "+ table+ " ; SET FOREIGN_KEY_CHECKS = 1;"
_execute(sql_statement)
def create_db(db_name):
"""This is a method which is used to create database with argument
as a database name to be created
"""
sql_statement="create database "+db_name
_execute(sql_statement)
def drop_db(db_name):
"""This is a method which is used in droping database with argument
as database name to be dropped
"""
sql_statement="drop database "+db_name
_execute(sql_statement)
def get_objects(raw_records, columns, table):
"""This is the actual method which convert records extracted from a
database into record objects, it generally create those objects from class
record and assign them attributes corresponding to columns and their
values as extracted from a database, It returns a normal tuple
containing record objects
"""
columns=list(columns)
for column in table.table__columns__: ##check if there are colums with the same name for joined tables
splitted_column=column.split('.')
if '.' in column and len( splitted_column )>1:
columns[ columns.index( splitted_column[1] ) ]=column ##update column with the format 'table.column' to avoid name conflict
Record_list=[]
for record in raw_records:
rec_object=Record(table)
for col, value in zip(columns, record):
if "." in col: ##if the column is in a form of table.column make it accessible by using the same format( table.column )########
splitted_col=col.split('.')
setattr( rec_object, str(splitted_col[0]), type('name', (object, ), {splitted_col[1]: value}) ) #############################
setattr( rec_object, str(col), value )
Record_list.append( rec_object )
return tuple(Record_list)
def get_query_condition(data):
"""This method format a condition to be used in db query during database
update and lookup, it generally returns a formated string with a condition
to be used after where clause in a db query
"""
list_of_strings=[]
for key in data:
if isinstance(data[key],str):
list_of_strings.append( key+"='"+str(data[key])+"'" )
else:
list_of_strings.append( key+"="+str(data[key]) )
formated_str=", ".join(list_of_strings)
return formated_str
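# Illustrative sketch: get_query_condition() turns a dict of column/value pairs
# into the comma-separated "col=value" fragment placed after SET in an UPDATE
# statement; string values are quoted, numeric values are not.  Sample data only.
def _example_query_condition():
    """E.g. {'name': 'yezy', 'age': 20} -> "name='yezy', age=20"."""
    return get_query_condition({'name': 'yezy', 'age': 20})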
def random_table():
"""This is not necessary, it's just a method which select a table name
randomly from a list of tables in a database used and return it as
string
"""
sql_statement="show tables"
all_tables=_execute(sql_statement).fetch
rd=random.randint(0,len(all_tables)-1)
return all_tables[rd][0]
def list_tables():
"""This is a method which return all tables in a database as list
"""
sql_statement="show tables"
all_tables=_execute(sql_statement).fetch
return list(all_tables)
class field(object):
"""This is a class used to define table fields and their constrains
"""
def __init__(self,**data):
self.model=""
self.ref_field=""
self.field={"key":None,'sql_statement': "field__name "+data['type'] }
if len(data)==2 and 'constrain' in data:
self.field['sql_statement']+=" "+data['constrain']
elif len(data)==2 and 'key' in data:
self.field['key']=data['key']
elif len(data)==3 and 'constrain' in data and 'key' in data:
self.field['sql_statement']+=" "+data['constrain']
self.field['key']=data['key']
elif len(data)==3 and 'key' in data and 'ref' in data:
self.field['key']=data['key']
reference=data['ref'].split('.')
self.model=reference[0]
self.ref_field=reference[1]
elif len(data)==4 and 'key' in data and 'ref' in data and 'constrain' in data:
self.field['sql_statement']+=" "+data['constrain']
self.field['key']=data['key']
reference=data['ref'].split('.')
self.model=reference[0]
self.ref_field=reference[1]
else:
pass
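# Illustrative sketch (not part of the library): the keyword combinations that
# field() understands.  The MySQL type/constraint strings and the referenced
# "users.id" column are placeholders for the example.
def _example_field_declarations():
    """Return sample field objects covering the supported keyword combinations."""
    return {
        'plain': field(type='varchar(64)'),
        'constrained': field(type='varchar(64)', constrain='not null'),
        'primary': field(type='int', constrain='auto_increment', key='primary'),
        'foreign': field(type='int', key='foreign', ref='users.id'),
    }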
class arranged_attrs(type):
"""This is a metaclass intended to arrange model attributes the way they were
defined
"""
def __new__(cls, name, bases, attrs):
class_=type.__new__(cls,name, bases,attrs)
class_.all__fields__=attrs
return class_
@classmethod
def __prepare__(mcls, cls, bases):
"""This is a method which arrange model attributes as they were defined
"""
return collections.OrderedDict()
class model(object, metaclass=arranged_attrs):
"""This is a class which is used to define raw database(schema), it's inherited
by all classes used in creating database tables
"""
def create(self):
"""This is a method used to create database table(s)
"""
create_statement="create table "+str(self.__class__.__name__)+"("
sql_statement=""
primary_key=""
foreign_key=""
for table_field in self.all__fields__:
field_value=self.all__fields__[table_field]
if isinstance(field_value, field):
sql_statement=sql_statement+field_value.field['sql_statement'].replace('field__name',table_field)+" ,"
if field_value.field['key']=='primary':
primary_key+=table_field+" ,"
if field_value.field['key']=='foreign':
foreign_key+=', FOREIGN KEY ('+table_field+') REFERENCES ' +field_value.model+ ' ('+ field_value.ref_field +')'
primary_key='PRIMARY KEY('+primary_key[:len(primary_key)-1]+")"
create_statement=create_statement+sql_statement+primary_key+foreign_key+")"
_execute(create_statement)
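# Illustrative sketch (not part of the library): a model subclass pairs field()
# declarations with create(), which emits the CREATE TABLE statement (primary
# key on id, foreign key to authors.id here).  The table, its columns and the
# referenced "authors" table are invented; calling _example_books().create()
# would also require a configured database.
class _example_books(model):
    id = field(type='int', constrain='not null auto_increment', key='primary')
    title = field(type='varchar(128)', constrain='not null')
    author_id = field(type='int', key='foreign', ref='authors.id')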
class partial_table(object):
"""This is a class for defining partial table as object, it's the result of
using select statement, which tends to eliminate some columns and produce
a partial table
"""
def __init__(self,table, *columns, **kwargs):
"""This is a constructor method which takes table name as
argument and create table object from it
"""
self.table__name__=table.table__name__
self.table__columns__=table.table__columns__
self.selected__columns__=table.selected__columns__
self.primary__keys__=table.primary__keys__
self.table__type__="partial"
columns_to_remove=[]
columns_arrangement=[]
calculated_columns=[]
if len(columns)==1 and isinstance( columns[0], (tuple,list) ) :
columns=tuple(columns[0])
if 'all_except' in kwargs.keys() and ( isinstance(kwargs['all_except'], tuple) or isinstance( kwargs['all_except'], list) ):
columns_to_remove=list(kwargs['all_except'])
del kwargs['all_except']
for col in columns_to_remove:
self.table__columns__.remove(col)
columns_arrangement =self.table__columns__ + list(kwargs.keys())
calculated_columns=self.table__columns__
elif len(columns)==1 and columns[0]=="*":
##leave selected tables as they were in actual table
return
else:
columns_arrangement=list(columns)+list(kwargs.keys())
self.table__columns__=columns_arrangement
for column in kwargs:
calculated_columns.append( kwargs[column]+" as "+column )
temp_list=[", ".join(columns), ", ".join(calculated_columns)]
if temp_list[0]!="" and temp_list[1]!="" :
self.selected__columns__=", ".join(temp_list)
elif temp_list[0]!="" and temp_list[1]=="" :
self.selected__columns__=temp_list[0]
elif temp_list[0]=="" and temp_list[1]!="" :
self.selected__columns__=temp_list[1]
else:
raise Exception("Invalid arguments")
def get(self, col=None):
"""This is a method which returns all records from a database as
a custom tuple of objects of record when no argument is passed,
but it returns a tuple of values of a specified column passed
as a string argument(column name)
"""
raw_records=_execute("select " +self.selected__columns__+ " from "+str(self.table__name__))
if col is not None:
return read_only_records(get_objects(raw_records.fetch, raw_records.columns ,self)).get(col)
return read_write_records(get_objects(raw_records.fetch, raw_records.columns,self))
def getd(self,col_name):
"""This is a method for extracting distinct values in a specified column,
it takes string argument as column name from which values are
        supposed to be extracted
"""
return tuple(set(self.get(col_name)))
def find(self,**pri_key_with_val):
"""This is a method for finding a single specific record by using it's
primary key(s), here the argument to this method is the dict which
contains primary key(s) and it's/their corresponding value(s), the
format of argument is { primary_key1: value1, primary_key2: value2, ...}
"""
primary_keys=pri_key_with_val.keys()
        if set(pri_key_with_val) != set(self.primary__keys__): #if the user provides a non-primary key argument
            raise Exception("You have passed non-primary key argument(s)")
list_of_strings=[]
for key in pri_key_with_val:
if isinstance(pri_key_with_val[key],str):
list_of_strings.append( key+"='"+str(pri_key_with_val[key])+"'" )
else:
list_of_strings.append( key+"="+str(pri_key_with_val[key]) )
condition=" and ".join(list_of_strings)
record=self.where(condition)
if len(record)==0:
return None
else:
return record[0]
def where(self,*data):
"""This is a method which is used to query and return records from a
database as a custom tuple of objects of record, the criteria used
to query records is specified as argument(s), This method accept two
forms of arguments, the first form is three specified arguments which
form a query condition eg where("age", ">", 20),
and the second form is a single argument which specify a query condition
eg in the first example we could obtain the same result by using
where("age > 20")
"""
sql_statement=""
if len(data)==3:
col,expr,val=data[0],data[1],data[2]
if isinstance(val, list):
val=tuple(val)
if isinstance(val,str):
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",str(col)," ",str(expr)," ","'",val,"'"])
else:
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",str(col)," ",str(expr)," ",str(val)])
elif len(data)==1 :
cond=data[0]
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",cond])
else:
raise Exception("Invalid agruments")
raw_records=_execute(sql_statement)
return read_write_records( get_objects(raw_records.fetch, raw_records.columns, self) )
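# Illustrative sketch: a partial_table is normally obtained from
# actual_table.select() and narrows the columns used in the generated SELECT.
# The "users" table and its columns are placeholders; a configured database is
# assumed whenever the function is actually called.
def _example_partial_table_query():
    """Hedged example: select two columns, filter with where(), read one column."""
    users = actual_table("users")
    adults = users.select("name", "age").where("age", ">=", 18)
    return adults.get("name")        # tuple containing only the name values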
class computational_table(object): ##this is a class for computations, it's meant to separate computations from normal queries
"""This is a class for defining a computation database table as object, this
table is only meant for computations, it's meant to separate computations from normal queries
"""
def __init__(self,table, operation,column):
"""This is a constructor method which takes table name as
argument and create table object from it
"""
self.table__name__=table.table__name__
self.table__type__="computation"
self.selected__columns__=selected__columns__=operation+"("+column+")"
def get(self): ## get for computations
"""This is a method which returns all records from a database as
a custom tuple of objects of record
"""
raw_records=_execute("select " +self.selected__columns__+ " from "+str(self.table__name__))
return raw_records.fetch[0][0]
def where(self,*data): ## where for computations
"""This is a method which is used to query and return records from a
database as a custom tuple of objects of record, the criteria used
to query records is specified as argument(s), This method accept two
forms of arguments, the first form is three specified arguments which
form a query condition eg where("age", ">", 20),
and the second form is a single argument which specify a query condition
eg in the first example we could obtain the same result by using
where("age > 20")
"""
sql_statement=""
if len(data)==3:
col,expr,val=data[0],data[1],data[2]
if isinstance(val, list):
val=tuple(val)
if isinstance(val,str):
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",str(col)," ",str(expr)," ","'",val,"'"])
else:
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",str(col)," ",str(expr)," ",str(val)])
elif len(data)==1 :
cond=data[0]
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",cond])
else:
raise Exception("Invalid agruments")
raw_records=_execute(sql_statement)
return raw_records.fetch[0][0]
class actual_table(object):
"""This is a class for defining actual database table as object
"""
def __init__(self,table_name):
"""This is a constructor method which takes table name as
argument and create table object from it
"""
self.table__name__=table_name
self.selected__columns__="*"
self.table__type__="actual"
all_cols=_execute("show columns from "+str(self.table__name__))
self.table__columns__=[]
for col_name in all_cols.fetch:
self.table__columns__.append( str(col_name[0]) )
keys=_execute("show index from "+str(self.table__name__)+" where Key_name='PRIMARY'").fetch
self.primary__keys__=[]
for key in keys:
self.primary__keys__.append( str(key[4]) )
def select(self, *columns, **kwargs ):
"""This is a method which is used to select several columns to be included
in SQL query, it accept a number of arguments which are column names passed
as strings, if you want to select all columns except few columns you can pass
all_except=['column1', 'column2', ...] as kwarg
"""
partial_tb=partial_table(self, *columns, **kwargs) ##Here we use partial_table because not all columns are going to be included in select statement
return partial_tb
def selectd(self, *columns, **kwargs):
"""This is a method which is used to select several distinct columns to be included
in SQL query, it accept a number of arguments which are column names passed
as strings, if you want to select all columns except few columns you can pass
all_except=['column1', 'column2', ...] as kwarg
"""
partial_tb=partial_table(self, *columns, **kwargs) ##Here we use partial_table because not all columns are going to be included in select statement
partial_tb.selected__columns__="distinct "+partial_tb.selected__columns__
return partial_tb
def max(self,column):
calc_table=computational_table(self,'max',column)
return calc_table
def min(self,column):
calc_table=computational_table(self,'min',column)
return calc_table
def sum(self,column):
calc_table=computational_table(self,'sum',column)
return calc_table
def avg(self,column):
calc_table=computational_table(self,'avg',column)
return calc_table
def count(self,column="*"):
calc_table=computational_table(self,'count',column)
return calc_table
def get(self, col='no_column'):
"""This is a method which returns all records from a database as
a custom tuple of objects of record when no argument is passed,
but it returns a tuple of values of a specified column passed
as a string argument(column name)
"""
raw_records=_execute("select " +self.selected__columns__+ " from "+str(self.table__name__))
if self.table__type__=="computation":
return raw_records.fetch[0][0]
if col != 'no_column':
return read_only_records(get_objects(raw_records.fetch, raw_records.columns ,self)).get(col)
return read_write_records(get_objects(raw_records.fetch, raw_records.columns,self))
def getd(self,col_name):
"""This is a method for extracting distinct values in a specified column,
it takes string argument as column name from which values are
        supposed to be extracted
"""
return tuple(set(self.get(col_name)))
def find(self,**pri_key_with_val):
"""This is a method for finding a single specific record by using it's
primary key(s), here the argument to this method is the dict which
contains primary key(s) and it's/their corresponding value(s), the
format of argument is { primary_key1: value1, primary_key2: value2, ...}
"""
primary_keys=pri_key_with_val.keys()
        if set(pri_key_with_val) != set(self.primary__keys__): #if the user provides a non-primary key argument
            raise Exception("You have passed non-primary key argument(s)")
list_of_strings=[]
for key in pri_key_with_val:
if isinstance(pri_key_with_val[key],str):
list_of_strings.append( key+"='"+str(pri_key_with_val[key])+"'" )
else:
list_of_strings.append( key+"="+str(pri_key_with_val[key]) )
condition=" and ".join(list_of_strings)
record=self.where(condition)
if len(record)==0:
return None
else:
return record[0]
def where(self,*data):
"""This is a method which is used to query and return records from a
database as a custom tuple of objects of record, the criteria used
to query records is specified as argument(s), This method accept two
forms of arguments, the first form is three specified arguments which
form a query condition eg where("age", ">", 20),
and the second form is a single argument which specify a query condition
eg in the first example we could obtain the same result by using
where("age > 20")
"""
sql_statement=""
if len(data)==3:
col,expr,val=data[0],data[1],data[2]
if isinstance(val, list):
val=tuple(val)
if isinstance(val,str):
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",str(col)," ",str(expr)," ","'",val,"'"])
else:
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",str(col)," ",str(expr)," ",str(val)])
elif len(data)==1 :
cond=data[0]
sql_statement=''.join(["select " +self.selected__columns__+ " from ",str(self.table__name__)," where ",cond])
else:
raise Exception("Invalid agruments")
raw_records=_execute(sql_statement)
if self.table__type__=="computation":
return raw_records.fetch[0][0]
return read_write_records( get_objects(raw_records.fetch, raw_records.columns, self) )
def values_to_insert(self,data):
"""This is a method which format values as string to be used in insertion
query
"""
values="("
if isinstance(data, dict):
for key, value in data.items():
if isinstance(value,str):
values=values+"'"+value+"',"
else :
values=values+str(value)+","
elif isinstance(data, tuple):
for value in data:
if isinstance(value,str):
values=values+"'"+value+"',"
else :
values=values+str(value)+","
values=values[:len(values)-1]+")"
return values
def insert(self,*values,**data):
"""This is a method which is used to insert records into a database table, with
specified arguments as columns and their corresponding values to insert
into a database, It generally returns a record which has been inserted
into your database
"""
if len(values)==0 and len(data) > 0:
sql_statement="insert into "+ str(self.table__name__) +" (" +", ".join(list(data.keys()))+ ") values "+self.values_to_insert(data)
elif len(data)==0 and len(values) == len(self.table__columns__):
sql_statement="insert into "+ str(self.table__name__) + " values "+self.values_to_insert(values)
elif len(data)==0 and len(values) == 1 and isinstance(values[0], dict):
sql_statement="insert into "+ str(self.table__name__) +" (" +", ".join(list(values[0].keys()))+ ") values "+self.values_to_insert(values[0])
else:
raise Exception("Invalid arguments to 'insert' function")
_execute(sql_statement)
def join(self,table2,join_type='inner'):
"""This is a method which is used in joining database tables with first
arguments as table name to join to, and second argument as join type
which is inner by default, it returns an instance of FlatTable,
where different operations can be done
"""
table1=self
table2=actual_table(table2)
whole_table=FlatTable(join_type,table1,table2)
return whole_table
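# Illustrative sketch (not part of the library): a typical round trip on an
# actual_table.  The "users" table, its columns and the id=1 primary key are
# placeholders, and a configured database is assumed.
def _example_actual_table_crud():
    """Hedged example: insert, find by primary key, update, delete, count."""
    users = actual_table("users")
    users.insert(name="yezy", age=20)      # insert by column/value kwargs
    rec = users.find(id=1)                 # look up one record by primary key
    if rec is not None:
        rec.update(age=21)                 # update that single record
        rec.delete()                       # and remove it again
    return users.count().get()             # aggregate: number of rows left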
class FlatTable(object):
"""This is a class which defines a table formed as a result of joining two
tables
"""
def __init__(self,join_type, table1, table2):
"""This is a constructor which initializes a table with important parameters
"""
self.table__name__=table1.table__name__+"_and_"+table2.table__name__
self.tables__=[table1.table__name__, table2.table__name__]
self.table__type__='partial'
self.join__type__=join_type
self.on__condition__=""
self.selected__columns__="*"
flat_table_columns=table1.table__columns__ + table2.table__columns__
flat_table_primary_keys=table1.primary__keys__ + table2.primary__keys__
        duplicate_columns=[key for key in set(flat_table_columns) if flat_table_columns.count(key)>1] ##Identify columns with name collision and assign them full name ie table_name.column_name
        for col in duplicate_columns:
if col in flat_table_primary_keys and flat_table_primary_keys.count(col)>1:
flat_table_primary_keys[flat_table_primary_keys.index(col)]=table1.table__name__+'.'+col
                flat_table_primary_keys[flat_table_primary_keys.index(col)]=table2.table__name__+'.'+col
flat_table_columns[flat_table_columns.index(col)]=table1.table__name__+'.'+col
flat_table_columns[flat_table_columns.index(col)]=table2.table__name__+'.'+col
self.table__columns__=flat_table_columns
self.primary__keys__=flat_table_primary_keys
def on(self,*data):
"""This is a method which specify on condition to be used in joining
tables
"""
if len(data)==3:
            col1, op, col2 = data[0], data[1], data[2]
            self.on__condition__ = col1 + " " + op + " " + col2
elif len(data)==1:
self.on__condition__=data[0]
else:
raise Exception("Invalid arguments")
return self
def get(self, column=None):
"""This is a method which returns all records from a database as
a custom tuple of objects of record when no argument is passed,
but it returns a tuple of values of a specified column passed
as a string argument(column name)
"""
sql_statement="SELECT " +self.selected__columns__+ " FROM " +self.tables__[0]+ " " +self.join__type__+ " JOIN " +self.tables__[1]+" on "+self.on__condition__
raw_records=_execute(sql_statement)
if self.table__type__=="computation":
return raw_records.fetch[0][0]
elif column is not None:
rec_objects=get_objects(raw_records.fetch, raw_records.columns, self)
return tuple( [ getattr(record, column) for record in rec_objects ] )
return read_only_records(get_objects(raw_records.fetch, raw_records.columns, self))
def getd(self,col_name):
"""This is a method for extracting distinct values in a specified column,
it takes string argument as column name from which values are
        supposed to be extracted
"""
return tuple(set(self.get(col_name)))
def where(self, *data):
"""This is a method which is used to query and return records from a
database as a custom tuple of objects of record, the criteria used
to query records is specified as argument(s), This method accept two
forms of arguments, the first form is three specified arguments which
form a query condition eg where("age", ">", 20),
and the second form is a single argument which specify a query condition
eg in the first example we could obtain the same result by using
where("age > 20")
"""
sql_statement=""
if len(data)==3:
col,expr,val=data[0],data[1],data[2]
sql_statement="SELECT " +self.selected__columns__+ " FROM " +self.tables__[0]+ " " +self.join__type__+ " JOIN " +self.tables__[1]+" on "+self.on__condition__+ " where "+str(col)+" "+str(expr)+" "+str(val)
elif len(data)==1:
cond=data[0]
sql_statement="SELECT " +self.selected__columns__+ " FROM " +self.tables__[0]+ " " +self.join__type__+ " JOIN " +self.tables__[1]+" on "+self.on__condition__+" where "+cond
else:
raise Exception("Invalid agruments")
raw_records=_execute(sql_statement)
if self.table__type__=="computation":
return raw_records.fetch[0][0]
return read_only_records(get_objects(raw_records.fetch, raw_records.columns, self))
def select(self, *columns, **kwargs):
"""This is a method which is used to select several columns to be included
in SQL query, it accept a number of arguments which are column names passed
as strings, if you want to select all columns except few columns you can pass
all_except=['column1', 'column2', ...] as kwarg
"""
tb_copy=copy.deepcopy(self)
tb_copy.table__type__="partial"
columns_to_remove=[]
columns_arrangement=[]
calculated_columns=[]
if len(columns)==1 and isinstance( columns[0], (tuple,list) ) :
columns=tuple(columns[0])
if 'all_except' in kwargs.keys() and ( isinstance(kwargs['all_except'], tuple) or isinstance(kwargs['all_except'], list) ):
columns_to_remove=list(kwargs['all_except'])
del kwargs['all_except']
for col in columns_to_remove:
tb_copy.table__columns__.remove(col)
columns_arrangement=tb_copy.table__columns__+list(kwargs.keys())
calculated_columns=tb_copy.table__columns__
else:
columns_arrangement=list(columns)+list(kwargs.keys())
tb_copy.table__columns__=columns_arrangement
for column in kwargs:
calculated_columns.append( kwargs[column]+" as "+column )
temp_list=[", ".join(columns), ", ".join(calculated_columns)]
if temp_list[0]!="" and temp_list[1]!="" :
tb_copy.selected__columns__=", ".join(temp_list)
elif temp_list[0]!="" and temp_list[1]=="" :
tb_copy.selected__columns__=temp_list[0]
elif temp_list[0]=="" and temp_list[1]!="" :
tb_copy.selected__columns__=temp_list[1]
else:
raise Exception("Invalid arguments")
return tb_copy
    def selectd(self, *columns, **kwargs):
"""This is a method which is used to select several distinct columns to be included
in SQL query, it accept a number of arguments which are column names passed
as strings, if you want to select all columns except few columns you can pass
all_except=['column1', 'column2', ...] as kwarg
"""
        partial_tb = self.select(*columns, **kwargs) ##Here we use partial_table because not all columns are going to be included in select statement
partial_tb.selected__columns__="distinct "+partial_tb.selected__columns__
return partial_tb
def max(self,column):
tb_copy=copy.deepcopy(self)
tb_copy.selected__columns__='max('+column+')'
tb_copy.table__type__="computation"
return tb_copy
def min(self,column):
tb_copy=copy.deepcopy(self)
tb_copy.selected__columns__='min('+column+')'
tb_copy.table__type__="computation"
return tb_copy
def sum(self,column):
tb_copy=copy.deepcopy(self)
tb_copy.selected__columns__='sum('+column+')'
tb_copy.table__type__="computation"
return tb_copy
def avg(self,column):
tb_copy=copy.deepcopy(self)
tb_copy.selected__columns__='avg('+column+')'
tb_copy.table__type__="computation"
return tb_copy
def count(self,column="*"):
tb_copy=copy.deepcopy(self)
tb_copy.selected__columns__='count('+column+')'
tb_copy.table__type__="computation"
return tb_copy
def find(self, **pri_key_with_val):
"""This is a method for finding a single specific record by using it's
primary key(s), here the argument to this method is the dict which
contains primary key(s) and it's/their corresponding value(s), the
format of argument is { primary_key1: value1, primary_key2: value2, ...}
"""
if len(pri_key_with_val)==0:
raise Exception("Invalid arguments, Please pass primary key(s) for finding your record")
primary_keys=pri_key_with_val.keys()
        if set(pri_key_with_val) != set(self.primary__keys__): #if the user provides a non-primary key argument
            raise Exception("You have passed non-primary key argument(s)")
list_of_strings=[]
for key in pri_key_with_val:
if isinstance(pri_key_with_val[key],str):
list_of_strings.append( key+"='"+str(pri_key_with_val[key])+"'" )
else:
list_of_strings.append( key+"="+str(pri_key_with_val[key]) )
condition=" and ".join(list_of_strings)
record=self.where(condition)
if len(record)==0:
return None
else:
return record[0]
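# Illustrative sketch: join() produces a FlatTable, on() supplies the join
# condition, and select()/where() then behave much as on a single table.
# The "users"/"orders" tables and their columns are placeholders.
def _example_join_query():
    """Hedged example of an inner join with a filter on the joined result."""
    users = actual_table("users")
    joined = users.join("orders").on("users.id = orders.user_id")
    return joined.select("users.name", "orders.total").where("orders.total > 100")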
class Record(object):
"""This is a class for defining records as objects,
It generally produce objects which corresponds to
records extracted from a certain database table
"""
def __init__(self, table):
"""This is a constructor which initializes record object with import parameters
from table object which is passed as the argument to it
"""
self.table__name__=table.table__name__
self.table__columns__=table.table__columns__
self.primary__keys__=table.primary__keys__
def get_query_values(self, data):
"""This method format string to be used as condition in finding a record
during record deletion and update, it generally return a formated string
with values to be inserted in a db table
"""
list_of_strings=[]
for column in data:
val=getattr( self, str(column) )
if isinstance(val,str):
list_of_strings.append( column+"='"+str(val)+"'" )
else:
list_of_strings.append( column+"="+str(val) )
formated_str=" and ".join( list_of_strings )
return formated_str
def update(self, *update_values, **data):
"""This is the actual method for updating a specific record in a database
with arguments as column names and their corresponding values for the
record
"""
if len(update_values)==0 and len(data)>0:
values=get_query_condition(data)
condition=self.get_query_values(self.primary__keys__)
sql_statement="update "+ str(self.table__name__)+" set "+values+" where "+ condition
elif len(update_values)==1 and isinstance(update_values[0], dict) and len(data)==0:
values=get_query_condition(update_values[0])
condition=self.get_query_values(self.primary__keys__)
sql_statement="update "+ str(self.table__name__)+" set "+values+" where "+ condition
_execute(sql_statement)
def delete(self):
"""This is the actual method for deleting a specific record
in a database
"""
condition=self.get_query_values(self.primary__keys__)
sql_statement="delete from "+ str(self.table__name__)+" where "+ condition
_execute(sql_statement)
class read_only_records(tuple):
"""This is a class for converting a normal tuple into a custom tuple
which has some import methods like count, get etc, for record
manipulations
"""
def count(self):
"""This is a method for counting records
"""
return len(self)
def get_column_values(self,col_name):
"""This returns all values in a given column
"""
for record in self:
yield getattr(record, col_name)
def get(self,col_name):
"""This is a method for extracting values in a specified column,
it takes string argument as column name from which values are
        supposed to be extracted
"""
col_vals=tuple(self.get_column_values(col_name))
return col_vals
def getd(self,col_name):
"""This is a method for extracting distinct values in a specified column,
it takes string argument as column name from which values are
        supposed to be extracted
"""
return tuple(set(self.get(col_name)))
def ensure_one(self):
"""This is a method for ensuring that only one record is returned and not
a tuple or custom_tuple of records
"""
if len(self)==1:
return self[0]
else:
raise Exception("There is more than one records")
class read_write_records(read_only_records):
"""This is a class for converting a normal tuple into a custom tuple
which has some import methods like update, delete etc, for record
manipulations
"""
def update(self, *update_values, **data):
"""This is a method helper for updating a group of specific records
in a database with arguments as column names and their corresponding
values for the record
"""
for record in self:
record.update(*update_values, **data)
def delete(self):
"""This is a method helper for deleting a group of specific records in a
database
"""
for record in self:
record.delete()
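# Illustrative sketch: where()/get() return read_write_records, a tuple
# subclass, so counting, column extraction and bulk updates chain directly off
# a query.  The "users" table and its columns are placeholders.
def _example_bulk_record_helpers():
    """Hedged example of the bulk helpers on a query result."""
    minors = actual_table("users").where("age", "<", 18)
    print(minors.count())              # number of matching records
    print(minors.get("name"))          # one column's values as a tuple
    minors.update(status="minor")      # update every matched record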
| gpl-3.0 | 465,960,330,465,662,300 | 44.004515 | 216 | 0.610222 | false |
lancezlin/ml_template_py | lib/python2.7/site-packages/nbconvert/preprocessors/highlightmagics.py | 9 | 3251 | """This preprocessor detect cells using a different language through
magic extensions such as `%%R` or `%%octave`. Cell's metadata is marked
so that the appropriate highlighter can be used in the `highlight`
filter.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function, absolute_import
import re
from .base import Preprocessor
from traitlets import Dict
class HighlightMagicsPreprocessor(Preprocessor):
"""
Detects and tags code cells that use a different languages than Python.
"""
# list of magic language extensions and their associated pygment lexers
default_languages = Dict({
'%%R': 'r',
'%%bash': 'bash',
'%%cython': 'cython',
'%%javascript': 'javascript',
'%%julia': 'julia',
'%%latex': 'latex',
'%%octave': 'octave',
'%%perl': 'perl',
'%%ruby': 'ruby',
'%%sh': 'sh',
})
# user defined language extensions
languages = Dict(
help=("Syntax highlighting for magic's extension languages. "
"Each item associates a language magic extension such as %%R, "
"with a pygments lexer such as r.")
).tag(config=True)
def __init__(self, config=None, **kw):
"""Public constructor"""
super(HighlightMagicsPreprocessor, self).__init__(config=config, **kw)
# Update the default languages dict with the user configured ones
self.default_languages.update(self.languages)
# build a regular expression to catch language extensions and choose
# an adequate pygments lexer
any_language = "|".join(self.default_languages.keys())
self.re_magic_language = re.compile(
r'^\s*({0})\s+'.format(any_language))
def which_magic_language(self, source):
"""
When a cell uses another language through a magic extension,
the other language is returned.
If no language magic is detected, this function returns None.
Parameters
----------
source: str
Source code of the cell to highlight
"""
m = self.re_magic_language.match(source)
if m:
# By construction of the re, the matched language must be in the
# languages dictionary
return self.default_languages[m.group(1)]
else:
return None
def preprocess_cell(self, cell, resources, cell_index):
"""
Tags cells using a magic extension language
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
# Only tag code cells
if cell.cell_type == "code":
magic_language = self.which_magic_language(cell.source)
if magic_language:
cell['metadata']['magics_language'] = magic_language
return cell, resources
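# Illustrative sketch (not part of the preprocessor): what the language
# detection above reports for two cell sources.  Constructing the preprocessor
# without an explicit config is assumed to be acceptable for this quick check.
def _example_magic_detection():
    pre = HighlightMagicsPreprocessor()
    assert pre.which_magic_language("%%R library(ggplot2)") == "r"
    assert pre.which_magic_language("print('plain python')") is None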
| mit | -8,252,621,789,074,037,000 | 31.838384 | 78 | 0.604429 | false |
piffey/ansible | lib/ansible/modules/network/cloudengine/ce_netstream_aging.py | 43 | 18023 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_netstream_aging
version_added: "2.4"
short_description: Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
description:
- Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
author: YangYang (@CloudEngine-Ansible)
options:
timeout_interval:
description:
- Netstream timeout interval.
              If the type is active, the interval is 1-60 (minutes).
              If the type is inactive, the interval is 5-600 (seconds).
default: 30
type:
description:
- Specifies the packet type of netstream timeout active interval.
choices: ['ip', 'vxlan']
state:
description:
- Specify desired state of the resource.
choices: ['present', 'absent']
default: present
timeout_type:
description:
- Netstream timeout type.
choices: ['active', 'inactive', 'tcp-session', 'manual']
manual_slot:
description:
- Specifies the slot number of netstream manual timeout.
"""
EXAMPLES = '''
- name: netstream aging module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure netstream ip timeout active interval , the interval is 40 minutes.
ce_netstream_aging:
timeout_interval: 40
type: ip
timeout_type: active
state: present
provider: "{{ cli }}"
- name: Configure netstream vxlan timeout active interval , the interval is 40 minutes.
ce_netstream_aging:
timeout_interval: 40
type: vxlan
timeout_type: active
      state: present
provider: "{{ cli }}"
- name: Delete netstream ip timeout active interval , set the ip timeout interval to 30 minutes.
ce_netstream_aging:
type: ip
timeout_type: active
state: absent
provider: "{{ cli }}"
- name: Delete netstream vxlan timeout active interval , set the vxlan timeout interval to 30 minutes.
ce_netstream_aging:
type: vxlan
timeout_type: active
state: absent
provider: "{{ cli }}"
- name: Enable netstream ip tcp session timeout.
ce_netstream_aging:
type: ip
timeout_type: tcp-session
state: present
provider: "{{ cli }}"
- name: Enable netstream vxlan tcp session timeout.
ce_netstream_aging:
type: vxlan
timeout_type: tcp-session
state: present
provider: "{{ cli }}"
- name: Disable netstream ip tcp session timeout.
ce_netstream_aging:
type: ip
timeout_type: tcp-session
state: absent
provider: "{{ cli }}"
- name: Disable netstream vxlan tcp session timeout.
ce_netstream_aging:
type: vxlan
timeout_type: tcp-session
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"timeout_interval": "40",
"type": "ip",
"state": "absent",
"timeout_type": active}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"active_timeout": [
{
"ip": "40",
"vxlan": 30
}
],
"inactive_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"tcp_timeout": [
{
"ip": "disable",
"vxlan": "disable"
}
]}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"active_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"inactive_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"tcp_timeout": [
{
"ip": "disable",
"vxlan": "disable"
}
]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["undo netstream timeout ip active 40"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
class NetStreamAging(object):
"""
Manages netstream aging.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.timeout_interval = self.module.params['timeout_interval']
self.type = self.module.params['type']
self.state = self.module.params['state']
self.timeout_type = self.module.params['timeout_type']
self.manual_slot = self.module.params['manual_slot']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
# local parameters
self.existing["active_timeout"] = list()
self.existing["inactive_timeout"] = list()
self.existing["tcp_timeout"] = list()
self.end_state["active_timeout"] = list()
self.end_state["inactive_timeout"] = list()
self.end_state["tcp_timeout"] = list()
self.active_changed = False
self.inactive_changed = False
self.tcp_changed = False
def init_module(self):
"""init module"""
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd)
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd)
def get_exist_timer_out_para(self):
"""Get exist netstream timeout parameters"""
active_tmp = dict()
inactive_tmp = dict()
tcp_tmp = dict()
active_tmp["ip"] = "30"
active_tmp["vxlan"] = "30"
inactive_tmp["ip"] = "30"
inactive_tmp["vxlan"] = "30"
tcp_tmp["ip"] = "absent"
tcp_tmp["vxlan"] = "absent"
flags = list()
exp = " | ignore-case include netstream timeout"
flags.append(exp)
config = get_config(self.module, flags)
if config:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem = config_mem.lstrip()
config_mem_list = config_mem.split(' ')
if config_mem_list[2] == "ip":
if config_mem_list[3] == "active":
active_tmp["ip"] = config_mem_list[4]
if config_mem_list[3] == "inactive":
inactive_tmp["ip"] = config_mem_list[4]
if config_mem_list[3] == "tcp-session":
tcp_tmp["ip"] = "present"
if config_mem_list[2] == "vxlan":
if config_mem_list[4] == "active":
active_tmp["vxlan"] = config_mem_list[5]
if config_mem_list[4] == "inactive":
inactive_tmp["vxlan"] = config_mem_list[5]
if config_mem_list[4] == "tcp-session":
tcp_tmp["vxlan"] = "present"
self.existing["active_timeout"].append(active_tmp)
self.existing["inactive_timeout"].append(inactive_tmp)
self.existing["tcp_timeout"].append(tcp_tmp)
def get_end_timer_out_para(self):
"""Get end netstream timeout parameters"""
active_tmp = dict()
inactive_tmp = dict()
tcp_tmp = dict()
active_tmp["ip"] = "30"
active_tmp["vxlan"] = "30"
inactive_tmp["ip"] = "30"
inactive_tmp["vxlan"] = "30"
tcp_tmp["ip"] = "absent"
tcp_tmp["vxlan"] = "absent"
flags = list()
exp = " | ignore-case include netstream timeout"
exp = "| ignore-case include evpn-overlay enable"
flags.append(exp)
config = get_config(self.module, flags)
if config:
config = config.lstrip()
config_list = config.split('\n')
for config_mem in config_list:
config_mem = config_mem.lstrip()
config_mem_list = config_mem.split(' ')
if config_mem_list[2] == "ip":
if config_mem_list[3] == "active":
active_tmp["ip"] = config_mem_list[4]
if config_mem_list[3] == "inactive":
inactive_tmp["ip"] = config_mem_list[4]
if config_mem_list[3] == "tcp-session":
tcp_tmp["ip"] = "present"
if config_mem_list[2] == "vxlan":
if config_mem_list[4] == "active":
active_tmp["vxlan"] = config_mem_list[5]
if config_mem_list[4] == "inactive":
inactive_tmp["vxlan"] = config_mem_list[5]
if config_mem_list[4] == "tcp-session":
tcp_tmp["vxlan"] = "present"
self.end_state["active_timeout"].append(active_tmp)
self.end_state["inactive_timeout"].append(inactive_tmp)
self.end_state["tcp_timeout"].append(tcp_tmp)
def check_params(self):
"""Check all input params"""
# interval check
if not str(self.timeout_interval).isdigit():
self.module.fail_json(
msg='Error: Timeout interval should be numerical.')
if self.timeout_type == "active":
if int(self.timeout_interval) < 1 or int(self.timeout_interval) > 60:
self.module.fail_json(
msg="Error: Active interval should between 1 - 60 minutes.")
if self.timeout_type == "inactive":
if int(self.timeout_interval) < 5 or int(self.timeout_interval) > 600:
self.module.fail_json(
msg="Error: Inactive interval should between 5 - 600 seconds.")
if self.timeout_type == "manual":
if not self.manual_slot:
self.module.fail_json(
msg="Error: If use manual timeout mode,slot number is needed.")
if not str(self.manual_slot).isdigit():
self.module.fail_json(
msg='Error: Slot number should be numerical.')
def get_proposed(self):
"""get proposed info"""
if self.timeout_interval:
self.proposed["timeout_interval"] = self.timeout_interval
if self.timeout_type:
self.proposed["timeout_type"] = self.timeout_type
if self.type:
self.proposed["type"] = self.type
if self.state:
self.proposed["state"] = self.state
if self.manual_slot:
self.proposed["manual_slot"] = self.manual_slot
def get_existing(self):
"""get existing info"""
active_tmp = dict()
inactive_tmp = dict()
tcp_tmp = dict()
self.get_exist_timer_out_para()
if self.timeout_type == "active":
for active_tmp in self.existing["active_timeout"]:
if self.state == "present":
if str(active_tmp[self.type]) != self.timeout_interval:
self.active_changed = True
else:
if self.timeout_interval != "30":
if str(active_tmp[self.type]) != "30":
if str(active_tmp[self.type]) != self.timeout_interval:
self.module.fail_json(
                                    msg='Error: The specified active interval does not exist.')
if str(active_tmp[self.type]) != "30":
self.timeout_interval = active_tmp[self.type]
self.active_changed = True
if self.timeout_type == "inactive":
for inactive_tmp in self.existing["inactive_timeout"]:
if self.state == "present":
if str(inactive_tmp[self.type]) != self.timeout_interval:
self.inactive_changed = True
else:
if self.timeout_interval != "30":
if str(inactive_tmp[self.type]) != "30":
if str(inactive_tmp[self.type]) != self.timeout_interval:
self.module.fail_json(
                                    msg='Error: The specified inactive interval does not exist.')
if str(inactive_tmp[self.type]) != "30":
self.timeout_interval = inactive_tmp[self.type]
self.inactive_changed = True
if self.timeout_type == "tcp-session":
for tcp_tmp in self.existing["tcp_timeout"]:
if str(tcp_tmp[self.type]) != self.state:
self.tcp_changed = True
def operate_time_out(self):
"""configure timeout parameters"""
cmd = ""
if self.timeout_type == "manual":
if self.type == "ip":
self.cli_add_command("quit")
cmd = "reset netstream cache ip slot %s" % self.manual_slot
self.cli_add_command(cmd)
elif self.type == "vxlan":
self.cli_add_command("quit")
cmd = "reset netstream cache vxlan inner-ip slot %s" % self.manual_slot
self.cli_add_command(cmd)
if not self.active_changed and not self.inactive_changed and not self.tcp_changed:
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
return
if self.active_changed or self.inactive_changed:
if self.type == "ip":
cmd = "netstream timeout ip %s %s" % (self.timeout_type, self.timeout_interval)
elif self.type == "vxlan":
cmd = "netstream timeout vxlan inner-ip %s %s" % (self.timeout_type, self.timeout_interval)
if self.state == "absent":
self.cli_add_command(cmd, undo=True)
else:
self.cli_add_command(cmd)
if self.timeout_type == "tcp-session" and self.tcp_changed:
if self.type == "ip":
if self.state == "present":
cmd = "netstream timeout ip tcp-session"
else:
cmd = "undo netstream timeout ip tcp-session"
elif self.type == "vxlan":
if self.state == "present":
cmd = "netstream timeout vxlan inner-ip tcp-session"
else:
cmd = "undo netstream timeout vxlan inner-ip tcp-session"
self.cli_add_command(cmd)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def get_end_state(self):
"""get end state info"""
self.get_end_timer_out_para()
def work(self):
"""worker"""
self.check_params()
self.get_existing()
self.get_proposed()
self.operate_time_out()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
timeout_interval=dict(required=False, type='str', default='30'),
type=dict(required=False, choices=['ip', 'vxlan']),
state=dict(required=False, choices=['present', 'absent'], default='present'),
timeout_type=dict(required=False, choices=['active', 'inactive', 'tcp-session', 'manual']),
manual_slot=dict(required=False, type='str'),
)
argument_spec.update(ce_argument_spec)
module = NetStreamAging(argument_spec)
module.work()
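# Illustrative sketch (not part of the original module): the argument_spec above
# implies task parameters of this shape; the values are placeholders only.
#     params = dict(timeout_interval='60', type='ip',
#                   timeout_type='active', state='present')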
if __name__ == '__main__':
main()
| gpl-3.0 | 1,614,553,412,140,755,500 | 34.339216 | 107 | 0.543972 | false |
ccastell/Transfer-System | Website/env/lib/python3.5/site-packages/django/apps/registry.py | 51 | 17239 | import sys
import threading
import warnings
from collections import Counter, OrderedDict, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils import lru_cache
from .config import AppConfig
class Apps(object):
"""
A registry that stores the configuration of installed applications.
It also keeps track of models eg. to provide reverse-relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the master registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], 'apps'):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(OrderedDict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = OrderedDict()
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# Lock for thread-safe population.
self._lock = threading.Lock()
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the master registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Loads application configurations and models.
This method imports each application module and then each model module.
It is thread safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# app_config should be pristine, otherwise the code below won't
# guarantee that the order matches the order in INSTALLED_APPS.
if self.app_configs:
raise RuntimeError("populate() isn't reentrant")
# Phase 1: initialize app configs and import app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label)
self.app_configs[app_config.label] = app_config
app_config.apps = self
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values())
duplicates = [
name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates))
self.apps_ready = True
# Phase 2: import models modules.
for app_config in self.app_configs.values():
app_config.import_models()
self.clear_cache()
self.models_ready = True
# Phase 3: run ready() methods of app configs.
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
def check_apps_ready(self):
"""
Raises an exception if all apps haven't been imported yet.
"""
if not self.apps_ready:
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""
Raises an exception if all models haven't been imported yet.
"""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""
Imports applications and returns an iterable of app configs.
"""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
Imports applications and returns an app config for the given label.
Raises LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@lru_cache.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Returns a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(list(app_config.get_models(include_auto_created, include_swapped)))
return result
def get_model(self, app_label, model_name=None, require_ready=True):
"""
Returns the model matching the given app_label and model_name.
As a shortcut, this function also accepts a single argument in the
form <app_label>.<model_name>.
model_name is case-insensitive.
Raises LookupError if no application exists with this label, or no
model exists with this name in the application. Raises ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
if require_ready:
self.check_models_ready()
else:
self.check_apps_ready()
if model_name is None:
app_label, model_name = app_label.split('.')
app_config = self.get_app_config(app_label)
if not require_ready and app_config.models is None:
app_config.import_models()
return app_config.get_model(model_name, require_ready=require_ready)
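    # Illustrative usage (not part of the original source; assumes the
    # django.contrib.auth app is installed):
    #     user_model = apps.get_model("auth", "User")
    #     user_model = apps.get_model("auth.User")  # equivalent dotted shortcut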
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (model.__name__ == app_models[model_name].__name__ and
model.__module__ == app_models[model_name].__module__):
warnings.warn(
"Model '%s.%s' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models." % (app_label, model_name),
RuntimeWarning, stacklevel=2)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s." %
(model_name, app_label, app_models[model_name], model))
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Checks whether an application with this name exists in the registry.
app_name is the full name of the app eg. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Returns the app config for the inner application in case of nesting.
Returns None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name):]
if subpath == '' or subpath[0] == '.':
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError(
"Model '%s.%s' not registered." % (app_label, model_name))
return model
@lru_cache.lru_cache(maxsize=None)
def get_swappable_settings_name(self, to_string):
"""
For a given model string (e.g. "auth.User"), return the name of the
corresponding settings name if it refers to a swappable model. If the
referred model is not swappable, return None.
This method is decorated with lru_cache because it's performance
critical when it comes to migrations. Since the swappable settings don't
change after Django has loaded the settings, there is no reason to get
the respective settings attribute over and over again.
"""
for model in self.get_models(include_swapped=True):
swapped = model._meta.swapped
# Is this model swapped out for the model given by to_string?
if swapped and swapped == to_string:
return model._meta.swappable
# Is this model swappable and the one given by to_string?
if model._meta.swappable and model._meta.label == to_string:
return model._meta.swappable
return None
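    # Illustrative usage (not part of the original source; assumes the default
    # swappable auth user model):
    #     apps.get_swappable_settings_name("auth.User")   # -> "AUTH_USER_MODEL"
    #     apps.get_swappable_settings_name("auth.Group")  # -> None, not swappable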
def set_available_apps(self, available):
"""
Restricts the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
        This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = set(app_config.name for app_config in self.get_app_configs())
if not available.issubset(installed):
raise ValueError(
"Available apps isn't a subset of installed apps, extra apps: %s"
% ", ".join(available - installed)
)
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict(
(label, app_config)
for label, app_config in self.app_configs.items()
if app_config.name in available)
self.clear_cache()
def unset_available_apps(self):
"""
Cancels a previous call to set_available_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enables a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (eg. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict()
self.apps_ready = self.models_ready = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""
Cancels a previous call to set_installed_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clears all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
# Circumvent self.get_models() to prevent that the cache is refilled.
# This particularly prevents that an empty value is cached while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# Base case: no arguments, just execute the function.
if not model_keys:
function()
# Recursive case: take the head of model_keys, wait for the
# corresponding model class to be imported and registered, then apply
# that argument to the supplied function. Pass the resulting partial
# to lazy_model_operation() along with the remaining model args and
# repeat until all models are loaded and all arguments are applied.
else:
next_model, more_models = model_keys[0], model_keys[1:]
# This will be executed after the class corresponding to next_model
# has been imported and registered. The `func` attribute provides
# duck-type compatibility with partials.
def apply_next_model(model):
next_function = partial(apply_next_model.func, model)
self.lazy_model_operation(next_function, *more_models)
apply_next_model.func = function
# If the model has already been imported and registered, partially
# apply it to the function now. If not, add it to the list of
# pending operations for the model, where it will be executed with
# the model class as its sole argument once the model is ready.
try:
model_class = self.get_registered_model(*next_model)
except LookupError:
self._pending_operations[next_model].append(apply_next_model)
else:
apply_next_model(model_class)
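    # Illustrative usage (the "polls" app, "question" model and callback below
    # are hypothetical, not part of the original source):
    #     def connect_handlers(question_model):
    #         pass  # runs once polls.Question has been imported and registered
    #     apps.lazy_model_operation(connect_handlers, ("polls", "question"))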
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of `Apps.register_model()`.
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
apps = Apps(installed_apps=None)
| apache-2.0 | 6,496,031,672,801,730,000 | 39.562353 | 93 | 0.614711 | false |
BurtBiel/azure-cli | src/command_modules/azure-cli-keyvault/azure/cli/command_modules/keyvault/custom.py | 1 | 9777 | #---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.keyvault.models import (VaultCreateOrUpdateParameters,
VaultProperties,
AccessPolicyEntry,
Permissions,
Sku,
SkuName)
from azure.graphrbac import GraphRbacManagementClient
from azure.cli.core._util import CLIError
import azure.cli.core._logging as _logging
logger = _logging.get_az_logger(__name__)
def list_keyvault(client, resource_group_name=None):
''' List Vaults. '''
vault_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(vault_list)
def _get_current_user_object_id(graph_client):
try:
current_user = graph_client.objects.get_current_user()
if current_user and current_user.object_id: #pylint:disable=no-member
return current_user.object_id #pylint:disable=no-member
except CloudError:
pass
def _get_object_id_by_spn(graph_client, spn):
accounts = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(spn)))
if not accounts:
logger.warning("Unable to find user with spn '%s'", spn)
return
if len(accounts) > 1:
logger.warning("Multiple service principals found with spn '%s'. "\
"You can avoid this by specifying object id.", spn)
return
return accounts[0].object_id
def _get_object_id_by_upn(graph_client, upn):
accounts = list(graph_client.users.list(filter="userPrincipalName eq '{}'".format(upn)))
if not accounts:
logger.warning("Unable to find user with upn '%s'", upn)
return
if len(accounts) > 1:
logger.warning("Multiple users principals found with upn '%s'. "\
"You can avoid this by specifying object id.", upn)
return
return accounts[0].object_id
def _get_object_id_from_subscription(graph_client, subscription):
if subscription['user']:
if subscription['user']['type'] == 'user':
return _get_object_id_by_upn(graph_client, subscription['user']['name'])
elif subscription['user']['type'] == 'servicePrincipal':
return _get_object_id_by_spn(graph_client, subscription['user']['name'])
else:
logger.warning("Unknown user type '%s'", subscription['user']['type'])
else:
logger.warning('Current credentials are not from a user or service principal. '\
'Azure Key Vault does not work with certificate credentials.')
def _get_object_id(graph_client, subscription=None, spn=None, upn=None):
if spn:
return _get_object_id_by_spn(graph_client, spn)
if upn:
return _get_object_id_by_upn(graph_client, upn)
return _get_object_id_from_subscription(graph_client, subscription)
def create_keyvault(client, resource_group_name, vault_name, location, #pylint:disable=too-many-arguments
sku=SkuName.standard.value,
enabled_for_deployment=None,
enabled_for_disk_encryption=None,
enabled_for_template_deployment=None,
no_self_perms=False,
tags=None):
from azure.cli.core._profile import Profile
profile = Profile()
cred, _, tenant_id = profile.get_login_credentials(for_graph_client=True)
graph_client = GraphRbacManagementClient(cred, tenant_id)
subscription = profile.get_subscription()
if no_self_perms:
access_policies = []
else:
# TODO Use the enums instead of strings when new keyvault SDK is released
# https://github.com/Azure/azure-sdk-for-python/blob/dev/azure-mgmt-keyvault/
# azure/mgmt/keyvault/models/key_vault_management_client_enums.py
permissions = Permissions(keys=['get',
'create',
'delete',
'list',
'update',
'import',
'backup',
'restore'],
secrets=['all'])
object_id = _get_current_user_object_id(graph_client)
if not object_id:
object_id = _get_object_id(graph_client, subscription=subscription)
if not object_id:
raise CLIError('Cannot create vault.\n'
'Unable to query active directory for information '\
'about the current user.\n'
'You may try the --no-self-perms flag to create a vault'\
' without permissions.')
access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
object_id=object_id,
permissions=permissions)]
properties = VaultProperties(tenant_id=tenant_id,
sku=Sku(name=sku),
access_policies=access_policies,
vault_uri=None,
enabled_for_deployment=enabled_for_deployment,
enabled_for_disk_encryption=enabled_for_disk_encryption,
enabled_for_template_deployment=enabled_for_template_deployment)
parameters = VaultCreateOrUpdateParameters(location=location,
tags=tags,
properties=properties)
return client.create_or_update(resource_group_name=resource_group_name,
vault_name=vault_name,
parameters=parameters)
create_keyvault.__doc__ = VaultProperties.__doc__
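# Illustrative call (sketch only; the client object is assumed to be an
# authenticated vaults operations client, and the names/location are placeholders):
#     result = create_keyvault(client, resource_group_name='my-rg',
#                              vault_name='my-vault', location='westus')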
def _object_id_args_helper(object_id, spn, upn):
if not object_id:
from azure.cli.core._profile import Profile
profile = Profile()
cred, _, tenant_id = profile.get_login_credentials(for_graph_client=True)
graph_client = GraphRbacManagementClient(cred, tenant_id)
object_id = _get_object_id(graph_client, spn=spn, upn=upn)
if not object_id:
raise CLIError('Unable to get object id from principal name.')
return object_id
def set_policy(client, resource_group_name, vault_name, #pylint:disable=too-many-arguments
object_id=None, spn=None, upn=None, perms_to_keys=None, perms_to_secrets=None):
object_id = _object_id_args_helper(object_id, spn, upn)
vault = client.get(resource_group_name=resource_group_name,
vault_name=vault_name)
# Find the existing policy to set
policy = next((p for p in vault.properties.access_policies \
if object_id.lower() == p.object_id.lower() and \
vault.properties.tenant_id.lower() == p.tenant_id.lower()), None)
if not policy:
# Add new policy as none found
vault.properties.access_policies.append(AccessPolicyEntry(
tenant_id=vault.properties.tenant_id,
object_id=object_id,
permissions=Permissions(keys=perms_to_keys,
secrets=perms_to_secrets)))
else:
# Modify existing policy.
# If perms_to_keys is not set, use prev. value (similarly with perms_to_secrets).
keys = policy.permissions.keys if perms_to_keys is None else perms_to_keys
secrets = policy.permissions.secrets if perms_to_secrets is None else perms_to_secrets
policy.permissions = Permissions(keys=keys, secrets=secrets)
return client.create_or_update(resource_group_name=resource_group_name,
vault_name=vault_name,
parameters=VaultCreateOrUpdateParameters(
location=vault.location,
tags=vault.tags,
properties=vault.properties))
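# Illustrative call (sketch only; resource names, UPN and permission list are
# placeholders). For an existing policy, passing perms_to_keys=None keeps the
# previous key permissions, per the logic above:
#     set_policy(client, 'my-rg', 'my-vault', upn='user@example.com',
#                perms_to_secrets=['get', 'list', 'set'])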
def delete_policy(client, resource_group_name, vault_name, object_id=None, spn=None, upn=None): #pylint:disable=too-many-arguments
object_id = _object_id_args_helper(object_id, spn, upn)
vault = client.get(resource_group_name=resource_group_name,
vault_name=vault_name)
prev_policies_len = len(vault.properties.access_policies)
vault.properties.access_policies = [p for p in vault.properties.access_policies if \
vault.properties.tenant_id.lower() != p.tenant_id.lower() \
or object_id.lower() != p.object_id.lower()]
if len(vault.properties.access_policies) == prev_policies_len:
raise CLIError('No matching policies found')
return client.create_or_update(resource_group_name=resource_group_name,
vault_name=vault_name,
parameters=VaultCreateOrUpdateParameters(
location=vault.location,
tags=vault.tags,
properties=vault.properties))
| mit | 588,874,103,861,050,900 | 51.848649 | 130 | 0.563056 | false |
gaolichuang/py-essential | tests/test_imageutils.py | 2 | 7736 | # Copyright (C) 2012 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from essential import imageutils
from essential import test
load_tests = testscenarios.load_tests_apply_scenarios
class ImageUtilsRawTestCase(test.BaseTestCase):
_image_name = [
('disk_config', dict(image_name='disk.config')),
]
_file_format = [
('raw', dict(file_format='raw')),
]
_virtual_size = [
('64M', dict(virtual_size='64M',
exp_virtual_size=67108864)),
('64M_with_byte_hint', dict(virtual_size='64M (67108844 bytes)',
exp_virtual_size=67108844)),
('64M_byte', dict(virtual_size='67108844',
exp_virtual_size=67108844)),
('4.4M', dict(virtual_size='4.4M',
exp_virtual_size=4613735)),
('4.4M_with_byte_hint', dict(virtual_size='4.4M (4592640 bytes)',
exp_virtual_size=4592640)),
('2K', dict(virtual_size='2K',
exp_virtual_size=2048)),
('2K_with_byte_hint', dict(virtual_size='2K (2048 bytes)',
exp_virtual_size=2048)),
]
_disk_size = [
('96K', dict(disk_size='96K',
exp_disk_size=98304)),
('96K_byte', dict(disk_size='963434',
exp_disk_size=963434)),
('3.1M', dict(disk_size='3.1G',
exp_disk_size=3328599655)),
]
_garbage_before_snapshot = [
('no_garbage', dict(garbage_before_snapshot=None)),
('garbage_before_snapshot_list', dict(garbage_before_snapshot=False)),
('garbage_after_snapshot_list', dict(garbage_before_snapshot=True)),
]
_snapshot_count = [
('no_snapshots', dict(snapshot_count=None)),
('one_snapshots', dict(snapshot_count=1)),
('three_snapshots', dict(snapshot_count=3)),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls._image_name,
cls._file_format,
cls._virtual_size,
cls._disk_size,
cls._garbage_before_snapshot,
cls._snapshot_count)
def _initialize_img_info(self):
return ('image: %s' % self.image_name,
'file_format: %s' % self.file_format,
'virtual_size: %s' % self.virtual_size,
'disk_size: %s' % self.disk_size)
def _insert_snapshots(self, img_info):
img_info = img_info + ('Snapshot list:',)
img_info = img_info + ('ID '
'TAG '
'VM SIZE '
'DATE '
'VM CLOCK',)
for i in range(self.snapshot_count):
img_info = img_info + ('%d '
'd9a9784a500742a7bb95627bb3aace38 '
'0 2012-08-20 10:52:46 '
'00:00:00.000' % (i + 1),)
return img_info
def _base_validation(self, image_info):
self.assertEqual(image_info.image, self.image_name)
self.assertEqual(image_info.file_format, self.file_format)
self.assertEqual(image_info.virtual_size, self.exp_virtual_size)
self.assertEqual(image_info.disk_size, self.exp_disk_size)
if self.snapshot_count is not None:
self.assertEqual(len(image_info.snapshots), self.snapshot_count)
def test_qemu_img_info(self):
img_info = self._initialize_img_info()
if self.garbage_before_snapshot is True:
img_info = img_info + ('blah BLAH: bb',)
if self.snapshot_count is not None:
img_info = self._insert_snapshots(img_info)
if self.garbage_before_snapshot is False:
img_info = img_info + ('junk stuff: bbb',)
example_output = '\n'.join(img_info)
image_info = imageutils.QemuImgInfo(example_output)
self._base_validation(image_info)
ImageUtilsRawTestCase.generate_scenarios()
class ImageUtilsQemuTestCase(ImageUtilsRawTestCase):
_file_format = [
('qcow2', dict(file_format='qcow2')),
]
_qcow2_cluster_size = [
('65536', dict(cluster_size='65536', exp_cluster_size=65536)),
]
_qcow2_encrypted = [
('no_encryption', dict(encrypted=None)),
('encrypted', dict(encrypted='yes')),
]
_qcow2_backing_file = [
('no_backing_file', dict(backing_file=None)),
('backing_file_path',
dict(backing_file='/var/lib/nova/a328c7998805951a_2',
exp_backing_file='/var/lib/nova/a328c7998805951a_2')),
('backing_file_path_with_actual_path',
dict(backing_file='/var/lib/nova/a328c7998805951a_2 '
'(actual path: /b/3a988059e51a_2)',
exp_backing_file='/b/3a988059e51a_2')),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls._image_name,
cls._file_format,
cls._virtual_size,
cls._disk_size,
cls._garbage_before_snapshot,
cls._snapshot_count,
cls._qcow2_cluster_size,
cls._qcow2_encrypted,
cls._qcow2_backing_file)
def test_qemu_img_info(self):
img_info = self._initialize_img_info()
img_info = img_info + ('cluster_size: %s' % self.cluster_size,)
if self.backing_file is not None:
img_info = img_info + ('backing file: %s' %
self.backing_file,)
if self.encrypted is not None:
img_info = img_info + ('encrypted: %s' % self.encrypted,)
if self.garbage_before_snapshot is True:
img_info = img_info + ('blah BLAH: bb',)
if self.snapshot_count is not None:
img_info = self._insert_snapshots(img_info)
if self.garbage_before_snapshot is False:
img_info = img_info + ('junk stuff: bbb',)
example_output = '\n'.join(img_info)
image_info = imageutils.QemuImgInfo(example_output)
self._base_validation(image_info)
self.assertEqual(image_info.cluster_size, self.exp_cluster_size)
if self.backing_file is not None:
self.assertEqual(image_info.backing_file,
self.exp_backing_file)
if self.encrypted is not None:
self.assertEqual(image_info.encrypted, self.encrypted)
ImageUtilsQemuTestCase.generate_scenarios()
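# Illustrative sketch (not part of the original tests): QemuImgInfo parses raw
# `qemu-img info` text of the shape exercised by the scenarios above, e.g.
#     info = imageutils.QemuImgInfo('image: disk.config\n'
#                                   'file_format: raw\n'
#                                   'virtual_size: 64M (67108844 bytes)\n'
#                                   'disk_size: 96K')
#     assert info.virtual_size == 67108844 and info.disk_size == 98304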
class ImageUtilsBlankTestCase(test.BaseTestCase):
def test_qemu_img_info_blank(self):
example_output = '\n'.join(['image: None', 'file_format: None',
'virtual_size: None', 'disk_size: None',
'cluster_size: None',
'backing_file: None'])
image_info = imageutils.QemuImgInfo()
self.assertEqual(str(image_info), example_output)
self.assertEqual(len(image_info.snapshots), 0)
| apache-2.0 | 7,356,216,962,662,390,000 | 37.874372 | 78 | 0.558816 | false |
hyiltiz/labyrinth | labyrinth_lib/Browser.py | 2 | 13346 | # Browser.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <[email protected]>
# - Andreas Sliwka <[email protected]>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
# Standard library
import tarfile
import os
import gettext
_ = gettext.gettext
# Gtk stuff
import gtk
if os.name != 'nt':
import gconf
import gtk.glade
import pango
import gobject
# Local imports
import utils
import MainWindow
from MapList import MapList
import TrayIcon
from . import __version__
AUTHORS = ['Don Scorgie <[email protected]>',
'Martin Schaaf <[email protected]>',
'Matthias Vogelgesang <[email protected]>',
'Andreas Sliwka <[email protected]>']
class Browser (gtk.Window):
COL_ID = 0
COL_TITLE = 1
COL_MODTIME = 2
def __init__(self, start_hidden, tray_icon):
super(Browser, self).__init__()
self.glade=gtk.glade.XML(utils.get_data_file_name('labyrinth.glade'))
self.view = self.glade.get_widget ('MainView')
self.populate_view ()
self.view.connect ('row-activated', self.open_row_cb)
self.view.connect ('cursor-changed', self.cursor_change_cb)
self.view_dependants = []
self.open_button = self.glade.get_widget('OpenButton')
self.delete_button = self.glade.get_widget('DeleteButton')
self.open_menu = self.glade.get_widget('open1')
self.delete_menu = self.glade.get_widget('delete1')
self.view_dependants.append (self.open_button)
self.view_dependants.append (self.delete_button)
self.view_dependants.append (self.open_menu)
self.view_dependants.append (self.delete_menu)
self.open_button.connect ('clicked', self.open_clicked)
self.glade.get_widget('NewButton').connect ('clicked', self.new_clicked)
self.delete_button.connect ('clicked', self.delete_clicked)
self.open_menu.connect ('activate', self.open_clicked)
self.glade.get_widget('new1').connect ('activate', self.new_clicked)
self.delete_menu.connect ('activate', self.delete_clicked)
self.glade.get_widget('import1').connect ('activate', self.import_clicked)
self.glade.get_widget('quit1').connect ('activate', self.quit_clicked)
self.glade.get_widget('about1').connect ('activate', self.about_clicked)
self.glade.get_widget('showhelp').connect ('activate', self.show_help_clicked)
for x in self.view_dependants:
x.set_sensitive(False)
self.main_window = self.glade.get_widget ('MapBrowser')
# set remembered size
if os.name != 'nt':
self.config_client = gconf.client_get_default()
self.config_client.add_dir ("/apps/labyrinth", gconf.CLIENT_PRELOAD_NONE)
width = self.config_client.get_int ('/apps/labyrinth/width')
height = self.config_client.get_int ('/apps/labyrinth/height')
utils.use_bezier_curves = self.config_client.get_bool ('/apps/labyrinth/curves')
if width == 0 or height == 0:
width = 400
height = 300
else:
width = 400
height = 300
view_sortable = self.view.get_model ()
view_sortable.connect ('sort-column-changed', self.sort_column_changed_cb)
if os.name != 'nt':
sort_order = self.config_client.get_int('/apps/labyrinth/map_sort_order')
column_id = self.config_client.get_int('/apps/labyrinth/map_sort_order_column')
view_sortable.set_sort_column_id (column_id, sort_order)
self.main_window.resize (width, height)
if os.name != 'nt':
try:
self.main_window.set_icon_name ('labyrinth')
except:
self.main_window.set_icon_from_file(utils.get_data_file_name('labyrinth.svg'))
else:
self.main_window.set_icon_from_file(utils.get_data_file_name('labyrinth-32.png'))
if tray_icon:
self.main_window.connect ('delete_event', self.toggle_main_window, None)
traymenu = gtk.Menu()
quit_item = gtk.MenuItem("Quit")
quit_item.connect("activate",self.quit_clicked)
traymenu.add(quit_item)
traymenu.show_all()
self.traymenu = traymenu
self.trayicon = TrayIcon.TrayIcon(
icon_name="labyrinth",
menu=traymenu,
activate=self.toggle_main_window)
else:
self.main_window.connect('delete_event', self.quit_clicked, None)
if start_hidden:
self.main_window.hide ()
else:
self.main_window.show_all ()
def toggle_main_window(self,*args):
if self.main_window.get_property("visible"):
self.main_window.hide()
else:
self.main_window.show()
return True
def map_title_cb (self, mobj, new_title, mobj1):
map = MapList.get_by_window(mobj)
if not map:
raise AttributeError ("What a mess, can't find the map")
map.title = new_title
def get_selected_map(self):
sel = self.view.get_selection ()
(model, it) = sel.get_selected ()
if it:
(num,) = MapList.tree_view_model.get (it, self.COL_ID)
return MapList.get_by_index(num)
return None
def cursor_change_cb (self, treeview):
sensitive = bool(self.get_selected_map())
for x in self.view_dependants:
x.set_sensitive(sensitive)
def open_map_filename (self, fname):
win = MainWindow.LabyrinthWindow (fname)
win.show ()
def open_map (self, map, imported=False):
win = MainWindow.LabyrinthWindow (map.filename, imported)
win.connect ("title-changed", self.map_title_cb)
win.connect ("window_closed", self.remove_map_cb)
win.connect ("file_saved", self.file_save_cb)
win.show ()
map.window = win
return (MapList.index(map), win)
def open_selected_map(self):
map = self.get_selected_map()
if map is None:
raise ValueError("you clicked the 'open' button but had no map selected")
if map.window:
print "Window for map '%s' is already open" % map.title
# may be the window should be raised?
else:
self.open_map (map)
def show_help_clicked(self, arg):
try:
gtk.show_uri(None, 'help:labyrinth', 0)
except gobject.GError, e:
print _('Unable to display help: %s') % str(e)
def about_clicked (self, arg):
about_dialog = gtk.AboutDialog ()
about_dialog.set_name ("Labyrinth")
about_dialog.set_version (__version__)
if os.name != 'nt':
try:
about_dialog.set_logo_icon_name("labyrinth")
except:
pass
else:
about_dialog.set_logo (gtk.gdk.pixbuf_new_from_file(
utils.get_data_file_name("labyrinth-32.png")))
about_dialog.set_license (
"Labyrinth is free software; you can redistribute it and/or modify "
"it under the terms of the GNU General Public Licence as published by "
"the Free Software Foundation; either version 2 of the Licence, or "
"(at your option) any later version."
"\n\n"
"Labyrinth is distributed in the hope that it will be useful, "
"but WITHOUT ANY WARRANTY; without even the implied warranty of "
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the "
"GNU General Public Licence for more details."
"\n\n"
"You should have received a copy of the GNU General Public Licence "
"along with Labyrinth; if not, write to the Free Software Foundation, Inc., "
"59 Temple Place, Suite 330, Boston, MA 02111-1307 USA")
about_dialog.set_wrap_license (True)
about_dialog.set_copyright ("2006-2008 Don Scorgie et. al")
about_dialog.set_authors (AUTHORS)
about_dialog.set_website ("http://code.google.com/p/labyrinth")
about_dialog.set_translator_credits (_("Translation by Don Scorgie"))
about_dialog.run ()
about_dialog.hide ()
del (about_dialog)
return
def open_clicked (self, button):
self.open_selected_map()
def open_row_cb (self, view, path, col):
self.open_selected_map ()
def new_clicked (self, button):
map = MapList.create_empty_map()
self.open_map(map)
def delete_clicked (self, button):
map = self.get_selected_map ()
if not map:
raise ValueError("You clicked on delete but had no map selected")
error_message = ""
if map.window:
error_message = _("The map cannot be deleted right now. Is it open?")
elif not map.filename:
error_message = _("The map has no associated filename.")
if error_message:
dialog = gtk.MessageDialog (self, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK,
_("Cannot delete this map"))
dialog.format_secondary_text (error_message)
dialog.run ()
dialog.hide ()
del (dialog)
return
dialog = gtk.MessageDialog (self, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO,
_("Do you really want to delete this Map?"))
resp = dialog.run ()
dialog.hide ()
del (dialog)
if resp != gtk.RESPONSE_YES:
return
MapList.delete (map)
self.view.emit ('cursor-changed')
def remove_map_cb (self, mobj, a):
map = MapList.get_by_window(mobj)
if map:
MapList.delete(map)
self.view.emit ('cursor-changed')
return
raise ValueError("Cant remove map of window %s" % mobj)
def file_save_cb (self, mobj, new_fname, mobj1):
map = MapList.get_by_window(mobj)
if map:
map.window = None
map.filename = new_fname
return
def import_clicked(self, button, other=None, *data):
chooser = gtk.FileChooserDialog(title=_("Open File"), action=gtk.FILE_CHOOSER_ACTION_OPEN, \
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
filtr = gtk.FileFilter ()
filtr.set_name(_('MAPZ Compressed Map (*.mapz)'))
filtr.add_pattern('*.mapz')
chooser.add_filter(filtr)
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename = chooser.get_filename()
tf = tarfile.open(filename)
mapname = os.path.join (utils.get_save_dir (), tf.getnames()[0])
tf.extractall(utils.get_save_dir())
tf.close()
map = MapList.new_from_file(mapname)
map.filename = mapname
chooser.destroy()
def quit_clicked (self, button, other=None, *data):
for map in MapList.get_open_windows():
map.window.close_window_cb (None)
width, height = self.main_window.get_size()
if os.name != 'nt':
self.config_client.set_int('/apps/labyrinth/width', width)
self.config_client.set_int('/apps/labyrinth/height', height)
gtk.main_quit ()
def populate_view (self):
cellrenderer = gtk.CellRendererText()
cellrenderer.set_property("ellipsize", pango.ELLIPSIZE_END)
column = gtk.TreeViewColumn(_("Map Name"), cellrenderer,
text=self.COL_TITLE)
column.set_resizable(True)
column.set_expand (True)
column.set_sort_column_id (1)
self.view.append_column(column)
col1 = gtk.TreeViewColumn (_("Last Modified"), gtk.CellRendererText(),
text=self.COL_MODTIME)
col1.set_resizable(True)
col1.set_sort_column_id (2)
self.view.append_column(col1)
self.view.set_model (MapList.get_TreeViewModel())
self.view.set_search_column(self.COL_TITLE)
self.view.set_enable_search (True)
def sort_column_changed_cb (self, data):
column_id, sort_order = data.get_sort_column_id ()
if os.name != 'nt':
self.config_client.set_int('/apps/labyrinth/map_sort_order', sort_order)
self.config_client.set_int('/apps/labyrinth/map_sort_order_column', column_id)
| gpl-2.0 | 4,066,807,916,930,769,000 | 38.13783 | 153 | 0.595909 | false |
yotomyoto/benzene | tournament/program.py | 5 | 3454 | #----------------------------------------------------------------------------
# Connects to a Hex program.
#----------------------------------------------------------------------------
import os, string, sys, subprocess
from random import randrange
from select import select
#----------------------------------------------------------------------------
class Program:
class CommandDenied(Exception):
pass
class Died(Exception):
pass
def __init__(self, color, command, logName, verbose):
command = command.replace("%SRAND", `randrange(0, 1000000)`)
self._command = command
self._color = color
self._verbose = verbose
if self._verbose:
print "Creating program:", command
p = subprocess.Popen(command, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE);
(self._stdin,
self._stdout,
self._stderr) = (p.stdin, p.stdout, p.stderr)
self._isDead = 0
self._log = open(logName, "w")
self._log.write("# " + self._command + "\n")
def getColor(self):
return self._color
def getCommand(self):
return self._command
def getDenyReason(self):
return self._denyReason
def getName(self):
name = "?"
try:
name = string.strip(self.sendCommand("name"))
version = string.strip(self.sendCommand("version"))
name += " " + version
except Program.CommandDenied:
pass
return name
def getResult(self):
try:
l = self.sendCommand("final_score")
#s = string.split(l)[0]
#return s
return l.strip()
except Program.CommandDenied:
return "?"
def getTimeRemaining(self):
try:
l = self.sendCommand("time_left");
return l.strip();
except Program.CommandDenied:
return "?"
def isDead(self):
return self._isDead
def sendCommand(self, cmd):
try:
self._log.write(">" + cmd + "\n")
if self._verbose:
print self._color + "< " + cmd
self._stdin.write(cmd + "\n")
self._stdin.flush()
return self._getAnswer()
except IOError:
self._programDied()
def _getAnswer(self):
self._logStdErr()
answer = ""
done = 0
numberLines = 0
while not done:
line = self._stdout.readline()
if line == "":
self._programDied()
self._log.write("<" + line)
if self._verbose:
sys.stdout.write(self._color + "> " + line)
numberLines += 1
done = (line == "\n")
if not done:
answer += line
if answer[0] != '=':
self._denyReason = string.strip(answer[2:])
raise Program.CommandDenied
if numberLines == 1:
return string.strip(answer[1:])
return answer[2:]
def _logStdErr(self):
list = select([self._stderr], [], [], 0)[0]
for s in list:
self._log.write(os.read(s.fileno(), 8192))
self._log.flush()
def _programDied(self):
self._isDead = 1
self._logStdErr()
raise Program.Died
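# Illustrative usage (sketch only; the engine path, log name and command are
# placeholders for a real GTP-style Hex program):
#     prog = Program("B", "/path/to/hex-engine --seed %SRAND", "black.log", True)
#     print prog.getName()            # sends "name" and "version" over the pipe
#     prog.sendCommand("showboard")   # any command the engine understands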
| lgpl-3.0 | 3,813,835,052,945,766,000 | 28.521368 | 77 | 0.46989 | false |
jazcollins/models | domain_adaptation/domain_separation/dsn_eval.py | 11 | 5759 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Evaluation for Domain Separation Networks (DSNs)."""
# pylint: enable=line-too-long
import math
import numpy as np
import tensorflow as tf
from domain_adaptation.datasets import dataset_factory
from domain_adaptation.domain_separation import losses
from domain_adaptation.domain_separation import models
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
'The number of images in each batch.')
tf.app.flags.DEFINE_string('master', '',
'BNS name of the TensorFlow master to use.')
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/',
'Directory where the model was written to.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/da/',
'Directory where we should write the tf summaries to.')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string('dataset', 'mnist_m',
'Which dataset to test on: "mnist", "mnist_m".')
tf.app.flags.DEFINE_string('split', 'valid',
'Which portion to test on: "valid", "test".')
tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.')
tf.app.flags.DEFINE_string('basic_tower', 'dann_mnist',
'The basic tower building block.')
tf.app.flags.DEFINE_bool('enable_precision_recall', False,
'If True, precision and recall for each class will '
'be added to the metrics.')
tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.')
def quaternion_metric(predictions, labels):
params = {'batch_size': FLAGS.batch_size, 'use_logging': False}
logcost = losses.log_quaternion_loss_batch(predictions, labels, params)
return slim.metrics.streaming_mean(logcost)
def angle_diff(true_q, pred_q):
angles = 2 * (
180.0 /
np.pi) * np.arccos(np.abs(np.sum(np.multiply(pred_q, true_q), axis=1)))
return angles
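# Illustrative check (not part of the original file; quaternions are assumed to
# be unit-normalised rows of shape [batch, 4]):
#     q = np.array([[0., 0., 0., 1.]])
#     angle_diff(q, q)  # -> array([ 0.]): identical rotations differ by 0 degrees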
def provide_batch_fn():
""" The provide_batch function to use. """
return dataset_factory.provide_batch
def main(_):
g = tf.Graph()
with g.as_default():
# Load the data.
images, labels = provide_batch_fn()(
FLAGS.dataset, FLAGS.split, FLAGS.dataset_dir, 4, FLAGS.batch_size, 4)
num_classes = labels['classes'].get_shape().as_list()[1]
tf.summary.image('eval_images', images, max_outputs=3)
# Define the model:
with tf.variable_scope('towers'):
basic_tower = getattr(models, FLAGS.basic_tower)
predictions, endpoints = basic_tower(
images,
num_classes=num_classes,
is_training=False,
batch_norm_params=None)
metric_names_to_values = {}
# Define the metrics:
if 'quaternions' in labels: # Also have to evaluate pose estimation!
quaternion_loss = quaternion_metric(labels['quaternions'],
endpoints['quaternion_pred'])
angle_errors, = tf.py_func(
angle_diff, [labels['quaternions'], endpoints['quaternion_pred']],
[tf.float32])
metric_names_to_values[
'Angular mean error'] = slim.metrics.streaming_mean(angle_errors)
metric_names_to_values['Quaternion Loss'] = quaternion_loss
accuracy = tf.contrib.metrics.streaming_accuracy(
tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1))
predictions = tf.argmax(predictions, 1)
labels = tf.argmax(labels['classes'], 1)
metric_names_to_values['Accuracy'] = accuracy
if FLAGS.enable_precision_recall:
for i in xrange(num_classes):
index_map = tf.one_hot(i, depth=num_classes)
name = 'PR/Precision_{}'.format(i)
metric_names_to_values[name] = slim.metrics.streaming_precision(
tf.gather(index_map, predictions), tf.gather(index_map, labels))
name = 'PR/Recall_{}'.format(i)
metric_names_to_values[name] = slim.metrics.streaming_recall(
tf.gather(index_map, predictions), tf.gather(index_map, labels))
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(
metric_names_to_values)
# Create the summary ops such that they also print out to std output:
summary_ops = []
for metric_name, metric_value in names_to_values.iteritems():
op = tf.summary.scalar(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
# Setup the global step.
slim.get_or_create_global_step()
slim.evaluation.evaluation_loop(
FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=names_to_updates.values(),
summary_op=tf.summary.merge(summary_ops))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | 5,395,471,066,504,499,000 | 34.99375 | 80 | 0.64282 | false |
proxysh/Safejumper-for-Desktop | buildlinux/env32/lib/python2.7/site-packages/twisted/logger/test/test_global.py | 12 | 9902 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._global}.
"""
from __future__ import print_function
import io
from twisted.trial import unittest
from .._file import textFileLogObserver
from .._observer import LogPublisher
from .._logger import Logger
from .._global import LogBeginner
from .._global import MORE_THAN_ONCE_WARNING
from .._levels import LogLevel
from ..test.test_stdlib import nextLine
def compareEvents(test, actualEvents, expectedEvents):
"""
    Compare two sequences of log events, examining only the keys which are
present in both.
@param test: a test case doing the comparison
@type test: L{unittest.TestCase}
@param actualEvents: A list of log events that were emitted by a logger.
@type actualEvents: L{list} of L{dict}
@param expectedEvents: A list of log events that were expected by a test.
    @type expectedEvents: L{list} of L{dict}
"""
if len(actualEvents) != len(expectedEvents):
test.assertEqual(actualEvents, expectedEvents)
allMergedKeys = set()
for event in expectedEvents:
allMergedKeys |= set(event.keys())
def simplify(event):
copy = event.copy()
for key in event.keys():
if key not in allMergedKeys:
copy.pop(key)
return copy
simplifiedActual = [simplify(event) for event in actualEvents]
test.assertEqual(simplifiedActual, expectedEvents)
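# Illustrative usage (sketch only, from inside a test case): keys absent from the
# expected events are ignored, so extra keys in the actual events do not fail.
#     compareEvents(self, [dict(log_level="info", extra=1)],
#                   [dict(log_level="info")])   # passes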
class LogBeginnerTests(unittest.TestCase):
"""
Tests for L{LogBeginner}.
"""
def setUp(self):
self.publisher = LogPublisher()
self.errorStream = io.StringIO()
class NotSys(object):
stdout = object()
stderr = object()
class NotWarnings(object):
def __init__(self):
self.warnings = []
def showwarning(
self, message, category, filename, lineno,
file=None, line=None
):
"""
Emulate warnings.showwarning.
@param message: A warning message to emit.
@type message: L{str}
@param category: A warning category to associate with
C{message}.
@type category: L{warnings.Warning}
@param filename: A file name for the source code file issuing
the warning.
                @type filename: L{str}
@param lineno: A line number in the source file where the
warning was issued.
@type lineno: L{int}
@param file: A file to write the warning message to. If
L{None}, write to L{sys.stderr}.
@type file: file-like object
@param line: A line of source code to include with the warning
message. If L{None}, attempt to read the line from
C{filename} and C{lineno}.
@type line: L{str}
"""
self.warnings.append(
(message, category, filename, lineno, file, line)
)
self.sysModule = NotSys()
self.warningsModule = NotWarnings()
self.beginner = LogBeginner(
self.publisher, self.errorStream, self.sysModule,
self.warningsModule
)
def test_beginLoggingToAddObservers(self):
"""
Test that C{beginLoggingTo()} adds observers.
"""
event = dict(foo=1, bar=2)
events1 = []
events2 = []
o1 = lambda e: events1.append(e)
o2 = lambda e: events2.append(e)
self.beginner.beginLoggingTo((o1, o2))
self.publisher(event)
self.assertEqual([event], events1)
self.assertEqual([event], events2)
def test_beginLoggingToBufferedEvents(self):
"""
Test that events are buffered until C{beginLoggingTo()} is
called.
"""
event = dict(foo=1, bar=2)
events1 = []
events2 = []
o1 = lambda e: events1.append(e)
o2 = lambda e: events2.append(e)
self.publisher(event) # Before beginLoggingTo; this is buffered
self.beginner.beginLoggingTo((o1, o2))
self.assertEqual([event], events1)
self.assertEqual([event], events2)
def test_beginLoggingToTwice(self):
"""
When invoked twice, L{LogBeginner.beginLoggingTo} will emit a log
message warning the user that they previously began logging, and add
the new log observers.
"""
events1 = []
events2 = []
fileHandle = io.StringIO()
textObserver = textFileLogObserver(fileHandle)
self.publisher(dict(event="prebuffer"))
firstFilename, firstLine = nextLine()
self.beginner.beginLoggingTo([events1.append, textObserver])
self.publisher(dict(event="postbuffer"))
secondFilename, secondLine = nextLine()
self.beginner.beginLoggingTo([events2.append, textObserver])
self.publisher(dict(event="postwarn"))
warning = dict(
log_format=MORE_THAN_ONCE_WARNING,
log_level=LogLevel.warn,
fileNow=secondFilename, lineNow=secondLine,
fileThen=firstFilename, lineThen=firstLine
)
compareEvents(
self, events1,
[
dict(event="prebuffer"),
dict(event="postbuffer"),
warning,
dict(event="postwarn")
]
)
compareEvents(self, events2, [warning, dict(event="postwarn")])
output = fileHandle.getvalue()
self.assertIn('<{0}:{1}>'.format(firstFilename, firstLine),
output)
self.assertIn('<{0}:{1}>'.format(secondFilename, secondLine),
output)
def test_criticalLogging(self):
"""
Critical messages will be written as text to the error stream.
"""
log = Logger(observer=self.publisher)
log.info("ignore this")
log.critical("a critical {message}", message="message")
self.assertEqual(self.errorStream.getvalue(), u"a critical message\n")
def test_criticalLoggingStops(self):
"""
Once logging has begun with C{beginLoggingTo}, critical messages are no
longer written to the output stream.
"""
log = Logger(observer=self.publisher)
self.beginner.beginLoggingTo(())
log.critical("another critical message")
self.assertEqual(self.errorStream.getvalue(), u"")
def test_beginLoggingToRedirectStandardIO(self):
"""
L{LogBeginner.beginLoggingTo} will re-direct the standard output and
error streams by setting the C{stdio} and C{stderr} attributes on its
sys module object.
"""
x = []
self.beginner.beginLoggingTo([x.append])
print("Hello, world.", file=self.sysModule.stdout)
compareEvents(
self, x, [dict(log_namespace="stdout", log_io="Hello, world.")]
)
del x[:]
print("Error, world.", file=self.sysModule.stderr)
compareEvents(
self, x, [dict(log_namespace="stderr", log_io="Error, world.")]
)
def test_beginLoggingToDontRedirect(self):
"""
L{LogBeginner.beginLoggingTo} will leave the existing stdout/stderr in
place if it has been told not to replace them.
"""
oldOut = self.sysModule.stdout
oldErr = self.sysModule.stderr
self.beginner.beginLoggingTo((), redirectStandardIO=False)
self.assertIs(self.sysModule.stdout, oldOut)
self.assertIs(self.sysModule.stderr, oldErr)
def test_beginLoggingToPreservesEncoding(self):
"""
When L{LogBeginner.beginLoggingTo} redirects stdout/stderr streams, the
replacement streams will preserve the encoding of the replaced streams,
to minimally disrupt any application relying on a specific encoding.
"""
weird = io.TextIOWrapper(io.BytesIO(), "shift-JIS")
weirderr = io.TextIOWrapper(io.BytesIO(), "big5")
self.sysModule.stdout = weird
self.sysModule.stderr = weirderr
x = []
self.beginner.beginLoggingTo([x.append])
self.assertEqual(self.sysModule.stdout.encoding, "shift-JIS")
self.assertEqual(self.sysModule.stderr.encoding, "big5")
self.sysModule.stdout.write(b"\x97\x9B\n")
self.sysModule.stderr.write(b"\xBC\xFC\n")
compareEvents(
self, x, [dict(log_io=u"\u674e"), dict(log_io=u"\u7469")]
)
def test_warningsModule(self):
"""
L{LogBeginner.beginLoggingTo} will redirect the warnings of its
warnings module into the logging system.
"""
self.warningsModule.showwarning(
"a message", DeprecationWarning, __file__, 1
)
x = []
self.beginner.beginLoggingTo([x.append])
self.warningsModule.showwarning(
"another message", DeprecationWarning, __file__, 2
)
f = io.StringIO()
self.warningsModule.showwarning(
"yet another", DeprecationWarning, __file__, 3, file=f
)
self.assertEqual(
self.warningsModule.warnings,
[
("a message", DeprecationWarning, __file__, 1, None, None),
("yet another", DeprecationWarning, __file__, 3, f, None),
]
)
compareEvents(
self, x,
[dict(
warning="another message",
category=(
DeprecationWarning.__module__ + "." +
DeprecationWarning.__name__
),
filename=__file__, lineno=2,
)]
)
| gpl-2.0 | -1,041,290,754,697,133,000 | 31.045307 | 79 | 0.582812 | false |
ceos-seo/Data_Cube_v2 | labs/dc_mosaicker.py | 1 | 8894 | # Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
import xarray as xr
import datacube
import dc_utilities as utilities
# Command line tool imports
import argparse
import os
import collections
import gdal
from datetime import datetime
# Author: KMF
# Creation date: 2016-06-14
def create_mosaic(dataset_in, clean_mask=None, no_data=-9999):
"""
Description:
      Creates a mosaic of the input dataset, keeping the most recent clean pixel and
      falling back to progressively older acquisitions where needed. If no clean mask is
      given, the 'cf_mask' variable must be included in the input dataset, as it will be
      used to create a clean mask
-----
Inputs:
dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
coordinates: time, latitude, longitude
variables: variables to be mosaicked
If user does not provide a clean_mask, dataset_in must also include the cf_mask
variable
Optional Inputs:
clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
if user does not provide a clean mask, one will be created using cfmask
no_data (int/float) - no data pixel value; default: -9999
Output:
dataset_out (xarray.Dataset) - mosaicked data with
coordinates: latitude, longitude
variables: same as dataset_in
"""
# Create clean_mask from cfmask if none given
    if clean_mask is None:
cfmask = dataset_in.cf_mask
clean_mask = utilities.create_cfmask_clean_mask(cfmask)
data_vars = dataset_in.data_vars # Dict object with key as the name of the variable
# and each value as the DataArray of that variable
mosaic = collections.OrderedDict() # Dict to contain variable names as keys and
# numpy arrays containing mosaicked data
for key in data_vars:
# Get raw data for current variable and mask the data
data = data_vars[key].values
masked = np.full(data.shape, no_data)
masked[clean_mask] = data[clean_mask]
out = np.full(masked.shape[1:], no_data)
# Mosaic current variable (most recent - oldest)
for index in reversed(range(len(clean_mask))):
swap = np.reshape(np.in1d(out.reshape(-1), [no_data]),
out.shape)
out[swap] = masked[index][swap]
mosaic[key] = (['latitude', 'longitude'], out)
latitude = dataset_in.latitude
longitude = dataset_in.longitude
dataset_out = xr.Dataset(mosaic,
coords={'latitude': latitude,
'longitude': longitude})
return dataset_out
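# Illustrative usage sketch (an assumption for documentation, not part of the original
# module): how create_mosaic might be called on a Data Cube query result. The platform,
# product and extent values below are made up for the example.
#
#     dc = datacube.Datacube(config='~/.datacube.conf', app='dc-mosaicker-example')
#     dataset_in = dc.load(platform='LANDSAT_7', product='ls7_ledaps',
#                          time=('2015-01-01', '2015-12-31'),
#                          lon=(35.0, 35.5), lat=(-0.5, 0.0),
#                          measurements=['red', 'green', 'blue', 'cf_mask'])
#     mosaic = create_mosaic(dataset_in)  # cf_mask is used to derive the clean mask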
def main(platform, product_type, min_lon, max_lon, min_lat, max_lat,
red, green, blue, start_date, end_date, dc_config):
"""
Description:
Command-line mosaicking tool - creates a true color mosaic from the
data retrieved by the Data Cube and save a GeoTIFF of the results
Assumptions:
The command-line tool assumes there is a measurement called cf_mask
Inputs:
platform (str)
product_type (str)
min_lon (str)
max_lon (str)
min_lat (str)
max_lat (str)
start_date (str)
end_date (str)
dc_config (str)
"""
# Initialize data cube object
dc = datacube.Datacube(config=dc_config,
app='dc-mosaicker')
# Validate arguments
products = dc.list_products()
platform_names = set([product[6] for product in products.values])
if platform not in platform_names:
print 'ERROR: Invalid platform.'
print 'Valid platforms are:'
for name in platform_names:
print name
return
product_names = [product[0] for product in products.values]
if product_type not in product_names:
print 'ERROR: Invalid product type.'
print 'Valid product types are:'
for name in product_names:
print name
return
measurements = dc.list_measurements()
index_1 = measurements.keys()[0] # Doesn't matter what the actual value is,
# just need to get into the next layer of the
# DataFrame.. better way?
bands = set(measurements[index_1][product_type].keys())
if not set([red, green, blue]).issubset(bands):
        print 'ERROR: Invalid band.'
        print 'Valid bands are:'
for band in bands:
print band
return
try:
        min_lon = float(min_lon)
        max_lon = float(max_lon)
        min_lat = float(min_lat)
        max_lat = float(max_lat)
except:
print 'ERROR: Longitudes/Latitudes must be float values'
return
try:
start_date_str = start_date
end_date_str = end_date
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except:
print 'ERROR: Invalid date format. Date format: YYYY-MM-DD'
return
if not os.path.exists(dc_config):
print 'ERROR: Invalid file path for dc_config'
return
# Retrieve data from Data Cube
dataset_in = dc.load(platform=platform,
product=product_type,
time=(start_date, end_date),
lon=(min_lon, max_lon),
lat=(min_lat, max_lat),
measurements=[red, green, blue, 'cf_mask'])
# Get information needed for saving as GeoTIFF
# Spatial ref
crs = dataset_in.crs
spatial_ref = utilities.get_spatial_ref(crs)
# Upper left coordinates
ul_lon = dataset_in.longitude.values[0]
ul_lat = dataset_in.latitude.values[0]
# Resolution
products = dc.list_products()
    resolution = products.resolution[products.name == product_type]
lon_dist = resolution.values[0][1]
lat_dist = resolution.values[0][0]
# Rotation
lon_rtn = 0
lat_rtn = 0
geotransform = (ul_lon, lon_dist, lon_rtn, ul_lat, lat_rtn, lat_dist)
mosaic = create_mosaic(dataset_in)
out_file = ( str(min_lon) + '_' + str(min_lat) + '_'
+ start_date_str + '_' + end_date_str
+ '_mosaic.tif' )
utilities.save_to_geotiff(out_file, gdal.GDT_Float32, mosaic, geotransform, spatial_ref)
if __name__ == '__main__':
start_time = datetime.now()
parser = argparse.ArgumentParser()
parser.add_argument('platform', help='Data platform; example: LANDSAT_7')
parser.add_argument('product', help='Product type; example: ls7_ledaps')
parser.add_argument('min_lon', help='Minimum longitude')
parser.add_argument('max_lon', help='Maximum longitude')
parser.add_argument('min_lat', help='Minimum latitude')
parser.add_argument('max_lat', help='Maximum latitude')
parser.add_argument('start_date', help='Start date; format: YYYY-MM-DD')
parser.add_argument('end_date', help='End date; format: YYYY-MM-DD')
parser.add_argument('red', nargs='?', default='red',
help='Band to map to the red color channel')
parser.add_argument('green', nargs='?', default='green',
help='Band to map to the green color channel')
parser.add_argument('blue', nargs='?', default='blue',
help='Band to map to the blue color channel')
parser.add_argument('dc_config', nargs='?', default='~/.datacube.conf',
help='Datacube configuration path; default: ~/.datacube.conf')
args = parser.parse_args()
main(args.platform, args.product,
args.min_lon, args.max_lon,
args.min_lat, args.max_lat,
args.red, args.green, args.blue,
args.start_date, args.end_date,
args.dc_config)
end_time = datetime.now()
print 'Execution time: ' + str(end_time - start_time)
| apache-2.0 | 8,956,539,914,278,026,000 | 36.213389 | 92 | 0.624916 | false |
amitjamadagni/sympy | sympy/mpmath/functions/functions.py | 6 | 17416 | from ..libmp.backend import xrange
class SpecialFunctions(object):
"""
This class implements special functions using high-level code.
Elementary and some other functions (e.g. gamma function, basecase
hypergeometric series) are assumed to be predefined by the context as
"builtins" or "low-level" functions.
"""
defined_functions = {}
# The series for the Jacobi theta functions converge for |q| < 1;
# in the current implementation they throw a ValueError for
# abs(q) > THETA_Q_LIM
THETA_Q_LIM = 1 - 10**-7
def __init__(self):
cls = self.__class__
for name in cls.defined_functions:
f, wrap = cls.defined_functions[name]
cls._wrap_specfun(name, f, wrap)
self.mpq_1 = self._mpq((1,1))
self.mpq_0 = self._mpq((0,1))
self.mpq_1_2 = self._mpq((1,2))
self.mpq_3_2 = self._mpq((3,2))
self.mpq_1_4 = self._mpq((1,4))
self.mpq_1_16 = self._mpq((1,16))
self.mpq_3_16 = self._mpq((3,16))
self.mpq_5_2 = self._mpq((5,2))
self.mpq_3_4 = self._mpq((3,4))
self.mpq_7_4 = self._mpq((7,4))
self.mpq_5_4 = self._mpq((5,4))
self.mpq_1_3 = self._mpq((1,3))
self.mpq_2_3 = self._mpq((2,3))
self.mpq_4_3 = self._mpq((4,3))
self.mpq_1_6 = self._mpq((1,6))
self.mpq_5_6 = self._mpq((5,6))
self.mpq_5_3 = self._mpq((5,3))
self._misc_const_cache = {}
self._aliases.update({
'phase' : 'arg',
'conjugate' : 'conj',
'nthroot' : 'root',
'polygamma' : 'psi',
'hurwitz' : 'zeta',
#'digamma' : 'psi0',
#'trigamma' : 'psi1',
#'tetragamma' : 'psi2',
#'pentagamma' : 'psi3',
'fibonacci' : 'fib',
'factorial' : 'fac',
})
self.zetazero_memoized = self.memoize(self.zetazero)
# Default -- do nothing
@classmethod
def _wrap_specfun(cls, name, f, wrap):
setattr(cls, name, f)
# Optional fast versions of common functions in common cases.
# If not overridden, default (generic hypergeometric series)
# implementations will be used
def _besselj(ctx, n, z): raise NotImplementedError
def _erf(ctx, z): raise NotImplementedError
def _erfc(ctx, z): raise NotImplementedError
def _gamma_upper_int(ctx, z, a): raise NotImplementedError
def _expint_int(ctx, n, z): raise NotImplementedError
def _zeta(ctx, s): raise NotImplementedError
def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError
def _ei(ctx, z): raise NotImplementedError
def _e1(ctx, z): raise NotImplementedError
def _ci(ctx, z): raise NotImplementedError
def _si(ctx, z): raise NotImplementedError
def _altzeta(ctx, s): raise NotImplementedError
def defun_wrapped(f):
SpecialFunctions.defined_functions[f.__name__] = f, True
def defun(f):
SpecialFunctions.defined_functions[f.__name__] = f, False
def defun_static(f):
setattr(SpecialFunctions, f.__name__, f)
@defun_wrapped
def cot(ctx, z): return ctx.one / ctx.tan(z)
@defun_wrapped
def sec(ctx, z): return ctx.one / ctx.cos(z)
@defun_wrapped
def csc(ctx, z): return ctx.one / ctx.sin(z)
@defun_wrapped
def coth(ctx, z): return ctx.one / ctx.tanh(z)
@defun_wrapped
def sech(ctx, z): return ctx.one / ctx.cosh(z)
@defun_wrapped
def csch(ctx, z): return ctx.one / ctx.sinh(z)
@defun_wrapped
def acot(ctx, z): return ctx.atan(ctx.one / z)
@defun_wrapped
def asec(ctx, z): return ctx.acos(ctx.one / z)
@defun_wrapped
def acsc(ctx, z): return ctx.asin(ctx.one / z)
@defun_wrapped
def acoth(ctx, z): return ctx.atanh(ctx.one / z)
@defun_wrapped
def asech(ctx, z): return ctx.acosh(ctx.one / z)
@defun_wrapped
def acsch(ctx, z): return ctx.asinh(ctx.one / z)
@defun
def sign(ctx, x):
x = ctx.convert(x)
if not x or ctx.isnan(x):
return x
if ctx._is_real_type(x):
if x > 0:
return ctx.one
else:
return -ctx.one
return x / abs(x)
@defun
def agm(ctx, a, b=1):
if b == 1:
return ctx.agm1(a)
a = ctx.convert(a)
b = ctx.convert(b)
return ctx._agm(a, b)
@defun_wrapped
def sinc(ctx, x):
if ctx.isinf(x):
return 1/x
if not x:
return x+1
return ctx.sin(x)/x
@defun_wrapped
def sincpi(ctx, x):
if ctx.isinf(x):
return 1/x
if not x:
return x+1
return ctx.sinpi(x)/(ctx.pi*x)
# TODO: tests; improve implementation
@defun_wrapped
def expm1(ctx, x):
if not x:
return ctx.zero
# exp(x) - 1 ~ x
if ctx.mag(x) < -ctx.prec:
return x + 0.5*x**2
# TODO: accurately eval the smaller of the real/imag parts
return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
@defun_wrapped
def powm1(ctx, x, y):
mag = ctx.mag
one = ctx.one
w = x**y - one
M = mag(w)
# Only moderate cancellation
if M > -8:
return w
# Check for the only possible exact cases
if not w:
if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
return w
x1 = x - one
magy = mag(y)
lnx = ctx.ln(x)
# Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
if magy + mag(lnx) < -ctx.prec:
return lnx*y + (lnx*y)**2/2
# TODO: accurately eval the smaller of the real/imag part
return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
@defun
def _rootof1(ctx, k, n):
k = int(k)
n = int(n)
k %= n
if not k:
return ctx.one
elif 2*k == n:
return -ctx.one
elif 4*k == n:
return ctx.j
elif 4*k == 3*n:
return -ctx.j
return ctx.expjpi(2*ctx.mpf(k)/n)
@defun
def root(ctx, x, n, k=0):
n = int(n)
x = ctx.convert(x)
if k:
# Special case: there is an exact real root
if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0):
return -ctx.root(-x, n)
# Multiply by root of unity
prec = ctx.prec
try:
ctx.prec += 10
v = ctx.root(x, n, 0) * ctx._rootof1(k, n)
finally:
ctx.prec = prec
return +v
return ctx._nthroot(x, n)
@defun
def unitroots(ctx, n, primitive=False):
gcd = ctx._gcd
prec = ctx.prec
try:
ctx.prec += 10
if primitive:
v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1]
else:
# TODO: this can be done *much* faster
v = [ctx._rootof1(k,n) for k in range(n)]
finally:
ctx.prec = prec
return [+x for x in v]
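# Illustrative examples (assumptions for documentation, not library doctests):
#     >>> unitroots(4)                   # the 4th roots of unity: [1, j, -1, -j]
#     >>> unitroots(4, primitive=True)   # only the primitive ones: [j, -j]
#     >>> root(-8, 3, k=1)               # the real cube root -2 (k=1 selects it for odd n)
#     >>> root(-8, 3)                    # principal complex cube root, approx 1 + 1.732j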
@defun
def arg(ctx, x):
x = ctx.convert(x)
re = ctx._re(x)
im = ctx._im(x)
return ctx.atan2(im, re)
@defun
def fabs(ctx, x):
return abs(ctx.convert(x))
@defun
def re(ctx, x):
x = ctx.convert(x)
if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers
return x.real
return x
@defun
def im(ctx, x):
x = ctx.convert(x)
if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers
return x.imag
return ctx.zero
@defun
def conj(ctx, x):
x = ctx.convert(x)
try:
return x.conjugate()
except AttributeError:
return x
@defun
def polar(ctx, z):
return (ctx.fabs(z), ctx.arg(z))
@defun_wrapped
def rect(ctx, r, phi):
return r * ctx.mpc(*ctx.cos_sin(phi))
@defun
def log(ctx, x, b=None):
if b is None:
return ctx.ln(x)
wp = ctx.prec + 20
return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp)
@defun
def log10(ctx, x):
return ctx.log(x, 10)
@defun
def fmod(ctx, x, y):
return ctx.convert(x) % ctx.convert(y)
@defun
def degrees(ctx, x):
return x / ctx.degree
@defun
def radians(ctx, x):
return x * ctx.degree
def _lambertw_special(ctx, z, k):
# W(0,0) = 0; all other branches are singular
if not z:
if not k:
return z
return ctx.ninf + z
if z == ctx.inf:
if k == 0:
return z
else:
return z + 2*k*ctx.pi*ctx.j
if z == ctx.ninf:
return (-z) + (2*k+1)*ctx.pi*ctx.j
# Some kind of nan or complex inf/nan?
return ctx.ln(z)
import math
import cmath
def _lambertw_approx_hybrid(z, k):
imag_sign = 0
if hasattr(z, "imag"):
x = float(z.real)
y = z.imag
if y:
imag_sign = (-1) ** (y < 0)
y = float(y)
else:
x = float(z)
y = 0.0
imag_sign = 0
# hack to work regardless of whether Python supports -0.0
if not y:
y = 0.0
z = complex(x,y)
if k == 0:
if -4.0 < y < 4.0 and -1.0 < x < 2.5:
if imag_sign:
# Taylor series in upper/lower half-plane
if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j))
if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j))
if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j))
if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j))
# Taylor series near -1
if x < -0.5:
if imag_sign >= 0:
return (-0.318+1.34j) + (-0.697-0.593j)*(z+1)
else:
return (-0.318-1.34j) + (-0.697+0.593j)*(z+1)
# return real type
r = -0.367879441171442
if (not imag_sign) and x > r:
z = x
# Singularity near -1/e
if x < -0.2:
return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
# Taylor series near 0
if x < 0.5: return z
# Simple linear approximation
return 0.2 + 0.3*z
if (not imag_sign) and x > 0.0:
L1 = math.log(x)
L2 = math.log(L1)
else:
L1 = cmath.log(z)
L2 = cmath.log(L1)
elif k == -1:
# return real type
r = -0.367879441171442
if (not imag_sign) and r < x < 0.0:
z = x
if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2:
return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
if (not imag_sign) and -0.2 <= x < 0.0:
L1 = math.log(-x)
return L1 - math.log(-L1)
else:
if imag_sign == -1 and (not y) and x < 0.0:
L1 = cmath.log(z) - 3.1415926535897932j
else:
L1 = cmath.log(z) - 6.2831853071795865j
L2 = cmath.log(L1)
return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2)
def _lambertw_series(ctx, z, k, tol):
"""
Return rough approximation for W_k(z) from an asymptotic series,
sufficiently accurate for the Halley iteration to converge to
the correct value.
"""
magz = ctx.mag(z)
if (-10 < magz < 900) and (-1000 < k < 1000):
# Near the branch point at -1/e
if magz < 1 and abs(z+0.36787944117144) < 0.05:
if k == 0 or (k == -1 and ctx._im(z) >= 0) or \
(k == 1 and ctx._im(z) < 0):
delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)])
cancellation = -ctx.mag(delta)
ctx.prec += cancellation
# Use series given in Corless et al.
p = ctx.sqrt(2*(ctx.e*z+1))
ctx.prec -= cancellation
u = {0:ctx.mpf(-1), 1:ctx.mpf(1)}
a = {0:ctx.mpf(2), 1:ctx.mpf(-1)}
if k != 0:
p = -p
s = ctx.zero
# The series converges, so we could use it directly, but unless
# *extremely* close, it is better to just use the first few
# terms to get a good approximation for the iteration
for l in xrange(max(2,cancellation)):
if l not in u:
a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l))
u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1)
term = u[l] * p**l
s += term
if ctx.mag(term) < -tol:
return s, True
l += 1
ctx.prec += cancellation//2
return s, False
if k == 0 or k == -1:
return _lambertw_approx_hybrid(z, k), False
if k == 0:
if magz < -1:
return z*(1-z), False
L1 = ctx.ln(z)
L2 = ctx.ln(L1)
elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0):
L1 = ctx.ln(-z)
return L1 - ctx.ln(-L1), False
else:
# This holds both as z -> 0 and z -> inf.
# Relative error is O(1/log(z)).
L1 = ctx.ln(z) + 2j*ctx.pi*k
L2 = ctx.ln(L1)
return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False
@defun
def lambertw(ctx, z, k=0):
z = ctx.convert(z)
k = int(k)
if not ctx.isnormal(z):
return _lambertw_special(ctx, z, k)
prec = ctx.prec
ctx.prec += 20 + ctx.mag(k or 1)
wp = ctx.prec
tol = wp - 5
w, done = _lambertw_series(ctx, z, k, tol)
if not done:
# Use Halley iteration to solve w*exp(w) = z
two = ctx.mpf(2)
for i in xrange(100):
ew = ctx.exp(w)
wew = w*ew
wewz = wew-z
wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
if ctx.mag(wn-w) <= ctx.mag(wn) - tol:
w = wn
break
else:
w = wn
        else:
            # for-else: reached only if the loop exhausted all iterations without converging
            ctx.warn("Lambert W iteration failed to converge for z = %s" % z)
ctx.prec = prec
return +w
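# Illustrative check (an assumption for documentation, not a library doctest): every
# branch satisfies w*exp(w) == z, and the principal branch at z=1 is the omega
# constant, approximately 0.5671432904097838.
#     >>> w = lambertw(1)
#     >>> w*exp(w)                # should return (approximately) 1.0
#     >>> lambertw(2.5, k=-1)     # a point on the k=-1 branch (complex-valued)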
@defun_wrapped
def bell(ctx, n, x=1):
x = ctx.convert(x)
if not n:
if ctx.isnan(x):
return x
return type(x)(1)
if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n):
return x**n
if n == 1: return x
if n == 2: return x*(x+1)
if x == 0: return ctx.sincpi(n)
return _polyexp(ctx, n, x, True) / ctx.exp(x)
def _polyexp(ctx, n, x, extra=False):
def _terms():
if extra:
yield ctx.sincpi(n)
t = x
k = 1
while 1:
yield k**n * t
k += 1
t = t*x/k
return ctx.sum_accurately(_terms, check_step=4)
@defun_wrapped
def polyexp(ctx, s, z):
if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s):
return z**s
if z == 0: return z*s
if s == 0: return ctx.expm1(z)
if s == 1: return ctx.exp(z)*z
if s == 2: return ctx.exp(z)*z*(z+1)
return _polyexp(ctx, s, z)
@defun_wrapped
def cyclotomic(ctx, n, z):
n = int(n)
assert n >= 0
p = ctx.one
if n == 0:
return p
if n == 1:
return z - p
if n == 2:
return z + p
# Use divisor product representation. Unfortunately, this sometimes
# includes singularities for roots of unity, which we have to cancel out.
# Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1).
a_prod = 1
b_prod = 1
num_zeros = 0
num_poles = 0
for d in range(1,n+1):
if not n % d:
w = ctx.moebius(n//d)
# Use powm1 because it is important that we get 0 only
# if it really is exactly 0
b = -ctx.powm1(z, d)
if b:
p *= b**w
else:
if w == 1:
a_prod *= d
num_zeros += 1
elif w == -1:
b_prod *= d
num_poles += 1
#print n, num_zeros, num_poles
if num_zeros:
if num_zeros > num_poles:
p *= 0
else:
p *= a_prod
p /= b_prod
return p
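# Illustrative check (an assumption for documentation): cyclotomic(6, z) evaluates
# Phi_6(z) = z^2 - z + 1, so cyclotomic(6, 3) should equal 7, and the pole/zero
# cancellation above keeps values finite at roots of unity, e.g. cyclotomic(6, 1) == 1.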
@defun
def mangoldt(ctx, n):
r"""
Evaluates the von Mangoldt function `\Lambda(n) = \log p`
if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise.
**Examples**
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> [mangoldt(n) for n in range(-2,3)]
[0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321]
>>> mangoldt(6)
0.0
>>> mangoldt(7)
1.945910149055313305105353
>>> mangoldt(8)
0.6931471805599453094172321
>>> fsum(mangoldt(n) for n in range(101))
94.04531122935739224600493
>>> fsum(mangoldt(n) for n in range(10001))
10013.39669326311478372032
"""
n = int(n)
if n < 2:
return ctx.zero
if n % 2 == 0:
# Must be a power of two
if n & (n-1) == 0:
return +ctx.ln2
else:
return ctx.zero
# TODO: the following could be generalized into a perfect
# power testing function
# ---
# Look for a small factor
for p in (3,5,7,11,13,17,19,23,29,31):
if not n % p:
q, r = n // p, 0
while q > 1:
q, r = divmod(q, p)
if r:
return ctx.zero
return ctx.ln(p)
if ctx.isprime(n):
return ctx.ln(n)
# Obviously, we could use arbitrary-precision arithmetic for this...
if n > 10**30:
raise NotImplementedError
k = 2
while 1:
p = int(n**(1./k) + 0.5)
if p < 2:
return ctx.zero
if p ** k == n:
if ctx.isprime(p):
return ctx.ln(p)
k += 1
| bsd-3-clause | -2,190,033,119,787,205,000 | 27.55082 | 84 | 0.506718 | false |
ageitgey/face_recognition | examples/benchmark.py | 1 | 2450 | import timeit
# Note: This example is only tested with Python 3 (not Python 2)
# This is a very simple benchmark to give you an idea of how fast each step of face recognition will run on your system.
# Notice that face detection gets very slow at large image sizes. So you might consider running face detection on a
# scaled down version of your image and then running face encodings on the full size image.
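# A minimal sketch of that idea (illustrative only; the 0.25 scale factor and the use
# of OpenCV for resizing are assumptions, not part of this benchmark):
#
#     import cv2
#     small = cv2.resize(image, (0, 0), fx=0.25, fy=0.25)
#     small_locations = face_recognition.face_locations(small)
#     # scale the (top, right, bottom, left) boxes back up before encoding at full size
#     locations = [(t*4, r*4, b*4, l*4) for (t, r, b, l) in small_locations]
#     encodings = face_recognition.face_encodings(image, known_face_locations=locations)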
TEST_IMAGES = [
"obama-240p.jpg",
"obama-480p.jpg",
"obama-720p.jpg",
"obama-1080p.jpg"
]
def run_test(setup, test, iterations_per_test=5, tests_to_run=10):
fastest_execution = min(timeit.Timer(test, setup=setup).repeat(tests_to_run, iterations_per_test))
execution_time = fastest_execution / iterations_per_test
fps = 1.0 / execution_time
return execution_time, fps
setup_locate_faces = """
import face_recognition
image = face_recognition.load_image_file("{}")
"""
test_locate_faces = """
face_locations = face_recognition.face_locations(image)
"""
setup_face_landmarks = """
import face_recognition
image = face_recognition.load_image_file("{}")
face_locations = face_recognition.face_locations(image)
"""
test_face_landmarks = """
landmarks = face_recognition.face_landmarks(image, face_locations=face_locations)[0]
"""
setup_encode_face = """
import face_recognition
image = face_recognition.load_image_file("{}")
face_locations = face_recognition.face_locations(image)
"""
test_encode_face = """
encoding = face_recognition.face_encodings(image, known_face_locations=face_locations)[0]
"""
setup_end_to_end = """
import face_recognition
image = face_recognition.load_image_file("{}")
"""
test_end_to_end = """
encoding = face_recognition.face_encodings(image)[0]
"""
print("Benchmarks (Note: All benchmarks are only using a single CPU core)")
print()
for image in TEST_IMAGES:
size = image.split("-")[1].split(".")[0]
print("Timings at {}:".format(size))
print(" - Face locations: {:.4f}s ({:.2f} fps)".format(*run_test(setup_locate_faces.format(image), test_locate_faces)))
print(" - Face landmarks: {:.4f}s ({:.2f} fps)".format(*run_test(setup_face_landmarks.format(image), test_face_landmarks)))
print(" - Encode face (inc. landmarks): {:.4f}s ({:.2f} fps)".format(*run_test(setup_encode_face.format(image), test_encode_face)))
print(" - End-to-end: {:.4f}s ({:.2f} fps)".format(*run_test(setup_end_to_end.format(image), test_end_to_end)))
print()
| mit | -7,453,001,758,066,568,000 | 30.818182 | 135 | 0.697551 | false |
PhiInnovations/mdp28-linux-bsp | openembedded-core/meta/lib/oe/packagedata.py | 2 | 3067 | import codecs
def packaged(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
def read_pkgdatafile(fn):
pkgdata = {}
def decode(str):
c = codecs.getdecoder("string_escape")
return c(str)[0]
if os.access(fn, os.R_OK):
import re
f = file(fn, 'r')
lines = f.readlines()
f.close()
r = re.compile("([^:]+):\s*(.*)")
for l in lines:
m = r.match(l)
if m:
pkgdata[m.group(1)] = decode(m.group(2))
return pkgdata
def all_pkgdatadirs(d):
dirs = []
triplets = (d.getVar("PKGMLTRIPLETS") or "").split()
for t in triplets:
dirs.append(t + "/runtime/")
return dirs
def get_subpkgedata_fn(pkg, d):
dirs = all_pkgdatadirs(d)
pkgdata = d.expand('${TMPDIR}/pkgdata/')
for dir in dirs:
fn = pkgdata + dir + pkg
if os.path.exists(fn):
return fn
return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
def has_subpkgdata(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
def read_subpkgdata(pkg, d):
return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
def has_pkgdata(pn, d):
fn = d.expand('${PKGDATA_DIR}/%s' % pn)
return os.access(fn, os.R_OK)
def read_pkgdata(pn, d):
fn = d.expand('${PKGDATA_DIR}/%s' % pn)
return read_pkgdatafile(fn)
#
# Collapse FOO_pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
newvar = var.replace("_" + pkg, "")
if newvar == var and var + "_" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
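# Illustrative example (assumed data, not from a real pkgdata file): for pkg "foo",
# a line "DESCRIPTION_foo: Example package" is returned as
# {"DESCRIPTION": "Example package"}, i.e. the per-package suffix is collapsed away.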
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
target_os = d.getVar("TARGET_OS", True)
target_vendor = d.getVar("TARGET_VENDOR", True)
basedir = os.path.dirname(d.getVar("PKGDATA_DIR", True))
dirs = ("%s%s-%s" % (arch, target_vendor, target_os)
for arch in d.getVar("PACKAGE_ARCHS", True).split())
pkgmap = {}
for pkgdatadir in (os.path.join(basedir, sys) for sys in dirs):
try:
files = os.listdir(pkgdatadir)
except OSError:
continue
for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
try:
pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
except OSError:
continue
packages = pkgdata.get("PACKAGES") or ""
for pkg in packages.split():
pkgmap[pkg] = pn
return pkgmap
def pkgmap(d):
"""Return a dictionary mapping package to recipe name.
Cache the mapping in the metadata"""
pkgmap_data = d.getVar("__pkgmap_data", False)
if pkgmap_data is None:
pkgmap_data = _pkgmap(d)
d.setVar("__pkgmap_data", pkgmap_data)
return pkgmap_data
def recipename(pkg, d):
"""Return the recipe name for the given binary package name."""
return pkgmap(d).get(pkg)
| mit | -6,815,879,446,107,291,000 | 26.141593 | 90 | 0.574177 | false |
alimony/django | django/contrib/postgres/forms/array.py | 6 | 6964 | import copy
from itertools import chain
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from ..utils import prefix_validation_error
class SimpleArrayField(forms.CharField):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
self.delimiter = delimiter
super().__init__(*args, **kwargs)
if min_length is not None:
self.min_length = min_length
self.validators.append(ArrayMinLengthValidator(int(min_length)))
if max_length is not None:
self.max_length = max_length
self.validators.append(ArrayMaxLengthValidator(int(max_length)))
def prepare_value(self, value):
if isinstance(value, list):
return self.delimiter.join(str(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if isinstance(value, list):
items = value
elif value:
items = value.split(self.delimiter)
else:
items = []
errors = []
values = []
for index, item in enumerate(items):
try:
values.append(self.base_field.to_python(item))
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super().validate(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.validate(item)
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super().run_validators(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.run_validators(item)
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
template_name = 'postgres/widgets/split_array.html'
def __init__(self, widget, size, **kwargs):
self.widget = widget() if isinstance(widget, type) else widget
self.size = size
super().__init__(**kwargs)
@property
def is_hidden(self):
return self.widget.is_hidden
def value_from_datadict(self, data, files, name):
return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
for index in range(self.size)]
def value_omitted_from_data(self, data, files, name):
return all(
self.widget.value_omitted_from_data(data, files, '%s_%s' % (name, index))
for index in range(self.size)
)
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def get_context(self, name, value, attrs=None):
attrs = {} if attrs is None else attrs
context = super().get_context(name, value, attrs)
if self.is_localized:
self.widget.is_localized = self.is_localized
value = value or []
context['widget']['subwidgets'] = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i in range(max(len(value), self.size)):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
context['widget']['subwidgets'].append(
self.widget.get_context(name + '_%s' % i, widget_value, final_attrs)['widget']
)
return context
@property
def media(self):
return self.widget.media
def __deepcopy__(self, memo):
obj = super().__deepcopy__(memo)
obj.widget = copy.deepcopy(self.widget)
return obj
@property
def needs_multipart_form(self):
return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
self.base_field = base_field
self.size = size
self.remove_trailing_nulls = remove_trailing_nulls
widget = SplitArrayWidget(widget=base_field.widget, size=size)
kwargs.setdefault('widget', widget)
super().__init__(**kwargs)
def clean(self, value):
cleaned_data = []
errors = []
if not any(value) and self.required:
raise ValidationError(self.error_messages['required'])
max_size = max(self.size, len(value))
for index in range(max_size):
item = value[index]
try:
cleaned_data.append(self.base_field.clean(item))
except ValidationError as error:
errors.append(prefix_validation_error(
error,
self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
cleaned_data.append(None)
else:
errors.append(None)
if self.remove_trailing_nulls:
null_index = None
for i, value in reversed(list(enumerate(cleaned_data))):
if value in self.base_field.empty_values:
null_index = i
else:
break
if null_index is not None:
cleaned_data = cleaned_data[:null_index]
errors = errors[:null_index]
errors = list(filter(None, errors))
if errors:
raise ValidationError(list(chain.from_iterable(errors)))
return cleaned_data
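# Illustrative usage sketch (an assumption for documentation, not part of Django):
# declaring a form that uses the fields above; names and sizes are made up.
#
#     from django import forms
#     from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
#
#     class TagForm(forms.Form):
#         tags = SimpleArrayField(forms.CharField(max_length=20), delimiter=',')
#         scores = SplitArrayField(forms.IntegerField(), size=3, remove_trailing_nulls=True)
#
#     form = TagForm(data={'tags': 'a,b,c',
#                          'scores_0': '1', 'scores_1': '2', 'scores_2': ''})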
| bsd-3-clause | 1,099,102,350,406,072,600 | 33.82 | 101 | 0.553274 | false |
sein-tao/trash-cli | unit_tests/test_list_all_trashinfo_contents.py | 2 | 1611 | from mock import Mock, call
from nose.tools import assert_equals, assert_items_equal
class TestListing:
def setUp(self):
self.trashdir = Mock()
self.trashinfo_reader = Mock()
self.listing = Listing(self.trashdir, self.trashinfo_reader)
def test_it_should_read_all_trashinfo_from_home_dir(self):
self.listing.read_home_trashdir('/path/to/trash_dir')
self.trashdir.list_trashinfos.assert_called_with(
trashdir='/path/to/trash_dir',
list_to=self.trashinfo_reader)
class TestTrashDirReader:
def test_should_list_all_trashinfo_found(self):
def files(path): yield 'file1'; yield 'file2'
os_listdir = Mock(side_effect=files)
trashdir = TrashDirReader(os_listdir)
out = Mock()
trashdir.list_trashinfos(trashdir='/path', list_to=out)
assert_items_equal([call(trashinfo='/path/file1'),
call(trashinfo='/path/file2')], out.mock_calls)
class TrashDirReader:
def __init__(self, os_listdir):
self.os_listdir = os_listdir
def list_trashinfos(self, trashdir, list_to):
import os
for entry in self.os_listdir(trashdir):
full_path = os.path.join(trashdir, entry)
list_to(trashinfo=full_path)
class Listing:
def __init__(self, trashdir, trashinfo_reader):
self.trashdir = trashdir
self.trashinfo_reader = trashinfo_reader
def read_home_trashdir(self, path):
self.trashdir.list_trashinfos(trashdir=path,
list_to=self.trashinfo_reader)
| gpl-2.0 | 7,087,272,222,413,691,000 | 34.021739 | 75 | 0.629423 | false |
ubuntu-core/snapcraft | tests/spread/plugins/v1/x-local/snaps/from-nilplugin/snap/plugins/x_local_plugin.py | 1 | 1166 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft.plugins import nil
class LocalPlugin(nil.NilPlugin):
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["foo"] = {"type": "string"}
return schema
@classmethod
def get_pull_properties(cls):
return ["foo", "stage-packages"]
@classmethod
def get_build_properties(cls):
return ["foo", "stage-packages"]
def build(self):
return self.run(["touch", "build-stamp"], self.installdir)
| gpl-3.0 | -5,125,575,185,785,636,000 | 29.684211 | 71 | 0.691252 | false |
sergiorb/askkit | allauth/account/utils.py | 11 | 13863 | from datetime import timedelta
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
import django
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils import six
from django.utils.http import urlencode
from django.utils.http import int_to_base36, base36_to_int
from django.core.exceptions import ValidationError
if django.VERSION > (1, 8,):
from collections import OrderedDict
else:
from django.utils.datastructures import SortedDict as OrderedDict
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from ..exceptions import ImmediateHttpResponse
from ..utils import (import_callable, valid_email_or_none,
get_user_model, get_request_param)
from . import signals
from .app_settings import EmailVerificationMethod
from . import app_settings
from .adapter import get_adapter
def get_next_redirect_url(request, redirect_field_name="next"):
"""
Returns the next URL to redirect to, if it was explicitly passed
via the request.
"""
redirect_to = get_request_param(request, redirect_field_name)
if not get_adapter().is_safe_url(redirect_to):
redirect_to = None
return redirect_to
def get_login_redirect_url(request, url=None, redirect_field_name="next"):
redirect_url \
= (url
or get_next_redirect_url(request,
redirect_field_name=redirect_field_name)
or get_adapter().get_login_redirect_url(request))
return redirect_url
_user_display_callable = None
def default_user_display(user):
if app_settings.USER_MODEL_USERNAME_FIELD:
return getattr(user, app_settings.USER_MODEL_USERNAME_FIELD)
else:
return force_text(user)
def user_display(user):
global _user_display_callable
if not _user_display_callable:
f = getattr(settings, "ACCOUNT_USER_DISPLAY",
default_user_display)
_user_display_callable = import_callable(f)
return _user_display_callable(user)
def user_field(user, field, *args):
"""
Gets or sets (optional) user model fields. No-op if fields do not exist.
"""
if field and hasattr(user, field):
if args:
# Setter
v = args[0]
if v:
User = get_user_model()
v = v[0:User._meta.get_field(field).max_length]
setattr(user, field, v)
else:
# Getter
return getattr(user, field)
def user_username(user, *args):
return user_field(user, app_settings.USER_MODEL_USERNAME_FIELD, *args)
def user_email(user, *args):
return user_field(user, app_settings.USER_MODEL_EMAIL_FIELD, *args)
def perform_login(request, user, email_verification,
redirect_url=None, signal_kwargs=None,
signup=False):
"""
Keyword arguments:
signup -- Indicates whether or not sending the
email is essential (during signup), or if it can be skipped (e.g. in
case email verification is optional and we are only logging in).
"""
# Local users are stopped due to form validation checking
# is_active, yet, adapter methods could toy with is_active in a
# `user_signed_up` signal. Furthermore, social users should be
# stopped anyway.
if not user.is_active:
return HttpResponseRedirect(reverse('account_inactive'))
from .models import EmailAddress
has_verified_email = EmailAddress.objects.filter(user=user,
verified=True).exists()
if email_verification == EmailVerificationMethod.NONE:
pass
elif email_verification == EmailVerificationMethod.OPTIONAL:
# In case of OPTIONAL verification: send on signup.
if not has_verified_email and signup:
send_email_confirmation(request, user, signup=signup)
elif email_verification == EmailVerificationMethod.MANDATORY:
if not has_verified_email:
send_email_confirmation(request, user, signup=signup)
return HttpResponseRedirect(
reverse('account_email_verification_sent'))
try:
get_adapter().login(request, user)
response = HttpResponseRedirect(
get_login_redirect_url(request, redirect_url))
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs)
get_adapter().add_message(request,
messages.SUCCESS,
'account/messages/logged_in.txt',
{'user': user})
except ImmediateHttpResponse as e:
response = e.response
return response
def complete_signup(request, user, email_verification, success_url,
signal_kwargs=None):
if signal_kwargs is None:
signal_kwargs = {}
signals.user_signed_up.send(sender=user.__class__,
request=request,
user=user,
**signal_kwargs)
return perform_login(request, user,
email_verification=email_verification,
signup=True,
redirect_url=success_url,
signal_kwargs=signal_kwargs)
def cleanup_email_addresses(request, addresses):
"""
Takes a list of EmailAddress instances and cleans it up, making
sure only valid ones remain, without multiple primaries etc.
Order is important: e.g. if multiple primary e-mail addresses
exist, the first one encountered will be kept as primary.
"""
from .models import EmailAddress
adapter = get_adapter()
# Let's group by `email`
e2a = OrderedDict() # maps email to EmailAddress
primary_addresses = []
verified_addresses = []
primary_verified_addresses = []
for address in addresses:
# Pick up only valid ones...
email = valid_email_or_none(address.email)
if not email:
continue
# ... and non-conflicting ones...
if (app_settings.UNIQUE_EMAIL
and EmailAddress.objects
.filter(email__iexact=email)
.exists()):
continue
a = e2a.get(email.lower())
if a:
a.primary = a.primary or address.primary
a.verified = a.verified or address.verified
else:
a = address
a.verified = a.verified or adapter.is_email_verified(request,
a.email)
e2a[email.lower()] = a
if a.primary:
primary_addresses.append(a)
if a.verified:
primary_verified_addresses.append(a)
if a.verified:
verified_addresses.append(a)
# Now that we got things sorted out, let's assign a primary
if primary_verified_addresses:
primary_address = primary_verified_addresses[0]
elif verified_addresses:
# Pick any verified as primary
primary_address = verified_addresses[0]
elif primary_addresses:
# Okay, let's pick primary then, even if unverified
primary_address = primary_addresses[0]
elif e2a:
# Pick the first
        primary_address = list(e2a.values())[0]
else:
# Empty
primary_address = None
# There can only be one primary
for a in e2a.values():
a.primary = primary_address.email.lower() == a.email.lower()
return list(e2a.values()), primary_address
def setup_user_email(request, user, addresses):
"""
Creates proper EmailAddress for the user that was just signed
up. Only sets up, doesn't do any other handling such as sending
out email confirmation mails etc.
"""
from .models import EmailAddress
assert EmailAddress.objects.filter(user=user).count() == 0
priority_addresses = []
# Is there a stashed e-mail?
adapter = get_adapter()
stashed_email = adapter.unstash_verified_email(request)
if stashed_email:
priority_addresses.append(EmailAddress(user=user,
email=stashed_email,
primary=True,
verified=True))
email = user_email(user)
if email:
priority_addresses.append(EmailAddress(user=user,
email=email,
primary=True,
verified=False))
addresses, primary = cleanup_email_addresses(request,
priority_addresses
+ addresses)
for a in addresses:
a.user = user
a.save()
EmailAddress.objects.fill_cache_for_user(user, addresses)
if (primary
and email
and email.lower() != primary.email.lower()):
user_email(user, primary.email)
user.save()
return primary
def send_email_confirmation(request, user, signup=False):
"""
E-mail verification mails are sent:
a) Explicitly: when a user signs up
b) Implicitly: when a user attempts to log in using an unverified
e-mail while EMAIL_VERIFICATION is mandatory.
Especially in case of b), we want to limit the number of mails
sent (consider a user retrying a few times), which is why there is
a cooldown period before sending a new mail.
"""
from .models import EmailAddress, EmailConfirmation
COOLDOWN_PERIOD = timedelta(minutes=3)
email = user_email(user)
if email:
try:
email_address = EmailAddress.objects.get_for_user(user, email)
if not email_address.verified:
send_email = not EmailConfirmation.objects \
.filter(sent__gt=now() - COOLDOWN_PERIOD,
email_address=email_address) \
.exists()
if send_email:
email_address.send_confirmation(request,
signup=signup)
else:
send_email = False
except EmailAddress.DoesNotExist:
send_email = True
email_address = EmailAddress.objects.add_email(request,
user,
email,
signup=signup,
confirm=True)
assert email_address
# At this point, if we were supposed to send an email we have sent it.
if send_email:
get_adapter().add_message(request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': email})
if signup:
request.session['account_user'] = user.pk
def sync_user_email_addresses(user):
"""
Keep user.email in sync with user.emailaddress_set.
Under some circumstances the user.email may not have ended up as
an EmailAddress record, e.g. in the case of manually created admin
users.
"""
from .models import EmailAddress
email = user_email(user)
if email and not EmailAddress.objects.filter(user=user,
email__iexact=email).exists():
if app_settings.UNIQUE_EMAIL \
and EmailAddress.objects.filter(email__iexact=email).exists():
# Bail out
return
EmailAddress.objects.create(user=user,
email=email,
primary=False,
verified=False)
def filter_users_by_email(email):
"""Return list of users by email address
Typically one, at most just a few in length. First we look through
EmailAddress table, than customisable User model table. Add results
together avoiding SQL joins and deduplicate.
"""
from .models import EmailAddress
User = get_user_model()
mails = EmailAddress.objects.filter(email__iexact=email)
users = [e.user for e in mails.prefetch_related('user')]
if app_settings.USER_MODEL_EMAIL_FIELD:
q_dict = {app_settings.USER_MODEL_EMAIL_FIELD + '__iexact': email}
users += list(User.objects.filter(**q_dict))
return list(set(users))
def passthrough_next_redirect_url(request, url, redirect_field_name):
assert url.find("?") < 0 # TODO: Handle this case properly
next_url = get_next_redirect_url(request, redirect_field_name)
if next_url:
url = url + '?' + urlencode({redirect_field_name: next_url})
return url
def user_pk_to_url_str(user):
ret = user.pk
if isinstance(ret, six.integer_types):
ret = int_to_base36(user.pk)
return ret
def url_str_to_user_pk(s):
User = get_user_model()
# TODO: Ugh, isn't there a cleaner way to determine whether or not
# the PK is a str-like field?
try:
User._meta.pk.to_python('a')
pk = s
except ValidationError:
pk = base36_to_int(s)
return pk
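# Illustrative round-trip (an assumption for documentation, not part of allauth):
# user_pk_to_url_str() base36-encodes integer primary keys for use in URLs and
# url_str_to_user_pk() reverses it, e.g. a user with pk=123 maps to '3f' and
# url_str_to_user_pk('3f') returns 123; non-integer (string) primary keys pass
# through unchanged.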
| apache-2.0 | -226,021,620,606,552,060 | 35.481579 | 79 | 0.582053 | false |
unlimitedlabs/orchestra | orchestra/bots/tests/test_basebot.py | 2 | 2810 | from orchestra.bots.basebot import BaseBot
from orchestra.bots.errors import SlackCommandInvalidRequest
from orchestra.bots.tests.fixtures import get_mock_slack_data
from orchestra.tests.helpers import OrchestraTestCase
class BaseBotTest(OrchestraTestCase):
token = get_mock_slack_data().get('token')
def test_help(self):
bot = BaseBot(self.token)
mock_slack_data = get_mock_slack_data(text='help')
with self.assertRaises(NotImplementedError):
bot.dispatch(mock_slack_data)
def test_validate(self):
"""
Ensure we only listen to valid requests.
"""
mock_slack_data = get_mock_slack_data()
# Test all requests allowed
bot = BaseBot(self.token)
self.assertEqual(mock_slack_data, bot.validate(mock_slack_data))
# verify we validate the token
bot = BaseBot('')
with self.assertRaises(SlackCommandInvalidRequest):
bot.validate(mock_slack_data)
# verify that we perform validation on each of the fields
validated_fields = ['allowed_team_ids', 'allowed_domains',
'allowed_channel_ids', 'allowed_channel_names',
'allowed_user_ids', 'allowed_user_names',
'allowed_commands']
for field in validated_fields:
config = {field: []}
bot = BaseBot(self.token, **config)
with self.assertRaises(SlackCommandInvalidRequest):
bot.validate(mock_slack_data)
config = {'allowed_{}s'.format(field): [mock_slack_data.get(field)]
for field in validated_fields}
bot = BaseBot(self.token, **config)
self.assertEqual(mock_slack_data, bot.validate(mock_slack_data))
def test_dispatch(self):
bot = BaseBot(self.token)
bot.commands = (
(r'test_command (?P<test_param>[0-9]+)', 'test_command'),
)
def test_command(test_param):
return test_param
bot.test_command = test_command
# Assign the testing command
bot.__init__(self.token)
# Test a valid command
text = 'test_command 5'
mock_slack_data = get_mock_slack_data(text=text)
result = bot.dispatch(mock_slack_data)
self.assertEqual('5', result)
# Test a valid command with missing param
text = 'test_command'
mock_slack_data = get_mock_slack_data(text=text)
result = bot.dispatch(mock_slack_data)
self.assertEqual(bot.no_command_found(text), result)
# Test invalid command
text = 'invalid'
mock_slack_data = get_mock_slack_data(text=text)
result = bot.dispatch(mock_slack_data)
self.assertEqual(bot.no_command_found(text), result)
| apache-2.0 | 1,715,188,096,944,312,300 | 36.972973 | 75 | 0.611388 | false |
opendroid-Team/enigma2-4.1 | lib/python/Screens/Ci.py | 5 | 11213 | from Screen import Screen
from Components.ActionMap import ActionMap
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigSubList, getConfigListEntry, KEY_LEFT, KEY_RIGHT, KEY_0, ConfigNothing, ConfigPIN
from Components.ConfigList import ConfigList
from Components.SystemInfo import SystemInfo
from enigma import eTimer, eDVBCI_UI, eDVBCIInterfaces
MAX_NUM_CI = 4
def setCIBitrate(configElement):
if configElement.getValue() == "no":
eDVBCI_UI.getInstance().setClockRate(configElement.slotid, eDVBCI_UI.rateNormal)
else:
eDVBCI_UI.getInstance().setClockRate(configElement.slotid, eDVBCI_UI.rateHigh)
def InitCiConfig():
config.ci = ConfigSubList()
for slot in range(MAX_NUM_CI):
config.ci.append(ConfigSubsection())
config.ci[slot].canDescrambleMultipleServices = ConfigSelection(choices = [("auto", _("Auto")), ("no", _("No")), ("yes", _("Yes"))], default = "auto")
if SystemInfo["CommonInterfaceSupportsHighBitrates"]:
config.ci[slot].canHandleHighBitrates = ConfigSelection(choices = [("no", _("No")), ("yes", _("Yes"))], default = "yes")
config.ci[slot].canHandleHighBitrates.slotid = slot
config.ci[slot].canHandleHighBitrates.addNotifier(setCIBitrate)
class MMIDialog(Screen):
def __init__(self, session, slotid, action, handler = eDVBCI_UI.getInstance(), wait_text = _("wait for ci...") ):
Screen.__init__(self, session)
print "MMIDialog with action" + str(action)
self.mmiclosed = False
self.tag = None
self.slotid = slotid
self.timer = eTimer()
self.timer.callback.append(self.keyCancel)
#else the skins fails
self["title"] = Label("")
self["subtitle"] = Label("")
self["bottom"] = Label("")
self["entries"] = ConfigList([ ])
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.okbuttonClick,
"cancel": self.keyCancel,
#for PIN
"left": self.keyLeft,
"right": self.keyRight,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.action = action
self.handler = handler
self.wait_text = wait_text
if action == 2: #start MMI
handler.startMMI(self.slotid)
self.showWait()
elif action == 3: #mmi already there (called from infobar)
self.showScreen()
def addEntry(self, list, entry):
if entry[0] == "TEXT": #handle every item (text / pin only?)
list.append( (entry[1], ConfigNothing(), entry[2]) )
if entry[0] == "PIN":
pinlength = entry[1]
if entry[3] == 1:
# masked pins:
x = ConfigPIN(0, len = pinlength, censor = "*")
else:
# unmasked pins:
x = ConfigPIN(0, len = pinlength)
x.addEndNotifier(self.pinEntered)
self["subtitle"].setText(entry[2])
list.append( getConfigListEntry("", x) )
self["bottom"].setText(_("please press OK when ready"))
def pinEntered(self, value):
self.okbuttonClick()
def okbuttonClick(self):
self.timer.stop()
if not self.tag:
return
if self.tag == "WAIT":
print "do nothing - wait"
elif self.tag == "MENU":
print "answer MENU"
cur = self["entries"].getCurrent()
if cur:
self.handler.answerMenu(self.slotid, cur[2])
else:
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "LIST":
print "answer LIST"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
cur = self["entries"].getCurrent()
answer = str(cur[1].getValue())
length = len(answer)
while length < cur[1].getLength():
answer = '0'+answer
length+=1
self.handler.answerEnq(self.slotid, answer)
self.showWait()
def closeMmi(self):
self.timer.stop()
self.close(self.slotid)
def keyCancel(self):
self.timer.stop()
if not self.tag or self.mmiclosed:
self.closeMmi()
elif self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
self.closeMmi()
elif self.tag in ( "MENU", "LIST" ):
print "cancel list"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
print "cancel enq"
self.handler.cancelEnq(self.slotid)
self.showWait()
else:
print "give cancel action to ci"
def keyConfigEntry(self, key):
self.timer.stop()
try:
self["entries"].handleKey(key)
except:
pass
def keyNumberGlobal(self, number):
self.timer.stop()
self.keyConfigEntry(KEY_0 + number)
def keyLeft(self):
self.timer.stop()
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.timer.stop()
self.keyConfigEntry(KEY_RIGHT)
def updateList(self, list):
List = self["entries"]
try:
List.instance.moveSelectionTo(0)
except:
pass
List.l.setList(list)
def showWait(self):
self.tag = "WAIT"
self["title"].setText("")
self["subtitle"].setText("")
self["bottom"].setText("")
list = [(self.wait_text, ConfigNothing())]
self.updateList(list)
def showScreen(self):
screen = self.handler.getMMIScreen(self.slotid)
list = [ ]
self.timer.stop()
if len(screen) > 0 and screen[0][0] == "CLOSE":
timeout = screen[0][1]
self.mmiclosed = True
if timeout > 0:
self.timer.start(timeout*1000, True)
else:
self.keyCancel()
else:
self.mmiclosed = False
self.tag = screen[0][0]
for entry in screen:
if entry[0] == "PIN":
self.addEntry(list, entry)
else:
if entry[0] == "TITLE":
self["title"].setText(entry[1])
elif entry[0] == "SUBTITLE":
self["subtitle"].setText(entry[1])
elif entry[0] == "BOTTOM":
self["bottom"].setText(entry[1])
elif entry[0] == "TEXT":
self.addEntry(list, entry)
self.updateList(list)
def ciStateChanged(self):
do_close = False
if self.action == 0: #reset
do_close = True
if self.action == 1: #init
do_close = True
#module still there ?
if self.handler.getState(self.slotid) != 2:
do_close = True
#mmi session still active ?
if self.handler.getMMIState(self.slotid) != 1:
do_close = True
if do_close:
self.closeMmi()
elif self.action > 1 and self.handler.availableMMI(self.slotid) == 1:
self.showScreen()
#FIXME: check for mmi-session closed
class CiMessageHandler:
def __init__(self):
self.session = None
self.ci = { }
self.dlgs = { }
eDVBCI_UI.getInstance().ciStateChanged.get().append(self.ciStateChanged)
SystemInfo["CommonInterface"] = eDVBCIInterfaces.getInstance().getNumOfSlots() > 0
try:
file = open("/proc/stb/tsmux/ci0_tsclk", "r")
file.close()
SystemInfo["CommonInterfaceSupportsHighBitrates"] = True
except:
SystemInfo["CommonInterfaceSupportsHighBitrates"] = False
def setSession(self, session):
self.session = session
def ciStateChanged(self, slot):
if slot in self.ci:
self.ci[slot](slot)
else:
if slot in self.dlgs:
self.dlgs[slot].ciStateChanged()
elif eDVBCI_UI.getInstance().availableMMI(slot) == 1:
if self.session and not config.usage.hide_ci_messages.getValue():
self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 3)
def dlgClosed(self, slot):
if slot in self.dlgs:
del self.dlgs[slot]
def registerCIMessageHandler(self, slot, func):
self.unregisterCIMessageHandler(slot)
self.ci[slot] = func
def unregisterCIMessageHandler(self, slot):
if slot in self.ci:
del self.ci[slot]
CiHandler = CiMessageHandler()
class CiSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.setTitle(_("Common Interface"))
self["actions"] = ActionMap(["OkCancelActions", "CiSelectionActions"],
{
"left": self.keyLeft,
"right": self.keyLeft,
"ok": self.okbuttonClick,
"cancel": self.cancel
},-1)
self.dlg = None
self.state = { }
self.list = [ ]
for slot in range(MAX_NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
self.appendEntries(slot, state)
CiHandler.registerCIMessageHandler(slot, self.ciStateChanged)
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["entries"] = menuList
self["entries"].onSelectionChanged.append(self.selectionChanged)
self["text"] = Label(_("Slot %d")% 1)
def selectionChanged(self):
cur_idx = self["entries"].getCurrentIndex()
self["text"].setText(_("Slot %d")%((cur_idx / 5)+1))
def keyConfigEntry(self, key):
try:
self["entries"].handleKey(key)
self["entries"].getCurrent()[1].save()
except:
pass
def keyLeft(self):
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.keyConfigEntry(KEY_RIGHT)
def appendEntries(self, slot, state):
self.state[slot] = state
self.list.append( (_("Reset"), ConfigNothing(), 0, slot) )
self.list.append( (_("Init"), ConfigNothing(), 1, slot) )
if self.state[slot] == 0: #no module
self.list.append( (_("no module found"), ConfigNothing(), 2, slot) )
elif self.state[slot] == 1: #module in init
self.list.append( (_("init module"), ConfigNothing(), 2, slot) )
elif self.state[slot] == 2: #module ready
#get appname
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list.append( (appname, ConfigNothing(), 2, slot) )
self.list.append(getConfigListEntry(_("Multiple service support"), config.ci[slot].canDescrambleMultipleServices))
if SystemInfo["CommonInterfaceSupportsHighBitrates"]:
self.list.append(getConfigListEntry(_("High bitrate support"), config.ci[slot].canHandleHighBitrates))
def updateState(self, slot):
state = eDVBCI_UI.getInstance().getState(slot)
self.state[slot] = state
slotidx=0
while len(self.list[slotidx]) < 3 or self.list[slotidx][3] != slot:
slotidx += 1
slotidx += 1 # do not change Reset
slotidx += 1 # do not change Init
if state == 0: #no module
self.list[slotidx] = (_("no module found"), ConfigNothing(), 2, slot)
elif state == 1: #module in init
self.list[slotidx] = (_("init module"), ConfigNothing(), 2, slot)
elif state == 2: #module ready
#get appname
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list[slotidx] = (appname, ConfigNothing(), 2, slot)
lst = self["entries"]
lst.list = self.list
lst.l.setList(self.list)
def ciStateChanged(self, slot):
if self.dlg:
self.dlg.ciStateChanged()
else:
state = eDVBCI_UI.getInstance().getState(slot)
if self.state[slot] != state:
#print "something happens"
self.state[slot] = state
self.updateState(slot)
def dlgClosed(self, slot):
self.dlg = None
def okbuttonClick(self):
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 0: #reset
eDVBCI_UI.getInstance().setReset(slot)
elif action == 1: #init
eDVBCI_UI.getInstance().setInit(slot)
elif self.state[slot] == 2:
self.dlg = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, action)
def cancel(self):
for slot in range(MAX_NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
CiHandler.unregisterCIMessageHandler(slot)
self.close()
| gpl-2.0 | -3,939,962,232,657,956,400 | 27.387342 | 160 | 0.672791 | false |
VUIIS/seam | setup.py | 1 | 3188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Scott Burns <[email protected]>'
__copyright__ = 'Copyright 2014 Vanderbilt University. All Rights Reserved'
from setuptools import setup, find_packages
import os
import re
from io import open
import codecs
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Code taken from pip's setup.py
def find_version(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
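# Illustrative usage (not part of the original file): assuming seam/__init__.py
# contains a line like __version__ = '0.1.0', find_version('seam', '__init__.py')
# returns '0.1.0'. The version string here is only an example.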
# Get the long description from the relevant file
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name="seam",
version=find_version('seam', '__init__.py'),
description="A simple layer between neuroimaging tools and your data",
long_description=long_description,
# The project URL.
url='http://github.com/VUIIS/seam',
# Author details
author='Scott Burns',
author_email='[email protected]',
# Choose your license
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
# What does your project relate to?
keywords='neuroimaging data analysis',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# If there are data files included in your packages, specify them here.
package_data={
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={'console_scripts': [
'build-recon-v1 = seam.freesurfer.v1.recipe:main']
},
)
| mit | 555,408,630,373,452,800 | 33.27957 | 79 | 0.643036 | false |
davvid/qtpy | qtpy/tests/test_patch_qheaderview.py | 1 | 3524 | from __future__ import absolute_import
from qtpy import PYSIDE, PYQT4
from qtpy.QtWidgets import QApplication
from qtpy.QtWidgets import QHeaderView
from qtpy.QtCore import Qt
from qtpy.QtCore import QAbstractListModel
import pytest
def get_qapp(icon_path=None):
qapp = QApplication.instance()
if qapp is None:
qapp = QApplication([''])
return qapp
def test_patched_qheaderview():
"""
This will test whether QHeaderView has the new methods introduced in Qt5.
It will then create an instance of QHeaderView and test that no exceptions
are raised and that some basic behaviour works.
"""
assert QHeaderView.sectionsClickable is not None
assert QHeaderView.sectionsMovable is not None
assert QHeaderView.sectionResizeMode is not None
assert QHeaderView.setSectionsClickable is not None
assert QHeaderView.setSectionsMovable is not None
assert QHeaderView.setSectionResizeMode is not None
# setup a model and add it to a headerview
qapp = get_qapp()
headerview = QHeaderView(Qt.Horizontal)
class Model(QAbstractListModel):
pass
model = Model()
headerview.setModel(model)
assert headerview.count() == 1
# test it
assert isinstance(headerview.sectionsClickable(), bool)
assert isinstance(headerview.sectionsMovable(), bool)
if PYSIDE:
assert isinstance(headerview.sectionResizeMode(0),
QHeaderView.ResizeMode)
else:
assert isinstance(headerview.sectionResizeMode(0), int)
headerview.setSectionsClickable(True)
assert headerview.sectionsClickable() == True
headerview.setSectionsClickable(False)
assert headerview.sectionsClickable() == False
headerview.setSectionsMovable(True)
assert headerview.sectionsMovable() == True
headerview.setSectionsMovable(False)
assert headerview.sectionsMovable() == False
headerview.setSectionResizeMode(QHeaderView.Interactive)
assert headerview.sectionResizeMode(0) == QHeaderView.Interactive
headerview.setSectionResizeMode(QHeaderView.Fixed)
assert headerview.sectionResizeMode(0) == QHeaderView.Fixed
headerview.setSectionResizeMode(QHeaderView.Stretch)
assert headerview.sectionResizeMode(0) == QHeaderView.Stretch
headerview.setSectionResizeMode(QHeaderView.ResizeToContents)
assert headerview.sectionResizeMode(0) == QHeaderView.ResizeToContents
headerview.setSectionResizeMode(0, QHeaderView.Interactive)
assert headerview.sectionResizeMode(0) == QHeaderView.Interactive
headerview.setSectionResizeMode(0, QHeaderView.Fixed)
assert headerview.sectionResizeMode(0) == QHeaderView.Fixed
headerview.setSectionResizeMode(0, QHeaderView.Stretch)
assert headerview.sectionResizeMode(0) == QHeaderView.Stretch
headerview.setSectionResizeMode(0, QHeaderView.ResizeToContents)
assert headerview.sectionResizeMode(0) == QHeaderView.ResizeToContents
# test that the old methods in Qt4 raise exceptions
if PYQT4 or PYSIDE:
with pytest.raises(Exception):
headerview.isClickable()
with pytest.raises(Exception):
headerview.isMovable()
with pytest.raises(Exception):
headerview.resizeMode(0)
with pytest.raises(Exception):
headerview.setClickable(True)
with pytest.raises(Exception):
            headerview.setMovable(True)
with pytest.raises(Exception):
headerview.setResizeMode(0, QHeaderView.Interactive)
| mit | 2,025,572,770,592,096,300 | 37.725275 | 78 | 0.742054 | false |
Rositsazz/hack33 | config/urls.py | 2 | 1599 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('hack33.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| mit | -3,431,784,249,311,826,000 | 38 | 110 | 0.674171 | false |
heenbo/mosquitto-heenbo | test/lib/03-publish-c2b-qos1-disconnect.py | 7 | 2303 | #!/usr/bin/env python
# Test whether a client sends a correct PUBLISH to a topic with QoS 1, then responds correctly to a disconnect.
import inspect
import os
import subprocess
import socket
import sys
import time
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("publish-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
publish_packet = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message")
publish_packet_dup = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message", dup=True)
puback_packet = mosq_test.gen_puback(mid)
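# Expected exchange (summary of the checks below): the client connects and
# publishes with QoS 1; this fake broker closes the connection before acking,
# the client reconnects, retries the PUBLISH with the DUP flag set, receives
# the PUBACK and finally sends DISCONNECT.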
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(15)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "publish", publish_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "retried publish", publish_packet_dup):
conn.send(puback_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| gpl-3.0 | 7,890,993,740,533,308,000 | 28.909091 | 129 | 0.663048 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/connectivity_issue_py3.py | 1 | 2129 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityIssue(Model):
"""Information about an issue encountered in the process of checking for
connectivity.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar origin: The origin of the issue. Possible values include: 'Local',
'Inbound', 'Outbound'
:vartype origin: str or ~azure.mgmt.network.v2017_09_01.models.Origin
:ivar severity: The severity of the issue. Possible values include:
'Error', 'Warning'
:vartype severity: str or ~azure.mgmt.network.v2017_09_01.models.Severity
:ivar type: The type of issue. Possible values include: 'Unknown',
'AgentStopped', 'GuestFirewall', 'DnsResolution', 'SocketBind',
'NetworkSecurityRule', 'UserDefinedRoute', 'PortThrottled', 'Platform'
:vartype type: str or ~azure.mgmt.network.v2017_09_01.models.IssueType
:ivar context: Provides additional context on the issue.
:vartype context: list[dict[str, str]]
"""
_validation = {
'origin': {'readonly': True},
'severity': {'readonly': True},
'type': {'readonly': True},
'context': {'readonly': True},
}
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'context': {'key': 'context', 'type': '[{str}]'},
}
def __init__(self, **kwargs) -> None:
super(ConnectivityIssue, self).__init__(**kwargs)
self.origin = None
self.severity = None
self.type = None
self.context = None
| mit | -768,324,051,524,460,200 | 37.709091 | 77 | 0.602161 | false |
yassen-itlabs/py-linux-traffic-control | pyltc/util/cmdline.py | 1 | 5206 | """
Command line execution utility module.
"""
import time
import subprocess
def popen_factory():
"""Returns subprocess.Popen on Linux, otherwise returns MockPopen
class assuming this is a test run."""
import platform
if platform.system() == 'Linux':
return subprocess.Popen
class MockPopen(object):
"""Mocks command execution to allow testing on non-Linux OS."""
def __init__(self, command_list, *args, **kw):
self._cmd_list = command_list
self._args = args
self._kw = kw
self.returncode = 0
def communicate(self, timeout=None):
if self._cmd_list[0].endswith('echo'):
return bytes(" ".join(self._cmd_list[1:]), encoding='utf-8'), None
if self._cmd_list[0].endswith('/bin/true'):
return None, None
if self._cmd_list[0].endswith('/bin/false'):
self.returncode = 1
return None, None
if self._cmd_list[0].endswith('sleep'):
pause = float(self._cmd_list[1])
if timeout and timeout < pause:
time.sleep(timeout)
cmd = " ".join(self._cmd_list)
raise subprocess.TimeoutExpired(cmd, timeout)
time.sleep(pause)
return None, None
raise RuntimeError("UNREACHABLE")
return MockPopen
class CommandFailed(Exception):
"""Rased when a command line execution yielded a non-zero return code."""
def __init__(self, command):
assert isinstance(command, CommandLine) and command.returncode, \
"expecting failed command, got {!r}".format(command)
output = command.stderr
if command.stdout:
output = "out:{}\nerr: {}".format(command.stdout, output)
msg = "Command failed (rc={}, {!r})".format(command.returncode, command.cmdline)
if output:
msg += ": " + output
super(CommandFailed, self).__init__(msg)
self._command = command
@property
def returncode(self):
return self._command.returncode
class CommandLine(object):
"""Command line execution class."""
def __init__(self, cmdline, ignore_errors=False, verbose=False, sudo=False):
self._cmdline = cmdline
self._ignore_errors = ignore_errors
self._verbose = verbose
self._sudo = sudo
self._returncode = None
self._stdout = None
self._stderr = None
self._proc = None
@property
def cmdline(self):
return self._cmdline
def _construct_cmd_list(self, command):
"""Recursively process the command string to exctract any quoted segments
as a single command element.
"""
QUOTE = '"'
def validate(command):
quote_count = command.count(QUOTE)
if quote_count % 2 != 0:
raise RuntimeError('Unbalanced quotes in command: {!r}'.format(command))
def construct_the_list(command):
if QUOTE not in command:
return command.split()
left, mid, right = command.split(QUOTE, 2)
return left.split() + [mid] + construct_the_list(right) # recursively process the right part
validate(command)
result = ['sudo'] if self._sudo else []
result += construct_the_list(command)
return result
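    # Illustrative sketch (not from the original module): with sudo disabled,
    # _construct_cmd_list('tc qdisc add dev "my eth" root') is expected to give
    # ['tc', 'qdisc', 'add', 'dev', 'my eth', 'root'] -- the quoted segment
    # survives as a single element; the device name is a made-up example.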
def terminate(self):
# TODO: refactor
if not self._proc:
return None
self._proc.terminate()
rc = self._proc.poll()
c = 0
        while rc is None:
time.sleep(0.1)
if c > 19:
self._proc.kill()
break
c += 1
rc = self._proc.poll()
time.sleep(0.1)
return self._proc.poll()
def execute_daemon(self):
command_list = self._construct_cmd_list(self._cmdline)
PopenClass = popen_factory()
proc = PopenClass(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self._proc = proc
return self # allows for one-line creation + execution with assignment
def execute(self, timeout=10):
"""Prepares and executes the command."""
command_list = self._construct_cmd_list(self._cmdline)
PopenClass = popen_factory()
proc = PopenClass(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self._proc = proc
stdout, stderr = proc.communicate(timeout=timeout)
self._stdout = stdout.decode('unicode_escape') if stdout else ""
self._stderr = stderr.decode('unicode_escape') if stderr else ""
rc = proc.returncode
self._returncode = rc
if self._verbose:
print(">", " ".join(command_list))
if rc and not self._ignore_errors:
raise CommandFailed(self)
return self # allows for one-line creation + execution with assignment
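    # Hedged usage sketch based only on the methods defined above:
    #   cmd = CommandLine('ip link show', verbose=True).execute()
    #   cmd.returncode / cmd.stdout / cmd.stderr then hold the results.
    # The 'ip link show' command is only an example.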
@property
def returncode(self):
return self._returncode
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
| mit | 8,862,398,151,114,089,000 | 32.805195 | 105 | 0.57184 | false |
chemelnucfin/tensorflow | tensorflow/python/keras/mixed_precision/experimental/__init__.py | 8 | 1179 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mixed precision API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.mixed_precision.experimental.loss_scale_optimizer import LossScaleOptimizer
from tensorflow.python.keras.mixed_precision.experimental.policy import global_policy
from tensorflow.python.keras.mixed_precision.experimental.policy import Policy
from tensorflow.python.keras.mixed_precision.experimental.policy import set_policy
| apache-2.0 | -6,846,957,284,458,253,000 | 50.26087 | 104 | 0.742154 | false |
xiaokangwang/KKWebVideoDL-X | youtube_dl/extractor/wimp.py | 6 | 1302 | import re
import base64
from .common import InfoExtractor
class WimpIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?wimp\.com/([^/]+)/'
_TEST = {
u'url': u'http://www.wimp.com/deerfence/',
u'file': u'deerfence.flv',
u'md5': u'8b215e2e0168c6081a1cf84b2846a2b5',
u'info_dict': {
u"title": u"Watch Till End: Herd of deer jump over a fence."
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
title = self._search_regex(r'<meta name="description" content="(.+?)" />',webpage, 'video title')
thumbnail_url = self._search_regex(r'<meta property="og\:image" content="(.+?)" />', webpage,'video thumbnail')
googleString = self._search_regex("googleCode = '(.*?)'", webpage, 'file url')
googleString = base64.b64decode(googleString).decode('ascii')
final_url = self._search_regex('","(.*?)"', googleString,'final video url')
ext = final_url.rpartition(u'.')[2]
return [{
'id': video_id,
'url': final_url,
'ext': ext,
'title': title,
'thumbnail': thumbnail_url,
}]
| gpl-3.0 | 4,256,060,455,791,585,300 | 35.166667 | 119 | 0.544547 | false |
benthomasson/msa-designer | msa_designer/server.py | 1 | 3482 |
from gevent import monkey
monkey.patch_all()
import os
import socket
import pkg_resources
import logging
import hashlib
import yaml
import json
from pprint import pprint
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('msa_designer_web', 'templates'))
from bottle import route, request
from bottle import static_file
from bottle import redirect
from socketio.namespace import BaseNamespace
from socketio.mixins import BroadcastMixin
logger = logging.getLogger('msa-designer.server')
saved_msas_root = os.path.abspath("saved_msas")
load_msas_root = os.path.abspath("loaded_msas")
if not os.path.exists(saved_msas_root):
os.makedirs(saved_msas_root)
if not os.path.exists(load_msas_root):
os.makedirs(load_msas_root)
class AgentNamespace(BaseNamespace, BroadcastMixin):
def initialize(self):
logger.debug("INIT")
print self.__dict__.keys()
print self.ns_name
print self.request
def on_save(self, message):
logger.debug("save %s", message)
app_name = message.get('app', 'msa')
data = yaml.safe_dump(message, default_flow_style=False)
save_id = hashlib.sha1(data).hexdigest()
url = '/save/{0}/{1}.yml'.format(save_id, app_name)
with open(os.path.join(saved_msas_root, save_id), 'w') as f:
f.write(data)
self.emit('saved', dict(url=url))
from socketio import socketio_manage
@route('/status')
def status():
return "running"
@route('/upload', method='POST')
def do_upload():
upload = request.files.get('upload')
data = upload.file.read()
print upload.filename, data
load_id = hashlib.sha1(data).hexdigest()
with open(os.path.join(load_msas_root, load_id), 'w') as f:
f.write(data)
return redirect("/msa/{0}".format(load_id))
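# Illustrative flow (derived from the handlers above): POSTing a YAML file to
# /upload stores it under its SHA-1 digest in loaded_msas/ and redirects the
# browser to /msa/<digest>, which renders it through the index template.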
@route('/socket.io/<namespace:path>')
def index(namespace):
socketio_manage(request.environ, {'/msa-designer': AgentNamespace})
@route('/save/<save_id:path>/<name:path>')
def save(save_id, name):
logger.debug("save_id %s", save_id)
return static_file(save_id, root=saved_msas_root, mimetype="text/yaml", download=name)
@route('/')
def root():
return env.get_template('index.html').render()
@route('/msa/<load_id:path>')
def load_msa(load_id):
with open(os.path.join(load_msas_root, load_id)) as f:
msa = json.dumps(yaml.load(f.read()))
print msa
return env.get_template('index.html').render(msa_to_load=msa)
@route('/static/<filename:path>')
def serve_static(filename):
return static_file(filename, root=pkg_resources.resource_filename('msa_designer_web', 'static'))
from bottle import ServerAdapter
class SocketIOServer(ServerAdapter):
def run(self, handler):
from socketio.server import SocketIOServer
resource = self.options.get('resource', 'socket.io')
policy_server = self.options.get('policy_server', False)
done = False
while not done:
try:
SocketIOServer((self.host, self.port),
handler,
resource=resource,
policy_server=policy_server,
transports=['websocket', 'xhr-multipart', 'xhr-polling']).serve_forever()
except socket.error, e:
if e.errno == 98:
logger.warning(str(e))
raise
else:
raise
| bsd-3-clause | -5,581,207,329,173,082,000 | 26.203125 | 104 | 0.635267 | false |
hudora/huTools | huTools/world.py | 1 | 2969 | #!/usr/bin/env python
# encoding: utf-8
"""
hutools/world - countries of the world (to be precise: countries we do business with)
Most interesting are the constants COUNTRY_CHOICES and COUNTRIES.
COUNTRY_CHOICES = [('DE', 'Deutschland'), ('AT', u'Österreich'), ...]
COUNTRIES = ['DE', 'AT', ...]
Created by Maximillian Dornseif on 2007-05-01.
Copyright (c) 2007, 2010 HUDORA GmbH. BSD Licensed.
"""
import doctest
import sys
COUNTRY_CHOICES = [('DE', u'Deutschland'),
('AR', u'Argentinien'),
('AT', u'Österreich'),
('AU', u'Australien'),
('BE', u'Belgien'),
('BG', u'Bulgarien'),
('CH', u'Schweiz'),
('CY', u'Zypern'),
('CZ', u'Tschechien'),
('DK', u'Dänemark'),
('EE', u'Estland'),
('ES', u'Spanien'),
('FI', u'Finnland'),
('FR', u'Frankreich'),
('GB', u'Grossbritannien'),
('GR', u'Griechenland'),
('HR', u'Kroatien'),
('HU', u'Ungarn'),
('IE', u'Irland'),
('IL', u'Israel'),
('IT', u'Italien'),
('LI', u'Liechtenstein'),
('LT', u'Litauen'),
('LU', u'Luxemburg'),
('LV', u'Lettland'),
('MT', u'Malta'),
('NL', u'Niederlande'),
('NO', u'Norwegen'),
('PL', u'Polen'),
('PT', u'Portugal'),
('RO', u'Rumänien'),
('RS', u'Republik Serbien'),
('RU', u'Russland'),
('SE', u'Schweden'),
('SG', u'Singapur'),
('SI', u'Slowenien'),
('SK', u'Slowakei'),
('US', u'USA'),
('ZA', u'Südafrika'),
]
COUNTRIES = dict(COUNTRY_CHOICES).keys()
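# Example: 'DE' in COUNTRIES -> True. Note that under Python 3 dict.keys()
# returns a view object rather than the plain list shown in the module docstring.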
# As of July 2013 - these MUST also appear in COUNTRY_CHOICES.
EUROPEAN_UNION = ['BE', 'IT', 'RO', 'BG', 'LV', 'SE',
'DK', 'LT', 'SK', 'DE', 'LU', 'SI',
'EE', 'MT', 'ES', 'FI', 'NL', 'CZ',
'FR', 'AT', 'HU', 'GR', 'PL', 'GB',
'IE', 'PT', 'CY', 'HR']
def in_european_union(isoland):
"""
    Return whether a country is a member of the European Union.
>>> in_european_union('DE')
True
>>> in_european_union('CH')
False
>>> all(map(in_european_union, EUROPEAN_UNION))
True
>>> non_eu_countries = set((abrev for abrev, name in COUNTRY_CHOICES)) - set(EUROPEAN_UNION)
>>> any(map(in_european_union, non_eu_countries))
False
"""
return isoland.upper() in EUROPEAN_UNION
if __name__ == "__main__":
failure_count, test_count = doctest.testmod()
sys.exit(failure_count)
| bsd-3-clause | 1,703,927,227,999,649,300 | 31.911111 | 96 | 0.426063 | false |
dstanek/keystone | keystone/tests/unit/test_v3_controller.py | 4 | 1890 | # Copyright 2014 CERN.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import six
from six.moves import range
from testtools import matchers
from keystone.common import controller
from keystone import exception
from keystone.tests import unit as tests
class V3ControllerTestCase(tests.TestCase):
"""Tests for the V3Controller class."""
def setUp(self):
super(V3ControllerTestCase, self).setUp()
class ControllerUnderTest(controller.V3Controller):
_mutable_parameters = frozenset(['hello', 'world'])
self.api = ControllerUnderTest()
def test_check_immutable_params(self):
"""Pass valid parameters to the method and expect no failure."""
ref = {
'hello': uuid.uuid4().hex,
'world': uuid.uuid4().hex
}
self.api.check_immutable_params(ref)
def test_check_immutable_params_fail(self):
"""Pass invalid parameter to the method and expect failure."""
ref = {uuid.uuid4().hex: uuid.uuid4().hex for _ in range(3)}
ex = self.assertRaises(exception.ImmutableAttributeError,
self.api.check_immutable_params, ref)
ex_msg = six.text_type(ex)
self.assertThat(ex_msg, matchers.Contains(self.api.__class__.__name__))
for key in ref.keys():
self.assertThat(ex_msg, matchers.Contains(key))
| apache-2.0 | -3,008,335,138,926,897,700 | 34.660377 | 79 | 0.679894 | false |
JohnDoee/autotorrent | autotorrent/at.py | 1 | 22932 | from __future__ import division, unicode_literals
import os
import hashlib
import logging
import platform
from collections import defaultdict
from .bencode import bencode, bdecode
from .humanize import humanize_bytes
from .utils import is_unsplitable, get_root_of_unsplitable, Pieces
logger = logging.getLogger('autotorrent')
class Color:
BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
PINK = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
ENDC = '\033[0m'
COLOR_OK = Color.GREEN
COLOR_MISSING_FILES = Color.RED
COLOR_ALREADY_SEEDING = Color.BLUE
COLOR_FOLDER_EXIST_NOT_SEEDING = Color.YELLOW
COLOR_FAILED_TO_ADD_TO_CLIENT = Color.PINK
class Status:
OK = 0
MISSING_FILES = 1
ALREADY_SEEDING = 2
FOLDER_EXIST_NOT_SEEDING = 3
FAILED_TO_ADD_TO_CLIENT = 4
status_messages = {
Status.OK: '%sOK%s' % (COLOR_OK, Color.ENDC),
Status.MISSING_FILES: '%sMissing%s' % (COLOR_MISSING_FILES, Color.ENDC),
Status.ALREADY_SEEDING: '%sSeeded%s' % (COLOR_ALREADY_SEEDING, Color.ENDC),
Status.FOLDER_EXIST_NOT_SEEDING: '%sExists%s' % (COLOR_FOLDER_EXIST_NOT_SEEDING, Color.ENDC),
Status.FAILED_TO_ADD_TO_CLIENT: '%sFailed%s' % (COLOR_FAILED_TO_ADD_TO_CLIENT, Color.ENDC),
}
CHUNK_SIZE = 65536
class UnknownLinkTypeException(Exception):
pass
class IllegalPathException(Exception):
pass
class AutoTorrent(object):
def __init__(self, db, client, store_path, add_limit_size, add_limit_percent, delete_torrents, link_type='soft'):
self.db = db
self.client = client
self.store_path = store_path
self.add_limit_size = add_limit_size
self.add_limit_percent = add_limit_percent
self.delete_torrents = delete_torrents
self.link_type = link_type
self.torrents_seeded = set()
def try_decode(self, value):
try:
return value.decode('utf-8')
except UnicodeDecodeError:
logger.debug('Failed to decode %r using UTF-8' % value)
return value.decode('iso-8859-1')
def is_legal_path(self, path):
for p in path:
if p in ['.', '..'] or '/' in p:
return False
return True
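    # For example: is_legal_path(['Show.S01', 'ep01.mkv']) -> True, while
    # is_legal_path(['..', 'etc', 'passwd']) -> False; the file names are made
    # up for illustration.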
def populate_torrents_seeded(self):
"""
Fetches a list of currently-seeded info hashes
"""
self.torrents_seeded = set(x.lower() for x in self.client.get_torrents())
def get_info_hash(self, torrent):
"""
Creates the info hash of a torrent
"""
return hashlib.sha1(bencode(torrent[b'info'])).hexdigest()
def find_hash_checks(self, torrent, result):
"""
Uses hash checking to find pieces
"""
modified_result = False
pieces = Pieces(torrent)
if self.db.hash_slow_mode:
logger.info('Slow mode enabled, building hash size table')
self.db.build_hash_size_table()
start_size = 0
end_size = 0
logger.info('Hash scan mode enabled, checking for incomplete files')
for f in result:
start_size = end_size
end_size += f['length']
if f['completed']:
continue
files_to_check = []
logger.debug('Building list of file names to match hash with.')
if self.db.hash_size_mode:
logger.debug('Using hash size mode to find files')
files_to_check += self.db.find_hash_size(f['length'])
if self.db.hash_name_mode:
logger.debug('Using hash name mode to find files')
name = f['path'][-1]
files_to_check += self.db.find_hash_name(name)
if self.db.hash_slow_mode:
logger.debug('Using hash slow mode to find files')
files_to_check += self.db.find_hash_varying_size(f['length'])
logger.debug('Found %i files to check for matching hash' % len(files_to_check))
checked_files = set()
for db_file in files_to_check:
                if db_file in checked_files:
                    logger.debug('File %s already checked, skipping' % db_file)
                    continue
                checked_files.add(db_file)
logger.info('Hash checking %s' % db_file)
match_start, match_end = pieces.match_file(db_file, start_size, end_size)
                logger.info('Got result for file %s start:%s end:%s' % (db_file, match_start, match_end))
if match_start or match_end: # this file is all-good
size = os.path.getsize(db_file)
if size != f['length']: # size does not match, need to align file
logger.debug('File does not have correct size, need to align it')
if match_start and match_end:
logger.debug('Need to find alignment in the middle of the file')
modification_point = pieces.find_piece_breakpoint(db_file, start_size, end_size)
elif match_start:
logger.debug('Need to modify from the end of the file')
modification_point = min(f['length'], size)
elif match_end:
logger.debug('Need to modify at the front of the file')
modification_point = 0
if size > f['length']:
modification_action = 'remove'
else:
modification_action = 'add'
f['completed'] = False
f['postprocessing'] = ('rewrite', modification_action, modification_point)
modified_result = True
else:
logger.debug('Perfect size, perfect match !')
f['completed'] = True
f['actual_path'] = db_file
break
return modified_result, result
def index_torrent(self, torrent):
"""
Indexes the files in the torrent.
"""
torrent_name = torrent[b'info'][b'name']
logger.debug('Handling torrent name %r' % (torrent_name, ))
torrent_name = self.try_decode(torrent_name)
if not self.is_legal_path([torrent_name]):
raise IllegalPathException('That is a dangerous torrent name %r, bailing' % torrent_name)
logger.info('Found name %r for torrent' % torrent_name)
if self.db.exact_mode:
prefix = 'd' if b'files' in torrent[b'info'] else 'f'
paths = self.db.find_exact_file_path(prefix, torrent_name)
if paths:
for path in paths:
logger.debug('Checking exact path %r' % path)
if prefix == 'f':
logger.info('Did an exact match to a file')
size = os.path.getsize(path)
if torrent[b'info'][b'length'] != size:
continue
return {'mode': 'exact',
'source_path': os.path.dirname(path),
'files': [{
'actual_path': path,
'length': size,
'path': [torrent_name],
'completed': True,
}]}
else:
result = []
for f in torrent[b'info'][b'files']:
orig_path = [self.try_decode(x) for x in f[b'path']]
p = os.path.join(path, *orig_path)
if not os.path.isfile(p):
logger.debug('File %r does not exist' % p)
break
size = os.path.getsize(p)
if size != f[b'length']:
logger.debug('File %r did not match, this is not exact (got size %s, expected %s)' % (p, size, f[b'length']))
break
result.append({
'actual_path': p,
'length': f[b'length'],
'path': orig_path,
'completed': True,
})
else:
logger.info('Did an exact match to a path')
return {'mode': 'exact',
'source_path': path,
'files': result}
result = []
if b'files' in torrent[b'info']: # multifile torrent
files_sorted = {}
files = {}
if b'files' in torrent[b'info']:
i = 0
path_files = defaultdict(list)
for f in torrent[b'info'][b'files']:
logger.debug('Handling torrent file %r' % (f, ))
orig_path = [self.try_decode(x) for x in f[b'path'] if x] # remove empty fragments
if not self.is_legal_path(orig_path):
raise IllegalPathException('That is a dangerous torrent path %r, bailing' % orig_path)
path = [torrent_name] + orig_path
name = path.pop()
path_files[os.path.join(*path)].append({
'path': orig_path,
'length': f[b'length'],
})
files_sorted['/'.join(orig_path)] = i
i += 1
if self.db.unsplitable_mode:
unsplitable_paths = set()
for path, files in path_files.items():
if is_unsplitable(f['path'][-1] for f in files):
path = path.split(os.sep)
name = get_root_of_unsplitable(path)
if not name:
continue
while path[-1] != name:
path.pop()
unsplitable_paths.add(os.path.join(*path))
for path, files in path_files.items():
if self.db.unsplitable_mode:
path = path.split(os.sep)
while path and os.path.join(*path) not in unsplitable_paths:
path.pop()
else:
path = None
if path:
name = path[-1]
for f in files:
actual_path = self.db.find_unsplitable_file_path(name, f['path'], f['length'])
f['actual_path'] = actual_path
f['completed'] = actual_path is not None
result += files
else:
for f in files:
actual_path = self.db.find_file_path(f['path'][-1], f['length'])
f['actual_path'] = actual_path
f['completed'] = actual_path is not None
result += files
# re-sort the torrent to fit original ordering
result = sorted(result, key=lambda x:files_sorted['/'.join(x['path'])])
else: # singlefile torrent
length = torrent[b'info'][b'length']
actual_path = self.db.find_file_path(torrent_name, length)
result.append({
'actual_path': actual_path,
'length': length,
'path': [torrent_name],
'completed': actual_path is not None,
})
mode = 'link'
if self.db.hash_mode:
modified_result, result = self.find_hash_checks(torrent, result)
if modified_result:
mode = 'hash'
return {'mode': mode, 'files': result}
def parse_torrent(self, torrent):
"""
Parses the torrent and finds the physical location of files
in the torrent
"""
files = self.index_torrent(torrent)
found_size, missing_size = 0, 0
for f in files['files']:
if f['completed'] or f.get('postprocessing'):
found_size += f['length']
else:
missing_size += f['length']
return found_size, missing_size, files
def link_files(self, destination_path, files):
"""
Links the files to the destination_path if they are found.
"""
if not os.path.isdir(destination_path):
os.makedirs(destination_path)
for f in files:
if f['completed']:
destination = os.path.join(destination_path, *f['path'])
file_path = os.path.dirname(destination)
if not os.path.isdir(file_path):
logger.debug('Folder %r does not exist, creating' % file_path)
os.makedirs(file_path)
logger.debug('Making %s link from %r to %r' % (self.link_type, f['actual_path'], destination))
if self.link_type == 'soft':
os.symlink(f['actual_path'], destination)
elif self.link_type == 'hard':
os.link(f['actual_path'], destination)
elif self.link_type == 'ref':
self.reflink(f['actual_path'], destination)
else:
raise UnknownLinkTypeException('%r is not a known link type' % self.link_type)
def reflink(self, path, destination):
"""
Perform a reflink (if supported, currently only xfs, apfs, btrfs is)
This code is modified from dvc (https://github.com/iterative/dvc/blob/f4bec650eddc8874b3f7ab2f8b34bc5dfe60fd49/dvc/system.py#L105).
These libraries are available under the Apache 2.0 license, which can be obtained from http://www.apache.org/licenses/LICENSE-2.0.
"""
system = platform.system()
logger.debug('platform is %r', system)
try:
if system == "Windows":
ret = self.reflink_windows(path, destination)
elif system == "Darwin":
ret = self.reflink_darwin(path, destination)
elif system == "Linux":
ret = self.reflink_linux(path, destination)
else:
ret = -1
except IOError:
ret = -1
if ret != 0:
raise Exception("reflink is not supported")
def reflink_linux(self, path, destination):
"""
Linux only reflink via syscall FICLONE on supported filesystems
"""
import os
import fcntl
FICLONE = 0x40049409
try:
ret = 255
with open(path, "r") as s, open(destination, "w+") as d:
ret = fcntl.ioctl(d.fileno(), FICLONE, s.fileno())
finally:
if ret != 0:
os.unlink(destination)
return ret
def reflink_windows(self, path, destination):
return -1
def reflink_darwin(self, path, destination):
        import ctypes
        import errno
LIBC = "libc.dylib"
LIBC_FALLBACK = "/usr/lib/libSystem.dylib"
try:
clib = ctypes.CDLL(LIBC)
except OSError as exc:
logger.debug(
"unable to access '{}' (errno '{}'). "
"Falling back to '{}'.".format(LIBC, exc.errno, LIBC_FALLBACK)
)
if exc.errno != errno.ENOENT:
raise
# NOTE: trying to bypass System Integrity Protection (SIP)
clib = ctypes.CDLL(LIBC_FALLBACK)
if not hasattr(clib, "clonefile"):
return -1
clonefile = clib.clonefile
clonefile.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int]
clonefile.restype = ctypes.c_int
return clonefile(
ctypes.c_char_p(path.encode("utf-8")),
ctypes.c_char_p(destination.encode("utf-8")),
ctypes.c_int(0),
)
def rewrite_hashed_files(self, destination_path, files):
"""
Rewrites files from the actual_path to the correct file inside destination_path.
"""
if not os.path.isdir(destination_path):
os.makedirs(destination_path)
for f in files:
if not f['completed'] and 'postprocessing' in f:
destination = os.path.join(destination_path, *f['path'])
file_path = os.path.dirname(destination)
if not os.path.isdir(file_path):
logger.debug('Folder %r does not exist, creating' % file_path)
os.makedirs(file_path)
logger.debug('Rewriting file from %r to %r' % (f['actual_path'], destination))
_, modification_action, modification_point = f['postprocessing']
current_size = os.path.getsize(f['actual_path'])
expected_size = f['length']
diff = abs(current_size - expected_size)
# write until modification_point, do action, write rest of file
modified = False
bytes_written = 0
with open(destination, 'wb') as output_fp:
with open(f['actual_path'], 'rb') as input_fp:
logger.debug('Opened file %s and writing its data to %s - The breakpoint is %i' % (f['actual_path'], destination, modification_point))
while True:
if not modified and bytes_written == modification_point:
logger.debug('Time to modify with action %s and bytes %i' % (modification_action, diff))
modified = True
if modification_action == 'remove':
seek_point = bytes_written + diff
logger.debug('Have to shrink compared to original file, seeking to %i' % (seek_point, ))
input_fp.seek(seek_point)
elif modification_action == 'add':
logger.debug('Need to add data, writing %i empty bytes' % diff)
while diff > 0:
write_bytes = min(CHUNK_SIZE, diff)
output_fp.write(b'\x00' * write_bytes)
diff -= write_bytes
read_bytes = CHUNK_SIZE
if not modified:
read_bytes = min(read_bytes, modification_point-bytes_written)
logger.debug('Reading %i bytes' % (read_bytes, ))
data = input_fp.read(read_bytes)
if not data:
break
output_fp.write(data)
bytes_written += read_bytes
logger.debug('Done rewriting file')
def handle_torrentfile(self, path, dry_run=False):
"""
Checks a torrentfile for files to seed, groups them by found / not found.
The result will also include the total size of missing / not missing files.
"""
logger.info('Handling file %s' % path)
torrent = self.open_torrentfile(path)
if self.check_torrent_in_client(torrent):
self.print_status(Status.ALREADY_SEEDING, path, 'Already seeded')
if self.delete_torrents:
logger.info('Removing torrent %r' % path)
os.remove(path)
return Status.ALREADY_SEEDING
found_size, missing_size, files = self.parse_torrent(torrent)
if found_size + missing_size == 0:
missing_percent = 100
else:
missing_percent = (missing_size / (found_size + missing_size)) * 100
found_percent = 100 - missing_percent
would_not_add = missing_size and missing_percent > self.add_limit_percent or missing_size > self.add_limit_size
if dry_run:
return found_size, missing_size, would_not_add, [f['actual_path'] for f in files['files'] if f.get('actual_path')]
if would_not_add:
logger.info('Files missing from %s, only %3.2f%% found (%s missing)' % (path, found_percent, humanize_bytes(missing_size)))
self.print_status(Status.MISSING_FILES, path, 'Missing files, only %3.2f%% found (%s missing)' % (found_percent, humanize_bytes(missing_size)))
return Status.MISSING_FILES
if files['mode'] == 'link' or files['mode'] == 'hash':
logger.info('Preparing torrent using link mode')
destination_path = os.path.join(self.store_path, os.path.splitext(os.path.basename(path))[0])
if os.path.isdir(destination_path):
logger.info('Folder exist but torrent is not seeded %s' % destination_path)
self.print_status(Status.FOLDER_EXIST_NOT_SEEDING, path, 'The folder exist, but is not seeded by torrentclient')
return Status.FOLDER_EXIST_NOT_SEEDING
self.link_files(destination_path, files['files'])
elif files['mode'] == 'exact':
logger.info('Preparing torrent using exact mode')
destination_path = files['source_path']
fast_resume = True
if files['mode'] == 'hash':
fast_resume = False
logger.info('There are files found using hashing that needs rewriting.')
self.rewrite_hashed_files(destination_path, files['files'])
if self.delete_torrents:
logger.info('Removing torrent %r' % path)
os.remove(path)
if self.client.add_torrent(torrent, destination_path, files['files'], fast_resume):
self.print_status(Status.OK, path, 'Torrent added successfully')
return Status.OK
else:
self.print_status(Status.FAILED_TO_ADD_TO_CLIENT, path, 'Failed to send torrent to client')
return Status.FAILED_TO_ADD_TO_CLIENT
def check_torrent_in_client(self, torrent):
"""
Checks if a torrent is currently seeded
"""
info_hash = self.get_info_hash(torrent)
return info_hash in self.torrents_seeded
def open_torrentfile(self, path):
"""
Opens and parses a torrent file
"""
with open(path, 'rb') as f:
return bdecode(f.read())
def print_status(self, status, torrentfile, message):
print(' %-20s %r %s' % ('[%s]' % status_messages[status], os.path.splitext(os.path.basename(torrentfile))[0], message))
| mit | 7,117,655,230,951,537,000 | 39.161121 | 158 | 0.511774 | false |
nrupatunga/PY-GOTURN | goturn/network/regressor.py | 1 | 5623 | # Date: Friday 02 June 2017 05:04:00 PM IST
# Email: [email protected]
# Name: Nrupatunga
# Description: Basic regressor function implemented
from __future__ import print_function
import os
import glob
import numpy as np
import sys
import cv2
from ..helper import config
sys.path.insert(0, config.CAFFE_PATH)
import caffe
class regressor:
"""Regressor Class"""
def __init__(self, deploy_proto, caffe_model, gpu_id, num_inputs,
do_train, logger, solver_file=None):
"""TODO: to be defined"""
self.num_inputs = num_inputs
self.logger = logger
self.caffe_model_ = caffe_model
self.modified_params_ = False
self.mean = [104, 117, 123]
self.modified_params = False
self.solver_file = None
if solver_file:
self.solver_file = solver_file
self.setupNetwork(deploy_proto, caffe_model, gpu_id, do_train)
def reshape_image_inputs(self, num_images):
"""TODO: Docstring for reshape_image_inputs.
:returns: TODO
"""
net = self.net
net.blobs['image'].reshape(num_images, self.channels, self.height, self.width)
net.blobs['target'].reshape(num_images, self.channels, self.height, self.width)
def set_images(self, images, targets):
"""TODO: Docstring for set_images.
:returns: TODO
"""
num_images = len(images)
self.reshape_image_inputs(num_images)
self.preprocess_batch(images, targets)
def preprocess(self, image):
"""TODO: Docstring for preprocess.
:arg1: TODO
:returns: TODO
"""
num_channels = self.channels
if num_channels == 1 and image.shape[2] == 3:
image_out = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
elif num_channels == 1 and image.shape[2] == 4:
image_out = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
elif num_channels == 3 and image.shape[2] == 4:
image_out = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
elif num_channels == 3 and image.shape[2] == 1:
image_out = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
else:
image_out = image
if image_out.shape != (self.height, self.width, self.channels):
image_out = cv2.resize(image_out, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
image_out = np.float32(image_out)
image_out -= np.array(self.mean)
image_out = np.transpose(image_out, [2, 0, 1])
return image_out
def preprocess_batch(self, images_batch, targets_batch):
"""TODO: Docstring for preprocess_batch.
:arg1: TODO
:returns: TODO
"""
net = self.net
num_images = len(images_batch)
for i in range(num_images):
image = images_batch[i]
image_out = self.preprocess(image)
net.blobs['image'].data[i] = image_out
target = targets_batch[i]
target_out = self.preprocess(target)
net.blobs['target'].data[i] = target_out
def setupNetwork(self, deploy_proto, caffe_model, gpu_id, do_train):
"""TODO: Docstring for setupNetwork.
:deploy_proto (string) : deploy prototxt file
:caffe_model (string) : trained caffe model path
:gpu_id (integer) : GPU id
:do_train (boolean) : training phase or testing phase
"""
logger = self.logger
caffe.set_mode_gpu()
caffe.set_device(int(gpu_id))
if do_train == True:
logger.info('Setting phase to train')
# TODO: this part of the code needs to be changed for
# training phase
if self.solver_file:
self.solver = caffe.SGDSolver(self.solver_file)
net = self.solver.net
net.copy_from(caffe_model)
else:
logger.error('solver file required')
return
self.phase = caffe.TRAIN
else:
logger.info('Setting phase to test')
net = caffe.Net(deploy_proto, caffe_model, caffe.TEST)
self.phase = caffe.TEST
self.net = net
self.num_inputs = net.blobs['image'].data[...].shape[0]
self.channels = net.blobs['image'].data[...].shape[1]
self.height = net.blobs['image'].data[...].shape[2]
self.width = net.blobs['image'].data[...].shape[3]
if self.num_inputs != 1:
logger.error('Network should take exactly one input')
if self.channels != 1 and self.channels != 3:
logger.error('Network should have 1 or 3 channels')
def regress(self, curr_search_region, target_region):
"""TODO: Docstring for regress.
:returns: TODO
"""
return self.estimate(curr_search_region, target_region)
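    # Hedged usage sketch: given two BGR numpy arrays cropped around the
    # previous target and the current search region, something like
    #   bbox = reg.regress(curr_search_region, target_region)
    # returns the raw 'fc8' blob produced by the network; the variable names
    # are illustrative only.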
def estimate(self, curr_search_region, target_region):
"""TODO: Docstring for estimate.
:arg1: TODO
:returns: TODO
"""
net = self.net
# reshape the inputs
net.blobs['image'].data.reshape(1, self.channels, self.height, self.width)
net.blobs['target'].data.reshape(1, self.channels, self.height, self.width)
net.blobs['bbox'].data.reshape(1, 4, 1, 1)
curr_search_region = self.preprocess(curr_search_region)
target_region = self.preprocess(target_region)
net.blobs['image'].data[...] = curr_search_region
net.blobs['target'].data[...] = target_region
net.forward()
bbox_estimate = net.blobs['fc8'].data
return bbox_estimate | mit | 2,089,229,815,870,603,000 | 31.508671 | 103 | 0.586342 | false |
bobev18/reflective | django2wrap/settings.py | 1 | 5385 | import os
# Django settings for django2wrap project.
SITE_ROOT = os.path.realpath(os.path.dirname(__file__)) # + '/'
PROJECT_NAME = os.path.basename(os.path.dirname(__file__))
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(BASE_DIR, 'local_settings.py')
try:
exec(open(fp,'rt',encoding='utf-8').read())
except :
print('!'*32, 'Failed to import', fp)
print()
exit(1)
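# local_settings.py is expected to provide the machine-specific values that are
# intentionally absent from this file (for example SECRET_KEY, DEBUG, DATABASES
# and the email settings referenced below); its exact contents are an
# assumption, since the file is not part of the repository.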
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Sofia'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(SITE_ROOT, 'static_files')
# STATIC_ROOT = os.path.join(SITE_ROOT, 'static') #SITE_ROOT + 'static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django2wrap.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django2wrap.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.auth',
# 'django.core.context_processors.debug',
# 'django.core.context_processors.i18n',
# 'django.core.context_processors.media',
# 'django.core.context_processors.request',
# )
INSTALLED_APPS = (
'south',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django_extensions',
'django2wrap',
)
LOGIN_REDIRECT_URL = "/"
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
# email settings were here #
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| bsd-3-clause | 7,251,111,057,287,929,000 | 32.447205 | 100 | 0.69805 | false |
JackMc/CourseScraper | scraper/carleton/scrape.py | 1 | 3008 | from carleton.course import Course
import requests
from bs4 import BeautifulSoup
import datetime
import csv
FALL = 0
WINTER = 1
SUMMER = 2
# Maps term codes to semesters
# Experimentally determined from playing with Carleton Central.
_TERM_MAP = {WINTER: '10', SUMMER: '20', FALL: '30'}
def _make_term_code(year, term):
# Term is invalid.
if term not in _TERM_MAP:
return None
return str(year) + _TERM_MAP[term]
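# For example, _make_term_code(2014, FALL) -> '201430', while an unknown term
# such as _make_term_code(2014, 99) -> None.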
def _get_session_id():
"""
Returns a string used by Carleton Central to uniquely identify a session.
"""
get_session_id = requests.get('http://central.carleton.ca/prod/bwysched.p_select_term',
params={'wsea_code': 'EXT'})
if not get_session_id.ok:
# It didn't return a good response code.
return None
# Parse out the session ID.
session_soup = BeautifulSoup(get_session_id.text)
inputs = session_soup.find('input', attrs={'name': 'session_id'})
return inputs['value']
def _parse_row(row):
"""
Returns a two-tuple containing the parsed version of a given row.
    The first element holds the machine-readable prerequisites (the evaluated
    list literals), the second the human-readable prerequisite strings.
"""
if not row:
return (None, None)
elif 'DNI' in row:
return (None, None)
return ([eval(x) for x in row if x.startswith('[')], [x[1:] for x in row if x.startswith('\'')])
def get_courses(faculty, year=2014, term=FALL):
"""
Returns a list of Course objects for a given faculty code (i.e. COMP).
"""
# We grab the faculty courses page and soup it. This is a listing of courses.
faculty_courses = requests.get('http://calendar.carleton.ca/undergrad/courses/' + faculty)
soup = BeautifulSoup(faculty_courses.text)
# This variable contains a list of the divs that contain the course info.
course_divs = soup.find_all('div', attrs={'class': 'courseblock'})
courses = {}
# Open up the courses/prereqs file
reader = csv.reader(open(faculty + '_prereqs.csv', 'r+'))
for div, row in zip(course_divs, reader):
strong_block = div.find('strong')
text = strong_block.text
top, title = text.split('\n')
# The first half of this would be the faculty code, which we already have.
# Also for some reason it likes it when I split on \xa0 instead of space,
# though it's visually a space. Probably a weird unicode thing.
_, course_no = top.split('[')[0].strip().split('\xa0')
# Another magic number... 3 is the length of both 1.0, 0.5, and 0.0
credits = float(top.split('[')[1][:3])
description = str(div.string)
prereqs, text_prereqs = _parse_row(row)
if prereqs is None or text_prereqs is None:
continue
additional = div.find('coursedescadditional')
courses[faculty + course_no] = Course(credits, faculty, course_no, title, description, prereqs, text_prereqs,
None, additional.get_text() if additional else None)
return courses
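if __name__ == '__main__':
    # Example invocation added for illustration; not part of the original
    # module.  It assumes network access and a 'COMP_prereqs.csv' file in the
    # working directory, as get_courses() requires.
    comp_courses = get_courses('COMP')
    print('Scraped {0} COMP courses'.format(len(comp_courses)))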
| mit | 8,382,359,165,925,215,000 | 32.422222 | 117 | 0.628657 | false |
dnowatsc/Varial | varial/test/test_diskio.py | 1 | 3008 | import os
from ROOT import TH1F
from test_histotoolsbase import TestHistoToolsBase
from varial.wrappers import FileServiceAlias
from varial import diskio
from varial import analysis
class TestDiskio(TestHistoToolsBase):
def setUp(self):
super(TestDiskio, self).setUp()
if not os.path.exists("test_data"):
os.mkdir("test_data")
def test_fileservice_aliases(self):
for name, smp in analysis.all_samples.items():
analysis.fs_aliases += list(
alias for alias in diskio.generate_fs_aliases(
'fileservice/%s.root' % name,
smp
)
)
aliases = analysis.fs_aliases[:]
# Is number of loaded elements correct?
self.assertEqual(len(aliases), 150)
# Are sample names correct?
samples = set(a.sample for a in aliases)
self.assertTrue("tt" in samples)
self.assertTrue("ttgamma" in samples)
self.assertTrue("zjets" in samples)
# Check for some analyzers
analyzers = set(a.analyzer for a in aliases)
self.assertTrue("realTemplate" in analyzers)
self.assertTrue("analyzer_ET" in analyzers)
# Check for some histonames
histos = set(a.name for a in aliases)
self.assertTrue("histo" in histos)
self.assertTrue("sihihEB" in histos)
def test_load_histogram(self):
test_alias = FileServiceAlias(
"cutflow", "analyzeSelection", "fileservice/ttgamma.root",
analysis.all_samples["ttgamma"]
)
wrp = diskio.load_histogram(test_alias)
self.assertEqual(wrp.name, test_alias.name)
self.assertEqual(wrp.analyzer, test_alias.analyzer)
self.assertEqual(wrp.sample, test_alias.sample)
self.assertTrue(isinstance(wrp.histo, TH1F))
self.assertAlmostEqual(wrp.histo.Integral(), 280555.0)
def test_write(self):
fname = "test_data/wrp_save.info"
diskio.write(self.test_wrp, fname)
# file should exist
self.assertTrue(
os.path.exists(fname)
)
        # file should have 21 lines (with history written out)
with open(fname) as fhandle:
n_lines = len(list(fhandle))
self.assertEqual(n_lines, 21)
def test_read(self):
fname = "test_data/wrp_load.info"
diskio.write(self.test_wrp, fname)
loaded = diskio.read(fname)
self.test_wrp.history = str(self.test_wrp.history)
# check names
self.assertEqual(
self.test_wrp.all_writeable_info(),
loaded.all_writeable_info()
)
# check histograms (same integral, different instance)
self.assertEqual(self.test_wrp.histo.Integral(), loaded.histo.Integral())
self.assertNotEqual(str(self.test_wrp.histo), str(loaded.histo))
import unittest
suite = unittest.TestLoader().loadTestsFromTestCase(TestDiskio)
if __name__ == '__main__':
unittest.main() | gpl-3.0 | 5,906,985,303,420,168,000 | 32.433333 | 81 | 0.622008 | false |
csachs/openmicroscopy | components/tools/OmeroWeb/test/integration/test_download.py | 9 | 5019 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test download of data.
"""
from omero.model import PlateI, WellI, WellSampleI
from omero.rtypes import rstring
import pytest
from django.core.urlresolvers import reverse
from weblibrary import IWebTest, _get_response
class TestDownload(IWebTest):
"""
Tests to check download is disabled where specified.
"""
@pytest.fixture
def image_well_plate(self):
"""
        Returns a new OMERO Plate, linked Well and linked Image populated
by an L{test.integration.library.ITest} instance with required fields
set.
"""
plate = PlateI()
plate.name = rstring(self.uuid())
plate = self.update.saveAndReturnObject(plate)
well = WellI()
well.plate = plate
well = self.update.saveAndReturnObject(well)
image = self.new_image(name=self.uuid())
ws = WellSampleI()
ws.image = image
ws.well = well
well.addWellSample(ws)
ws = self.update.saveAndReturnObject(ws)
return plate, well, ws.image
def test_spw_download(self, image_well_plate):
"""
Download of an Image that is part of a plate should be disabled,
and return a 404 response.
"""
plate, well, image = image_well_plate
# download archived files
request_url = reverse('webgateway.views.archived_files')
data = {
"image": image.id.val
}
_get_response(self.django_client, request_url, data, status_code=404)
def test_orphaned_image_direct_download(self):
"""
Download of archived files for a non-SPW orphaned Image.
"""
image = self.importSingleImage()
# download archived files
request_url = reverse('webgateway.views.archived_files',
args=[image.id.val])
_get_response(self.django_client, request_url, {}, status_code=200)
def test_orphaned_image_download(self):
"""
Download of archived files for a non-SPW orphaned Image.
"""
image = self.importSingleImage()
# download archived files
request_url = reverse('webgateway.views.archived_files')
data = {
"image": image.id.val
}
_get_response(self.django_client, request_url, data, status_code=200)
def test_image_in_dataset_download(self):
"""
Download of archived files for a non-SPW Image in Dataset.
"""
image = self.importSingleImage()
ds = self.make_dataset()
self.link(ds, image)
# download archived files
request_url = reverse('webgateway.views.archived_files')
data = {
"image": image.id.val
}
_get_response(self.django_client, request_url, data, status_code=200)
def test_image_in_dataset_in_project_download(self):
"""
Download of archived files for a non-SPW Image in Dataset in Project.
"""
image = self.importSingleImage()
ds = self.make_dataset()
pr = self.make_project()
self.link(pr, ds)
self.link(ds, image)
# download archived files
request_url = reverse('webgateway.views.archived_files')
data = {
"image": image.id.val
}
_get_response(self.django_client, request_url, data, status_code=200)
def test_well_download(self, image_well_plate):
"""
Download of archived files for a SPW Well.
"""
plate, well, image = image_well_plate
# download archived files
request_url = reverse('webgateway.views.archived_files')
data = {
"well": well.id.val
}
_get_response(self.django_client, request_url, data, status_code=404)
    def test_attachment_download(self):
        """
        Download of attachment.
"""
image = self.importSingleImage()
fa = self.make_file_annotation()
self.link(image, fa)
# download archived files
request_url = reverse('download_annotation',
args=[fa.id.val])
_get_response(self.django_client, request_url, {}, status_code=200)
| gpl-2.0 | 3,900,360,906,882,273,000 | 29.981481 | 78 | 0.61845 | false |
uhjish/bx-python | lib/bx/seq/nib.py | 3 | 2884 | """
Classes to support nib files.
:Author: James Taylor ([email protected]), Bob Harris ([email protected])
A nib sequence is a sequence of DNA, using the 10 character alphabet A,C,G,T,N
(upper and lower case). The file is packed as 4 bits per character.
nib file format
---------------
Fields can be in big- or little-endian format; they must match the endianess
of the magic number.
============ =========== ======================================================
offset 0x00: 6B E9 3D 3A big endian magic number (3A 3D E9 6B => little endian)
offset 0x04: xx xx xx xx length of data sequence (counted in characters)
offset 0x08: ... data sequence; most significant nybble in each
byte is first in sequence
============ =========== ======================================================
"""
from __future__ import division
from bx.seq.seq import SeqFile,SeqReader
import sys, struct, string, math
import _nib
NIB_MAGIC_NUMBER = 0x6BE93D3A
NIB_MAGIC_NUMBER_SWAP = 0x3A3DE96B
NIB_MAGIC_SIZE = 4
NIB_LENGTH_SIZE = 4
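# Example header decode (added for illustration; "fh" is a hypothetical open
# file handle in binary mode and a big-endian file is assumed):
#     magic, nbases = struct.unpack(">LL", fh.read(NIB_MAGIC_SIZE + NIB_LENGTH_SIZE))
# The header is followed by (nbases + 1) // 2 bytes of packed nybbles, the
# most significant nybble of each byte coming first in the sequence.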
class NibFile(SeqFile):
def __init__(self, file, revcomp=False, name="", gap=None):
SeqFile.__init__(self,file,revcomp,name,gap)
self.byte_order = ">"
magic = struct.unpack(">L", file.read(NIB_MAGIC_SIZE))[0]
if (magic != NIB_MAGIC_NUMBER):
if magic == NIB_MAGIC_NUMBER_SWAP: self.byte_order = "<"
else: raise Exception("Not a NIB file")
self.magic = magic
self.length = struct.unpack("%sL" % self.byte_order, file.read(NIB_LENGTH_SIZE))[0]
def raw_fetch(self, start, length):
# Check parameters
        assert start >= 0, "Start must be non-negative"
        assert length >= 0, "Length must be non-negative"
assert start + length <= self.length, "Interval beyond end of sequence"
# Read block of bytes containing sequence
block_start = int(math.floor(start / 2))
block_end = int(math.floor((start + length - 1) / 2))
block_len = block_end + 1 - block_start
self.file.seek(NIB_MAGIC_SIZE + NIB_LENGTH_SIZE + block_start)
raw = self.file.read(block_len)
# Unpack compressed block into a character string and return
return _nib.translate_raw_data( raw, start, length )
class NibReader(SeqReader):
def __init__(self, file, revcomp=False, name="", gap=None):
SeqReader.__init__(self,file,revcomp,name,gap)
def next(self):
if (self.seqs_read != 0): return # nib files have just one sequence
seq = NibFile(self.file,self.revcomp,self.name,self.gap)
self.seqs_read += 1
return seq
class NibWriter(object):
def __init__(self,file):
self.file = file
def write(self,seq):
assert (False), "NibWriter.write() is not implemented yet"
def close(self):
self.file.close()
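if __name__ == '__main__':
    # Usage sketch added for illustration; not part of the original module.
    # "example.nib" is only a hypothetical file name for a nib file on disk.
    import sys
    path = sys.argv[1] if len(sys.argv) > 1 else "example.nib"
    with open(path, "rb") as nib_fh:
        seq = NibReader(nib_fh).next()
        print("length: %d" % seq.length)
        print("first bases: %s" % seq.raw_fetch(0, min(50, seq.length)))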
| mit | -2,764,153,147,221,239,000 | 33.333333 | 91 | 0.602982 | false |
trichter/sito | xcorr.py | 1 | 17828 | #!/usr/bin/env python
# by TR
#import logging
from obspy.signal.util import nextpow2
from scipy.fftpack import fft, ifft, fftshift, ifftshift
from sito._xcorr import xcorr as xcorr_c
from sito.util import isnumber, filterResp, smooth, fillArray
import logging
import numpy as np
import obspy.signal
from obspy.core.util.decorator import deprecated
USE_FFTW3 = False
#fft = None
#ifft = None
#def use_fftw3(val=True):
# global USE_FFTW3, fft, ifft
# USE_FFTW3 = val
# print('not ' * (not val) + 'using FFWT3 library.')
# if val:
# from sito.util.fftw3_be import fft, ifft
# else:
# from scipy.fftpack import fft, ifft
#use_fftw3(USE_FFTW3)
log = logging.getLogger(__name__)
def timeNorm(data, method=None, param=None, recursive=0):
"""
Calculates normalized data. See Bensen et al.(2007)
Method is a string. There are the following methods:
1bit: reduce data to +1 if >0 and -1 if <0
clip: clip data to the root mean square (rms)
    eventremoval: automatic event detection and removal - if a value is bigger
than the threshold, the following values are set to zero.
param: (threshold, number of samples (eg. 30min) to set to zero)
stalta: automatic event removing with recursive sta/lta trigger
runningmean: the data is normalized with the running average
The width of the normalization window determines how much amplitude
information is retained (N=1 -> 1bit normalization, N very big ->
rescaled data). Half of the maximum period of the passband filter_ works
well.
param: width of window (should be odd)
runningmean_over_filtered: the data is normalized with the running average
over the filtered data.
A band pass filter_ between 20s and 100s period can remove local
seismicity.
param: (width of window in seconds, sampling rate, filter_, freq1, freq2)
filter_: in ('band', 'low', high')
if filter_ in ('low', 'high') only on frequency is needed
waterlevel: any amplitude above the waterlevel (multiple of rms) is
        down-weighted by a factor. This procedure is repeated until all of the
        waveform data is under the water-level.
param: (water-level factor, reducing factor)
"""
mask = np.ma.getmask(data)
if method == '1bit':
data = np.sign(data)
elif method == 'clip':
std = np.std(data)
data[data > std] = std
data[data < -std] = -std
elif method == 'eventremoval':
if param == None:
# remove 30 min (at 10Hz) after events if data is bigger than 2000
param = (2000, 30 * 60 * 10)
clip = np.nonzero(abs(data) >= param[0])[0]
if len(clip) > 0:
clip = clip[0]
index = min(clip + param[1], len(data))
data[clip:index] = 0
if index < len(data):
data[index:] = timeNorm(data[index:], method=method, param=param)
elif method == 'stalta':
if param is None:
# STA: 3s at 100Hz, LTA: 10s, trigger on: 1.2, trigger off:1.0
param = (100 * 3, 100 * 10, 1.2, 1.0)
cft = obspy.signal.trigger.recSTALTA(data, param[0], param[1])
trg = obspy.signal.trigger.triggerOnset(cft, param[2], param[3])
for on, off in trg:
data[on:off] = 0
elif method == 'runningmean':
if param == None:
# smooth over 20s at 10Hz
param = 10 * 10
smoothed = smooth(np.abs(data), param)
data /= smoothed
elif method == 'runningmean_over_filtered':
if param is None:
# smooth over 20s at 10Hz over bandpassed data
param = (10, 10, 'band', 1 / 50., 1 / 15.)
sr = param[1]
over = int(param[0] * sr)
filter_ = param[2]
if filter_ == 'band':
data2 = obspy.signal.bandpass(data, param[3], param[4], sr)
elif filter_ == 'low':
data2 = obspy.signal.lowpass(data, param[3], sr)
elif filter_ == 'high':
data2 = obspy.signal.highpass(data, param[3], sr)
else:
raise ValueError("filter_ should be in ('band', 'high', 'low')")
data /= smooth(np.abs(data2), over)
elif method == 'waterlevel':
if param == None:
# data above 6*rms is recursively reduced by a factor of 10
param = (6., 10.)
waterlevel = param[0] * np.std(data)
indices = np.abs(data) > waterlevel
if np.any(indices):
if param[1] == 0:
data[indices] = 0
else:
data[indices] /= param[1]
data = timeNorm(data, method=method, param=param, recursive=recursive + 1)
elif method == 'waterlevel_rm':
if param == None:
# running mean over 5s at 10Hz data
# data above 4*rms is recursively reduced by a factor of 10
param = (5 * 10, 4., 10.)
running_mean = smooth(np.abs(data), param[0])
waterlevel = param[1] * np.std(running_mean)
indices = (running_mean > waterlevel) + (np.abs(data) > waterlevel)
if np.any(indices):
param = list(param)
frac_zeros = 1. * np.count_nonzero(indices) / len(data)
if param[2] == 0:
data[indices] = 0
param[1] *= (1 + frac_zeros)
else:
data[indices] /= param[2]
param[1] *= (1 + frac_zeros * (1 - 1 / param[2]))
print recursive, frac_zeros, waterlevel
data = timeNorm(data, method=method, param=param, recursive=recursive + 1)
elif method == 'waterlevel_env':
if param == None:
# data above 4*rms is recursively reduced by a factor of 10
param = (4., 10.)
param = list(param)
if len(param) == 2:
param.append(0)
param.append(0)
env = obspy.signal.cpxtrace.envelope(data)[1][:len(data)]
# correct std because of zeros
waterlevel = param[0] * np.std(env) / (1 - param[2])
# import pylab as plt
# from imaging import plotTrace
# from sito import Trace
# trace = Trace(data=data)
# trace2 = Trace(data=env)
# plotTrace(trace)
# plotTrace(trace2)
# plt.figure()
# plt.plot(data)
# plt.plot(env)
# plt.hlines(waterlevel, 0, len(data))
# plt.show()
indices = env > waterlevel
frac_zeros = 1. * np.count_nonzero(indices) / len(data)
if np.any(indices) and frac_zeros > 0.0005 and param[3] < 20:
if param[1] == 0:
data[indices] = 0
#param[0] *= (1 + frac_zeros)
else:
data[indices] /= param[2]
#param[0] *= (1 + frac_zeros * (1 - 1 / param[1]))
print param[3], frac_zeros, param[2], waterlevel
param[2] += frac_zeros
param[3] += 1
data = timeNorm(data, method=method, param=param)
elif method == 'waterlevel_env2':
if param == None:
# data above 4*rms is recursively reduced by a factor of 10
param = (4., 10.)
N = len(data)
env = obspy.signal.cpxtrace.envelope(data)[1][:N]
if mask is not False:
env[mask] = 0.
num_stds = 96 # 24*4 =^ every 15min
if N < 86400: # 24*3600
num_stds = N // 900
len_parts = N // num_stds # N//96 = N//24//4 =^ 15min
len_stds = len_parts // 15 # len_parts//15 =^ 1min
stds = np.array([np.std(env[i:i + len_stds]) for i in np.arange(num_stds) * len_parts])
if np.min(stds) == 0:
stds = stds[stds != 0.]
num_stds = len(stds)
stds = np.sort(stds)[num_stds // 15:-num_stds // 15]
stds = stds[stds < np.min(stds) * 2.]
waterlevel = param[0] * np.mean(stds)
# import pylab as plt
# from imaging import plotTrace
# from sito import Trace
# trace = Trace(data=data)
# trace2 = Trace(data=env)
# plotTrace(trace)
# plotTrace(trace2)
# plt.figure()
# plt.plot(data)
# plt.plot(env)
# plt.hlines(waterlevel, 0, len(data))
# plt.show()
indices = env > waterlevel
#frac_zeros = 1. * np.count_nonzero(indices) / len(data)
if np.any(indices):
if param[1] == 0:
# not setting values to zero but masking them
# -> they will stay zero after spectral whitening
# and 1bit normalization
mask = np.ma.mask_or(mask, indices)
#data[indices] = 0
else:
data[indices] /= param[2]
elif method is not None:
raise ValueError('The method passed to timeNorm() is not known.')
return fillArray(data, mask=mask, fill_value=0.)
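# Example (added for illustration; the numbers are only assumptions): for 10 Hz
# data, a 20 s running-mean normalization followed by 1bit normalization, as is
# common before noise cross-correlation, could look like
#     data = timeNorm(data, method='runningmean', param=200)
#     data = timeNorm(data, method='1bit')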
def spectralWhitening(data, sr=None, smoothi=None, freq_domain=False, apply_filter=None):
"""
Apply spectral whitening to data.
sr: sampling rate (only needed for smoothing)
smoothi: None or int
Data is divided by its smoothed (Default: None) amplitude spectrum.
"""
if freq_domain:
mask = False
spec = data
else:
mask = np.ma.getmask(data)
N = len(data)
nfft = nextpow2(N)
spec = fft(data, nfft)
#df = sr/N
spec_ampl = np.sqrt(np.abs(np.multiply(spec, np.conjugate(spec))))
    if isnumber(smoothi) and smoothi > 0:  # smoothi: None or int (see docstring)
smoothi = int(smoothi * N / sr)
spec /= ifftshift(smooth(fftshift(spec_ampl), smoothi))
else:
spec /= spec_ampl
if apply_filter is not None:
spec *= filterResp(*apply_filter, sr=sr, N=len(spec), whole=True)[1]
if freq_domain:
return spec
else:
ret = np.real(ifft(spec, nfft)[:N])
if USE_FFTW3:
ret = ret.copy()
return fillArray(ret, mask=mask, fill_value=0.)
# from pylab import plot, show, subplot
# freqs = np.fft.fftfreq(nfft, 1. / sr)
# ax = subplot(211)
## ax.set_xscale('log')
## ax.set_yscale('log')
# plot(freqs, spec_ampl)
# plot(freqs, ifftshift(smooth(fftshift(spec_ampl), over)), lw=2)
# ax2 = subplot(212, sharex=ax)
## ax2.set_xscale('log')
## ax2.set_yscale('log')
# plot(freqs, np.abs(spec * np.conjugate(spec)) ** 0.5)
# plot(freqs, np.ones(nfft), lw=2)
# show()
# return np.abs(ifft(spec, nfft)[:N])
def xcorrf(data1, data2, shift=None, shift_zero=0, oneside=False,
demean=True, window=0, ndat1d=0, ndat2d=0, N1=None, N2=None,
normalize=True,
freq_domain=False, transform_back=True,
stdev1=None, stdev2=None):
"""
Cross-correlation of numpy arrays data1 and data2 in frequency domain.
We define cross-corelation as:
xcorr[i] = sum_j (tr1[i+j-shift_zero] * tr2[j])
The data is demeaned before cross-correlation and the result normalized
after cross-correlation.
data1, data2: data
shift: maximum samples to shift
(window for i in the above formula)
shift_zero: shift tr1 before cross-correlation by this amount of samples to
the right (this means correlation function is shifted to the
right or better: the window of what you get of the function
is shifted to the left)
oneside: if True only the right/positive side of the correlation function
is returned. Overrides parameter shift_zero.
demean: if True demean data beforehand
normalize: if True normalize correlation function
(1 means perfect correlation)
window: Use only data in this window for demeaning and normalizing
0: window = min(ndat1, ndat2)
>0: window = this parameter
ndat1d, ndat2d: If >0 use different values for the length of the arrays when
calculating the mean (defaults to window parameter)
return: numpy array with correlation function of length 2*shift+1 for
oneside=False and of length shift+1 for oneside=True
"""
if freq_domain and not transform_back:
return data1 * np.conjugate(data2)
elif freq_domain:
min_size = max(2 * shift + 1 + abs(shift_zero),
(N1 + N2) // 2 + shift + abs(shift_zero))
if len(data1) < min_size:
raise ValueError('NFFT was not large enough to cover the desired '
'xcorr!\nnfft: %d, required minimum: %d' %
(len(data1), min_size))
ret = (ifft(data1 * np.conjugate(data2))).real
else:
complex_result = (data1.dtype == np.complex or
data2.dtype == np.complex)
N1 = len(data1)
N2 = len(data2)
#if isinstance(data1[0], np.integer) or isinstance(data2[0], np.integer):
data1 = data1.astype('float64')
data2 = data2.astype('float64')
#if (N1-N2)%2==1:
# raise ValueError('(N1-N2)%2 has to be 0')
if window == 0:
window = min(N1, N2)
if ndat1d == 0:
ndat1d = window
if ndat2d == 0:
ndat2d = window
# determine indices for demeaning and normalization
ind1 = max(0, (N1 - window) // 2)
ind2 = min(N1, (N1 + window) // 2)
ind3 = max(0, (N2 - window) // 2)
ind4 = min(N2, (N2 + window) // 2)
# demean and normalize data
if demean:
data1 -= np.sum(data1[ind1:ind2]) / ndat1d
data2 -= np.sum(data2[ind3:ind4]) / ndat2d
if normalize:
data1 /= np.max(data1[ind1:ind2])
data2 /= np.max(data2[ind3:ind4])
# Always use 2**n-sized FFT, perform xcorr
size = max(2 * shift + 1 + abs(shift_zero),
(N1 + N2) // 2 + shift + abs(shift_zero))
nfft = nextpow2(size)
IN1 = fft(data1, nfft)
if USE_FFTW3:
IN1 = IN1.copy()
IN1 *= np.conjugate(fft(data2, nfft))
ret = ifft(IN1)
if not USE_FFTW3:
del IN1
if not complex_result:
ret = ret.real
# shift data for time lag 0 to index 'shift'
ret = np.roll(ret, -(N1 - N2) // 2 + shift + shift_zero)[:2 * shift + 1]
# normalize xcorr
if normalize:
if not freq_domain:
stdev1 = (np.sum(data1[ind1:ind2] ** 2)) ** 0.5
stdev2 = (np.sum(data2[ind3:ind4] ** 2)) ** 0.5
# stdev1 = (np.sum(data1 ** 2)) ** 0.5
# stdev2 = (np.sum(data2 ** 2)) ** 0.5
if stdev1 == 0 or stdev2 == 0:
log.warning('Data is zero!!')
ret[:] = 0.
else:
ret /= stdev1 * stdev2
if oneside:
ret = ret[shift:]
return np.copy(ret)
def acorrf(data, shift, oneside=True, clipdata=False, ** kwargs):
"""
Auto-correlation of array data in frequency domain.
clipdata: if True: data2=data[shift:-shift]
if False: data2=data
It calls xcorrf.
See doc for xcorr:
"""
if clipdata:
data2 = data[shift:-shift]
else:
data2 = data
return xcorrf(data, data2, shift, oneside=oneside, ** kwargs)
@deprecated
def xcorr_obspy(data1, data2, shift):
"""Cross correlation using ObsPy"""
return obspy.signal.xcorr(data1, data2, shift, full_xcorr=True)[2]
@deprecated
def xcorrt(data1, data2, shift, shift_zero=0, oneside=False, demean=True, normalize=True, window=0, ndat1d=0, ndat2d=0):
"""
Cross-correlation of numpy arrays data1 and data2 in time domain.
"""
if (len(data1) - len(data2)) % 2 == 1:
raise ValueError('(N1-N2)%2 has to be 0')
if oneside:
ret = xcorr_c(data1, data2, (shift + 1) // 2, -((shift + 1) // 2), window, bool(demean), bool(normalize), ndat1d, ndat2d)
if len(ret) == shift + 2:
ret = ret[:-1]
else:
ret = xcorr_c(data1, data2, shift, shift_zero, window, bool(demean), bool(normalize), ndat1d, ndat2d)
return ret
@deprecated
def acorrt(data, shift, oneside=True, clipdata=True, ** kwargs):
"""
Auto-correlation of array data in time domain.
clipdata: if True: data2=data[shift:-shift]
if False: data2=data
It calls xcorrt with parameter oneside=True.
See doc for xcorr:
"""
if clipdata:
data2 = data[shift:-shift]
else:
data2 = data
ret = xcorrt(data, data2, shift, oneside=True, ** kwargs)
if not oneside:
ret = np.hstack((ret[::-1], ret[1:]))
return ret
def getNormFactors(data1, data2, demean=True, num=24):
"""
return norm factors of xcorr routine of divided data compared to whole set
The norm factors of the xcorr routine are calculated for the data divided
into 'num' parts of the data set (each with equal length) and for the whole
data set and the quotient is returned.
Only if these are near 1 stacking the xcorr is technically correct.
"""
N = len(data1)
if len(data2) != N:
raise ValueError('Length of data1 has to be the same as length of data2')
if isinstance(data1[0], np.integer) or isinstance(data2[0], np.integer):
data1 = 1. * data1
data2 = 1. * data2
if demean:
data1 -= np.mean(data1)
data2 -= np.mean(data2)
fac_whole_time = (np.sum(data1 ** 2) * np.sum(data2 ** 2)) ** 0.5
fac_period = np.zeros(num)
for i in range(num):
ind1 = i * N // num
ind2 = (i + 1) * N // num
data1p = data1[ind1:ind2] - np.mean(data1[ind1:ind2])
data2p = data2[ind1:ind2] - np.mean(data2[ind1:ind2])
fac_period[i] = (np.sum(data1p ** 2) * np.sum(data2p ** 2)) ** 0.5
fac_period = fac_period * num / fac_whole_time
return fac_period
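if __name__ == '__main__':
    # Self-contained usage sketch added for illustration; not part of the
    # original module.  Two synthetic traces are cross-correlated with xcorrf
    # and the known time lag of 50 samples should be recovered at the maximum
    # of the correlation function (index shift corresponds to lag 0).
    np.random.seed(42)
    sig = np.random.randn(10000)
    lag = 50
    other = np.roll(sig, -lag)
    cc = xcorrf(sig, other, shift=200)
    print('recovered lag: %d samples' % (np.argmax(cc) - 200))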
| mit | -1,005,370,512,903,102,600 | 37.339785 | 129 | 0.569441 | false |
lepisma/audire | src/server.py | 1 | 1838 | """
Tornado websocket server
"""
import tornado.ioloop
import tornado.web
import tornado.websocket
import os
from threading import Thread
from modules import noise, anomaly, message, event
settings = {"static_path": os.path.join(os.path.dirname(__file__), "static")}
# Run the audio level thread
Thread(target=noise.noise_level).start()
class IndexHandler(tornado.web.RequestHandler):
"""Handle rendering of dash
"""
def get(self):
self.render("index.html")
class EventHandler(tornado.web.RequestHandler):
"""Event detection route handler
"""
def get(self):
# Event detection results
self.write(event.get_event())
class LevelHandler(tornado.web.RequestHandler):
"""Audio Level request handler
"""
def get(self):
# Make a global level var
# Update it using a threaded script
# Return its value here
self.write(str(noise.level))
class WebSocketHandler(tornado.websocket.WebSocketHandler):
"""
Handle messages from server to dash.
Work involves sending anomaly values and speech to text values.
"""
def open(self):
print("WebSocket Opened")
# Pass the object to different modules
# so that they can communicate with the dash
Thread(target=anomaly.anomaly_detection, args=(self, )).start()
Thread(target=message.speech_win, args=(self, )).start()
def on_message(self, message):
print("Received a message : " + message)
def on_close(self):
print("WebSocket closed")
application = tornado.web.Application([
(r"/", IndexHandler),
(r"/event", EventHandler),
(r"/level", LevelHandler),
(r"/ws", WebSocketHandler)
], **settings)
if __name__ == "__main__":
application.listen(5000)
tornado.ioloop.IOLoop.instance().start()
| gpl-3.0 | 9,133,335,180,269,070,000 | 24.527778 | 77 | 0.658324 | false |
mola/qgis | python/plugins/GdalTools/tools/doRasterize.py | 3 | 4207 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from osgeo import ogr
from ui_widgetRasterize import Ui_GdalToolsWidget as Ui_Widget
from widgetPluginBase import GdalToolsBasePluginWidget as BasePluginWidget
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BasePluginWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.setupUi(self)
BasePluginWidget.__init__(self, self.iface, "gdal_rasterize")
self.lastEncoding = Utils.getLastUsedEncoding()
self.setParamsStatus(
[
(self.inputLayerCombo, [SIGNAL("currentIndexChanged(int)"), SIGNAL("editTextChanged(const QString &)")] ),
(self.outputFileEdit, SIGNAL("textChanged(const QString &)")),
(self.attributeComboBox, SIGNAL("currentIndexChanged(int)"))
]
)
self.connect(self.selectInputFileButton, SIGNAL("clicked()"), self.fillInputFileEdit)
self.connect(self.selectOutputFileButton, SIGNAL("clicked()"), self.fillOutputFileEdit)
self.connect(self.inputLayerCombo, SIGNAL("currentIndexChanged(int)"), self.fillFieldsCombo)
# fill layers combo
self.fillInputLayerCombo()
def fillInputLayerCombo( self ):
self.inputLayerCombo.clear()
( self.layers, names ) = Utils.getVectorLayers()
self.inputLayerCombo.addItems( names )
def fillFieldsCombo(self):
index = self.inputLayerCombo.currentIndex()
if index < 0:
return
self.lastEncoding = self.layers[index].dataProvider().encoding()
self.loadFields( self.getInputFileName() )
def fillInputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedVectorFilter()
inputFile, encoding = Utils.FileDialog.getOpenFileName(self, self.tr( "Select the input file for Rasterize" ), Utils.FileFilter.allVectorsFilter(), lastUsedFilter, True)
if inputFile.isEmpty():
return
Utils.FileFilter.setLastUsedVectorFilter(lastUsedFilter)
self.inputLayerCombo.setCurrentIndex(-1)
self.inputLayerCombo.setEditText(inputFile)
self.lastEncoding = encoding
self.loadFields( inputFile )
def fillOutputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
outputFile = Utils.FileDialog.getOpenFileName(self, self.tr( "Select the raster file to save the results to" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter)
if outputFile.isEmpty():
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.outputFileEdit.setText(outputFile)
def getArguments(self):
arguments = QStringList()
if self.attributeComboBox.currentIndex() >= 0:
arguments << "-a"
arguments << self.attributeComboBox.currentText()
if self.inputLayerCombo.currentIndex() >= 0:
arguments << "-l"
arguments << QFileInfo(self.layers[ self.inputLayerCombo.currentIndex() ].source()).baseName()
elif not self.inputLayerCombo.currentText().isEmpty():
arguments << "-l"
arguments << QFileInfo(self.inputLayerCombo.currentText()).baseName()
arguments << self.getInputFileName()
arguments << self.outputFileEdit.text()
return arguments
def getInputFileName(self):
if self.inputLayerCombo.currentIndex() >= 0:
return self.layers[self.inputLayerCombo.currentIndex()].source()
return self.inputLayerCombo.currentText()
def getOutputFileName(self):
return self.outputFileEdit.text()
def addLayerIntoCanvas(self, fileInfo):
self.iface.addRasterLayer(fileInfo.filePath())
def loadFields(self, vectorFile):
self.attributeComboBox.clear()
if vectorFile.isEmpty():
return
try:
(fields, names) = Utils.getVectorFields(vectorFile)
except Exception, e:
QErrorMessage(self).showMessage( str(e) )
self.inputLayerCombo.clearEditText()
self.inputLayerCombo.setCurrentIndex(-1)
return
ncodec = QTextCodec.codecForName(self.lastEncoding)
for name in names:
self.attributeComboBox.addItem( ncodec.toUnicode(name) )
| gpl-2.0 | 2,840,791,900,625,934,300 | 35.267241 | 175 | 0.700499 | false |