repo_name stringlengths 6 to 61 | path stringlengths 4 to 230 | copies stringlengths 1 to 3 | size stringlengths 4 to 6 | text stringlengths 1.01k to 850k | license stringclasses 15 values | hash int64 -9,220,477,234,079,998,000 to 9,219,060,020B | line_mean float64 11.6 to 96.6 | line_max int64 32 to 939 | alpha_frac float64 0.26 to 0.9 | autogenerated bool 1 class | ratio float64 1.62 to 6.1 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
OpenSight/StorLever | storlever/mngr/utils/zabbixagent.py | 1 | 8763 | """
storlever.mngr.utils.zabbixagent
~~~~~~~~~~~~~~~~
This module implements zabbix agent management.
:copyright: (c) 2014 by OpenSight (www.opensight.cn).
:license: AGPLv3, see LICENSE for more details.
"""
import os
import os.path
import subprocess
from storlever.lib.config import Config
from storlever.lib.command import check_output
from storlever.lib.exception import StorLeverError
from storlever.lib import logger
from storlever.lib.utils import filter_dict
import logging
from storlever.lib.schema import Schema, Use, Optional, \
Default, DoNotCare, BoolVal, IntVal, AutoDel
from storlever.lib.confparse import properties
from storlever.lib.lock import lock
from storlever.mngr.system.cfgmgr import STORLEVER_CONF_DIR, cfg_mgr
from storlever.mngr.system.servicemgr import service_mgr
from storlever.mngr.system.modulemgr import ModuleManager
MODULE_INFO = {
"module_name": "zabbix_agent",
"rpms": [
"zabbix-agent"
],
"comment": "Provides the support of zabbix agent config for storlever"
}
ZABBIX_AGENT_CONF_FILE_NAME = "zabbix_agentd_conf.yaml"
ZABBIX_AGENT_ETC_CONF_DIR = "/etc/zabbix/"
ZABBIX_AGENT_CONF_FILE = "zabbix_agentd.conf"
ZABBIX_AGENT_CONF_SCHEMA = Schema({
Optional("hostname"): Default(Use(str), default=""),
# How often list of active checks is refreshed, in seconds.
# Note that after failing to refresh active checks the next refresh
# will be attempted after 60 seconds.
Optional("refresh_active_check"): Default(IntVal(min=60, max=3600), default=120),
# the server ip:port list for active check. zabbix_agent would get the active check list
# from each server at the refresh_active_check frequency. Entry string format is IP:PORT
Optional("active_check_server_list"): Default([Use(str)], default=[]),
# the server ip list for passive check. each passive check's source ip must
# exist in this list. Entry string Format is IP
Optional("passive_check_server_list"): Default([Use(str)], default=[]),
AutoDel(str): object # for all other key we auto delete
})
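# Illustrative example only (not from the original source): a YAML config this
# schema would accept, with made-up addresses.
#
#   hostname: "storlever-node1"
#   refresh_active_check: 120
#   active_check_server_list:
#     - "192.0.2.10:10051"
#   passive_check_server_list:
#     - "192.0.2.10"
#
# Any key not listed above is stripped by the AutoDel(str) entry during validation.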
class ZabbixAgentManager(object):
"""contains all methods to manage NTP server in linux system"""
def __init__(self):
# need a mutex to protect the zabbix agent config operations
self.lock = lock()
self.conf_file = os.path.join(STORLEVER_CONF_DIR, ZABBIX_AGENT_CONF_FILE_NAME)
self.zabbix_agentd_conf_schema = ZABBIX_AGENT_CONF_SCHEMA
def _load_conf(self):
zabbix_agent_conf = {}
cfg_mgr().check_conf_dir()
if os.path.exists(self.conf_file):
zabbix_agent_conf = \
Config.from_file(self.conf_file, self.zabbix_agentd_conf_schema).conf
else:
zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf)
return zabbix_agent_conf
def _save_conf(self, zabbix_agent_conf):
cfg_mgr().check_conf_dir()
Config.to_file(self.conf_file, zabbix_agent_conf)
def _sync_to_system_conf(self, zabbix_agent_conf):
if not os.path.exists(ZABBIX_AGENT_ETC_CONF_DIR):
os.makedirs(ZABBIX_AGENT_ETC_CONF_DIR)
# conf file
zabbix_agent_property = properties()
# active server
if zabbix_agent_conf["active_check_server_list"]:
zabbix_agent_property["ServerActive"] = \
",".join(zabbix_agent_conf["active_check_server_list"])
else:
zabbix_agent_property.delete("ServerActive")
# Server
server_list = list(zabbix_agent_conf["passive_check_server_list"])
if not server_list:
server_list.append("127.0.0.1")
zabbix_agent_property["Server"] = ",".join(server_list)
# hostname
if zabbix_agent_conf["hostname"] == "":
zabbix_agent_property.delete("Hostname")
else:
zabbix_agent_property["Hostname"] = zabbix_agent_conf["hostname"]
# RefreshActiveChecks
zabbix_agent_property["RefreshActiveChecks"] = str(zabbix_agent_conf["refresh_active_check"])
etc_conf_file = os.path.join(ZABBIX_AGENT_ETC_CONF_DIR, ZABBIX_AGENT_CONF_FILE)
zabbix_agent_property.apply_to(etc_conf_file)
def sync_to_system_conf(self, *args, **kwargs):
"""sync the ntp conf to /etc/ntp.conf"""
if not os.path.exists(self.conf_file):
return # if not conf file, don't change the system config
with self.lock:
zabbix_agent_conf = self._load_conf()
self._sync_to_system_conf(zabbix_agent_conf)
def system_restore_cb(self, *args, **kwargs):
"""sync the ntp conf to /etc/ntp"""
if not os.path.exists(self.conf_file):
return # if not conf file, don't change the system config
os.remove(self.conf_file)
with self.lock:
zabbix_agent_conf = self._load_conf()
self._sync_to_system_conf(zabbix_agent_conf)
def set_agent_conf(self, config={}, operator="unknown", *args, **kwargs):
if not isinstance(config, dict):
raise StorLeverError("Parameter type error", 500)
if len(config) == 0 and len(kwargs) == 0:
return
config.update(kwargs)
not_allow_keys = (
"active_check_server_list",
"passive_check_server_list"
)
config = filter_dict(config, not_allow_keys, True)
with self.lock:
zabbix_agent_conf = self._load_conf()
for name, value in config.items():
if name in zabbix_agent_conf and value is not None:
zabbix_agent_conf[name] = value
# check config conflict
zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf)
# save new conf
self._save_conf(zabbix_agent_conf)
self._sync_to_system_conf(zabbix_agent_conf)
logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
"Zabbix agent config is updated by operator(%s)" %
(operator))
def get_agent_conf(self, *args, **kwargs):
with self.lock:
zabbix_agent_conf = self._load_conf()
not_allow_keys = (
"active_check_server_list",
"passive_check_server_list"
)
zabbix_agent_conf = filter_dict(zabbix_agent_conf, not_allow_keys, True)
return zabbix_agent_conf
def get_passive_check_server_list(self, *args, **kwargs):
with self.lock:
zabbix_agent_conf = self._load_conf()
return zabbix_agent_conf["passive_check_server_list"]
def set_passive_check_server_list(self, servers=[], operator="unknown", *args, **kwargs):
with self.lock:
zabbix_agent_conf = self._load_conf()
zabbix_agent_conf["passive_check_server_list"] = servers
# check config conflict
zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf)
# save new conf
self._save_conf(zabbix_agent_conf)
self._sync_to_system_conf(zabbix_agent_conf)
logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
"Zabbix agent passive server list is updated by operator(%s)" %
(operator))
def get_active_check_server_list(self, *args, **kwargs):
with self.lock:
zabbix_agent_conf = self._load_conf()
return zabbix_agent_conf["active_check_server_list"]
def set_active_check_server_list(self, servers=[], operator="unknown",
*args, **kwargs):
with self.lock:
zabbix_agent_conf = self._load_conf()
zabbix_agent_conf["active_check_server_list"] = servers
# check config conflict
zabbix_agent_conf = self.zabbix_agentd_conf_schema.validate(zabbix_agent_conf)
# save new conf
self._save_conf(zabbix_agent_conf)
self._sync_to_system_conf(zabbix_agent_conf)
logger.log(logging.INFO, logger.LOG_TYPE_CONFIG,
"Zabbix agent active server list is updated by operator(%s)" %
(operator))
ZabbixAgentManager = ZabbixAgentManager()
# register zabbix agent manager callback functions to basic manager
cfg_mgr().register_restore_from_file_cb(ZabbixAgentManager.sync_to_system_conf)
cfg_mgr().register_system_restore_cb(ZabbixAgentManager.system_restore_cb)
service_mgr().register_service("zabbix-agent", "zabbix-agent", "/usr/sbin/zabbix_agentd",
"zabbix agent for system/network monitor")
ModuleManager.register_module(**MODULE_INFO)
def zabbix_agent_mgr():
"""return the global user manager instance"""
return ZabbixAgentManager
| agpl-3.0 | 2,014,510,673,102,703,600 | 34.767347 | 101 | 0.634144 | false | 3.588452 | true | false | false |
Eureka22/ASM_xf | PythonD/site_python/twisted/scripts/websetroot.py | 2 | 2906 | # Twisted, the Framework of Your Internet
# Copyright (C) 2001-2002 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from twisted.scripts import twistd
from twisted.python import usage
try:
import cPickle as pickle
except ImportError:
import pickle
class Options(usage.Options):
optFlags = [
['encrypted', 'e' ,
"The specified tap/aos/xml file is encrypted."]
]
optParameters = [
['port','p', 80,
"The port the web server is running on"],
['file','f','twistd.tap',
"read the given .tap file"],
['python','y', None,
"read an application from within a Python file"],
['xml', 'x', None,
"Read an application from a .tax file (Marmalade format)."],
['source', 's', None,
"Read an application from a .tas file (AOT format)."],
]
def opt_script(self, scriptname):
"""Set the root resource of the web server to the resource created
(and put into the `resource' variable) by this script."""
d = {}
execfile(scriptname, d)
self['root'] = d['resource']
def opt_pickle(self, picklename):
"""Set the root resource of the web server to the resource saved in
this pickle."""
self['root'] = pickle.load(open(picklename))
def getFactory(app, port):
for (num, fact, _, _) in app.tcpPorts:
if num == port:
return fact
raise LookupError('no such port')
def main(config):
if config['encrypted']:
import getpass
passphrase = getpass.getpass('Passphrase: ')
else:
passphrase = None
application = twistd.loadApplication(config, passphrase)
site = getFactory(application, int(config['port']))
site.resource = config['root']
application.save()
def run():
import sys
config = Options()
config.parseOptions()
try:
main(config)
except LookupError, err:
sys.exit(sys.argv[0]+": "+str(err))
except IOError, err:
sys.exit(sys.argv[0]+": %s: %s" % (err.filename, err.strerror))
if __name__ == '__main__':
run()
| gpl-2.0 | 5,813,040,898,145,593,000 | 31.651685 | 79 | 0.602891 | false | 4.169297 | true | false | false |
tgbugs/pyontutils | nifstd/setup.py | 1 | 2463 | import re
from pathlib import Path
from setuptools import setup
def find_version(filename):
_version_re = re.compile(r"__version__ = '(.*)'")
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
__version__ = find_version('nifstd_tools/__init__.py')
with open('README.md', 'rt') as f:
long_description = f.read()
tests_require = ['pytest']
setup(
name='nifstd-tools',
version=__version__,
description='utilities for working with the NIF ontology',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/tgbugs/pyontutils/tree/master/nifstd',
author='Tom Gillespie',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='nif nifstd ontology pyontutils neuroscience',
packages=['nifstd_tools'],
python_requires='>=3.6',
tests_require=tests_require,
install_requires=[
'beautifulsoup4',
'flask',
'nbconvert',
'nbformat',
'networkx',
'psutil',
'pymysql',
'pyontutils>=0.1.26',
'sqlalchemy',
],
extras_require={'dev': ['mysql-connector',
'protobuf',
'pytest-cov',
'wheel',
],
'spell': ['hunspell'],
'test': tests_require,
},
scripts=['bin/ttlcmp'],
entry_points={
'console_scripts': [
'ont-docs=nifstd_tools.docs:main',
'ontree=nifstd_tools.ontree:main',
'registry-sync=nifstd_tools.scr_sync:main',
'slimgen=nifstd_tools.slimgen:main',
],
},
data_files=[('share/nifstd/resources/sparc_term_versions/',
['resources/sparc_term_versions/sparc_terms2-mod.txt']),
('share/nifstd/resources/',
[p.as_posix() for p in Path('resources').iterdir()
if p.is_file() and p.suffix[1:] not in
('confd', 'rc', 'service', 'socket', 'tmp', 'spec')],)])
| mit | -1,007,413,463,080,663,200 | 31.84 | 74 | 0.544458 | false | 3.777607 | true | false | false |
wujunnan0929/pedstrain-detection | doppia/src/tests/objects_detection/plot_channel_statistics.py | 2 | 5526 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Companion script to doppia/src/tests/objects_detection/test_objects_detection
Will plot the content of channel_statistics.txt
"""
from __future__ import print_function
import pylab
def main():
filename = "channel_statistics.txt"
# parse the content --
data = pylab.loadtxt(filename)
scales = data[0,:]
print("num_scales == ", len(scales))
for i in range(len(scales)):
if scales[i] > 1:
break
down_scales_slice = slice(0, i)
up_scales_slice = slice(i, None)
num_channels = (len(data) - 1)/2
# do regressions --
do_regressions = True
# channels to be estimated together
# this values where set after observing the data a first time
regression_groups = {"HOG":range(0,7), "L":[7], "UV":range(8,10)}
down_scales_regressions = {}
up_scales_regressions = {}
if do_regressions:
log_data = pylab.log(data)
log_scales = log_data[0,:]
for name, channels_indices in regression_groups.items():
down_scales_x = list(log_scales[down_scales_slice])*len(channels_indices)
up_scales_x = list(log_scales[up_scales_slice])*len(channels_indices)
down_scales_y = []
up_scales_y = []
for c in channels_indices:
down_scales_y.extend(log_data[c*2 + 1, down_scales_slice])
up_scales_y.extend(log_data[c*2 + 1, up_scales_slice])
# r = a*(k**b) => log(r) = b*log(k) + log(a)
down_b, down_log_a = pylab.polyfit(down_scales_x, down_scales_y, 1)
up_b, up_log_a = pylab.polyfit(up_scales_x, up_scales_y, 1)
down_scales_regressions[name] = [down_b, pylab.exp(down_log_a)]
up_scales_regressions[name] = [up_b, pylab.exp(up_log_a)]
print("%s\tfor downscaling r = %.3f*(x**%.3f), "
"for upscaling r = %.3f*(x**%.3f)" % (name,
down_scales_regressions[name][1], down_scales_regressions[name][0],
up_scales_regressions[name][1], up_scales_regressions[name][0]))
#print(regression_groups)
# plot the content --
pylab.figure(0)
pylab.gcf().set_facecolor("w") # set white background
pylab.grid(True)
colormap = pylab.cm.Spectral
#colormap = pylab.cm.gist_rainbow
#colormap = pylab.cm.brg
for channel_index in range(num_channels):
color = colormap( channel_index / float(num_channels) )
label = "channel %i" % channel_index
#label = None
# mean down
pylab.subplot(2,2,1)
x = scales[down_scales_slice]
y = data[channel_index*2 + 1, down_scales_slice]
pylab.plot(x,y, color=color)#, label=label)
# std dev down
pylab.subplot(2,2,3)
x = scales[down_scales_slice]
y = data[channel_index*2 + 2, down_scales_slice]
pylab.plot(x,y, color=color)#, label=label)
# mean up
pylab.subplot(2,2,2)
x = scales[up_scales_slice]
y = data[channel_index*2 + 1, up_scales_slice]
pylab.plot(x,y, color=color)#, label=label)
# std dev up
pylab.subplot(2,2,4)
x = scales[up_scales_slice]
y = data[channel_index*2 + 2, up_scales_slice]
pylab.plot(x,y, color=color, label=label)
for label, b_a in down_scales_regressions.items():
b,a = b_a
# mean down
pylab.subplot(2,2,1)
x = scales[down_scales_slice]
y = [a*(k**b) for k in x]
color = colormap( regression_groups[label][0] / float(num_channels) )
pylab.plot(x,y,
color=color, label=label,
linewidth=1.5, linestyle="--")
for label, b_a in up_scales_regressions.items():
b,a = b_a
# mean down
pylab.subplot(2,2,2)
x = scales[up_scales_slice]
y = [a*(k**b) for k in x]
color = colormap( regression_groups[label][0] / float(num_channels) )
pylab.plot(x,y,
color=color, label=label,
linewidth=1.5, linestyle="--")
pylab.subplot(2,2,1)
pylab.xlabel("scales")
pylab.ylabel("mean ratio")
pylab.title("Mean ratio when downscaling")
pylab.subplot(2,2,3)
#pylab.legend(loc ="lower right", fancybox=True)
pylab.xlabel("scales")
pylab.ylabel("Standard deviation of ratio")
pylab.title("Standard deviation of when downscaling")
pylab.subplot(2,2,2)
pylab.legend(loc ="lower right", fancybox=True)
pylab.xlabel("scales")
pylab.ylabel("mean ratio")
pylab.title("Mean ratio when upscaling")
pylab.subplot(2,2,4)
pylab.legend(loc ="lower right", fancybox=True)
pylab.xlabel("scales")
pylab.ylabel("Standard deviation of ratio")
pylab.title("Standard deviation of when upscaling")
pylab.suptitle("Channel statistics")
pylab.draw()
pylab.show() # blocking call
return
if __name__ == "__main__":
# Import Psyco if available
try:
import psyco
psyco.full()
except ImportError:
#print("(psyco not found)")
pass
else:
print("(using psyco)")
main()
| gpl-3.0 | -4,115,158,841,492,788,700 | 33.111111 | 92 | 0.545784 | false | 3.54458 | false | false | false |
qadium-memex/CommonCrawlJob | aws/__main__.py | 1 | 1487 | from __future__ import print_function
import sys
from argparse import ArgumentParser
from . import S3Remote
def command_line(remote):
prog = 'crawl_index'
description = 'Helper tool to run MapReduce jobs over Common Crawl'
crawl_list = ArgumentParser(add_help=False)
crawl_list.add_argument(
'-l', '--list',
action='store_true',
help='Enumerate all possible crawl dates',
)
# Preparse Date Codes
crawl, _ = crawl_list.parse_known_args()
if crawl.list:
remote.print_buckets()
exit(0)
parser = ArgumentParser(
parents=[crawl_list],
prog=prog,
description=description,
)
parser.add_argument(
'-v', '--version',
action='version',
version="%s v0.1.0" % prog
)
parser.add_argument(
'-d', '--date',
nargs='?',
default='latest',
help='Specify crawl date',
metavar='d',
)
parser.add_argument(
'-f', '--file',
nargs='?',
metavar='f',
default=None,
help='Output to a file'
)
return parser.parse_args()
def main():
remote = S3Remote()
args = command_line(remote)
crawl = remote.select_crawl() if args.date == 'latest' else remote.select_crawl(args.date)
fp = open(args.file, 'wt') if args.file else sys.stdout
idx = remote.get_index(crawl)
for i in idx:
print(i, file=fp)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 3,053,803,613,909,744,000 | 22.234375 | 94 | 0.570276 | false | 3.618005 | false | false | false |
ThiefMaster/indico | indico/modules/events/timetable/util.py | 3 | 16943 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from operator import attrgetter
from flask import render_template, session
from pytz import utc
from sqlalchemy import Date, cast
from sqlalchemy.orm import contains_eager, joinedload, subqueryload, undefer
from indico.core.db import db
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.models.events import Event
from indico.modules.events.models.persons import EventPersonLink
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.timetable.legacy import TimetableSerializer, serialize_event_info
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.caching import memoize_request
from indico.util.date_time import format_time, get_day_end, iterdays
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.forms.colors import get_colors
def _query_events(categ_ids, day_start, day_end):
event = db.aliased(Event)
dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
return (db.session.query(Event.id, TimetableEntry.start_dt)
.filter(
Event.category_chain_overlaps(categ_ids),
~Event.is_deleted,
((Event.timetable_entries.any(dates_overlap(TimetableEntry))) |
(Event.query.exists().where(
Event.happens_between(day_start, day_end) &
(Event.id == event.id)))))
.group_by(Event.id, TimetableEntry.start_dt)
.order_by(Event.id, TimetableEntry.start_dt)
.join(TimetableEntry,
(TimetableEntry.event_id == Event.id) & (dates_overlap(TimetableEntry)),
isouter=True))
def _query_blocks(event_ids, dates_overlap, detail_level='session'):
options = [subqueryload('session').joinedload('blocks').joinedload('person_links')]
if detail_level == 'contribution':
options.append(contains_eager(SessionBlock.timetable_entry).joinedload(TimetableEntry.children))
else:
options.append(contains_eager(SessionBlock.timetable_entry))
return (SessionBlock.query
.filter(~Session.is_deleted,
Session.event_id.in_(event_ids),
dates_overlap(TimetableEntry))
.options(*options)
.join(TimetableEntry)
.join(Session))
def find_latest_entry_end_dt(obj, day=None):
"""Get the latest end datetime for timetable entries within the object.
:param obj: The :class:`Event` or :class:`SessionBlock` that will be used to
look for timetable entries.
:param day: The local event date to look for timetable entries. Applicable only
to ``Event``.
:return: The end datetime of the timetable entry finishing the latest. ``None``
if no entry was found.
"""
if isinstance(obj, Event):
if day is None:
raise ValueError("No day specified for event.")
if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
raise ValueError("Day out of event bounds.")
entries = obj.timetable_entries.filter(TimetableEntry.parent_id.is_(None),
cast(TimetableEntry.start_dt.astimezone(obj.tzinfo), Date) == day).all()
elif isinstance(obj, SessionBlock):
if day is not None:
raise ValueError("Day specified for session block.")
entries = obj.timetable_entry.children
else:
raise ValueError(f"Invalid object type {type(obj)}")
return max(entries, key=attrgetter('end_dt')).end_dt if entries else None
def find_next_start_dt(duration, obj, day=None, force=False):
"""Find the next most convenient start date fitting a duration within an object.
:param duration: Duration to fit into the event/session-block.
:param obj: The :class:`Event` or :class:`SessionBlock` the duration needs to
fit into.
:param day: The local event date where to fit the duration in case the object is
an event.
:param force: Gives earliest datetime if the duration doesn't fit.
:return: The end datetime of the latest scheduled entry in the object if the
duration fits then. If it doesn't, the latest datetime that fits it.
``None`` if the duration cannot fit in the object, earliest datetime
if ``force`` is ``True``.
"""
if isinstance(obj, Event):
if day is None:
raise ValueError("No day specified for event.")
if not (obj.start_dt_local.date() <= day <= obj.end_dt_local.date()):
raise ValueError("Day out of event bounds.")
earliest_dt = obj.start_dt if obj.start_dt_local.date() == day else obj.start_dt.replace(hour=8, minute=0)
latest_dt = obj.end_dt if obj.start_dt.date() == day else get_day_end(day, tzinfo=obj.tzinfo)
elif isinstance(obj, SessionBlock):
if day is not None:
raise ValueError("Day specified for session block.")
earliest_dt = obj.timetable_entry.start_dt
latest_dt = obj.timetable_entry.end_dt
else:
raise ValueError(f"Invalid object type {type(obj)}")
max_duration = latest_dt - earliest_dt
if duration > max_duration:
return earliest_dt if force else None
start_dt = find_latest_entry_end_dt(obj, day=day) or earliest_dt
end_dt = start_dt + duration
if end_dt > latest_dt:
start_dt = latest_dt - duration
return start_dt
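# Illustrative usage sketch (not part of the original module); `event` and
# `one_hour` are assumed to be an Event and a timedelta:
#
#   start = find_next_start_dt(one_hour, event, day=event.start_dt_local.date())
#   if start is None:
#       pass  # the duration does not fit anywhere on that day
#
# With force=True the earliest possible datetime is returned even when the
# duration does not fit.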
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True,
includible=lambda item: True):
"""Retrieve time blocks that fall within a specific time interval
for a given set of categories.
:param categ_ids: iterable containing list of category IDs
:param start_dt: start of search interval (``datetime``, expected
to be in display timezone)
:param end_dt: end of search interval (``datetime`` in expected
to be in display timezone)
:param detail_level: the level of detail of information
(``event|session|contribution``)
:param tz: the ``timezone`` information should be displayed in
:param from_categ: ``Category`` that will be taken into account to calculate
visibility
:param grouped: Whether to group results by start date
:param includible: a callable, to allow further arbitrary custom filtering (maybe from 3rd
party plugins) on whether to include (returns True) or not (returns False)
each ``detail`` item. Default always returns True.
:returns: a dictionary containing timetable information in a
structured way. See source code for examples.
"""
day_start = start_dt.astimezone(utc)
day_end = end_dt.astimezone(utc)
dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)
items = defaultdict(lambda: defaultdict(list))
# first of all, query TimetableEntries/events that fall within
# specified range of dates (and category set)
events = _query_events(categ_ids, day_start, day_end)
if from_categ:
events = events.filter(Event.is_visible_in(from_categ.id))
for eid, tt_start_dt in events:
if tt_start_dt:
items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
else:
items[eid] = None
# then, retrieve detailed information about the events
event_ids = set(items)
query = (Event.query
.filter(Event.id.in_(event_ids))
.options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),
joinedload(Event.own_room).noload('owner'),
joinedload(Event.own_venue),
joinedload(Event.category).undefer('effective_icon_data'),
undefer('effective_protection_mode')))
scheduled_events = defaultdict(list)
ongoing_events = []
events = []
for e in query:
if not includible(e):
continue
if grouped:
local_start_dt = e.start_dt.astimezone(tz).date()
local_end_dt = e.end_dt.astimezone(tz).date()
if items[e.id] is None:
# if there is no TimetableEntry, this means the event has not timetable on that interval
for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):
# if the event starts on this date, we've got a time slot
if day.date() == local_start_dt:
scheduled_events[day.date()].append((e.start_dt, e))
else:
ongoing_events.append(e)
else:
for start_d, start_dts in items[e.id].items():
scheduled_events[start_d].append((start_dts[0], e))
else:
events.append(e)
# result['events'][date(...)] -> [(datetime(....), Event(...))]
# result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
# result['ongoing_events'] = [Event(...)]
if grouped:
result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
else:
result = defaultdict(lambda: defaultdict(list))
result.update({
'events': scheduled_events if grouped else events,
'ongoing_events': ongoing_events
})
# according to detail level, ask for extra information from the DB
if detail_level != 'event':
query = _query_blocks(event_ids, dates_overlap, detail_level)
if grouped:
for b in query:
start_date = b.timetable_entry.start_dt.astimezone(tz).date()
result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))
else:
for b in query:
result[b.session.event_id]['blocks'].append(b)
if detail_level == 'contribution':
query = (Contribution.query
.filter(Contribution.event_id.in_(event_ids),
dates_overlap(TimetableEntry),
~Contribution.is_deleted)
.options(contains_eager(Contribution.timetable_entry),
joinedload(Contribution.person_links))
.join(TimetableEntry))
if grouped:
for c in query:
start_date = c.timetable_entry.start_dt.astimezone(tz).date()
result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))
else:
for c in query:
result[c.event_id]['contributions'].append(c)
query = (Break.query
.filter(TimetableEntry.event_id.in_(event_ids), dates_overlap(TimetableEntry))
.options(contains_eager(Break.timetable_entry))
.join(TimetableEntry))
if grouped:
for b in query:
start_date = b.timetable_entry.start_dt.astimezone(tz).date()
result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))
else:
for b in query:
result[b.timetable_entry.event_id]['breaks'].append(b)
return result
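# Illustrative sketch of consuming the grouped result (hypothetical variables):
#
#   tt = get_category_timetable([categ.id], start_dt, end_dt, detail_level='session', tz=tz)
#   for day, day_events in tt['events'].items():      # date -> [(start_dt, Event)]
#       for start, event in day_events:
#           blocks = tt[event.id]['blocks'][day]      # [(TimetableEntry, SessionBlock)]
#   ongoing = tt['ongoing_events']                     # [Event, ...]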
def render_entry_info_balloon(entry, editable=False, sess=None, is_session_timetable=False):
if entry.break_:
return render_template('events/timetable/balloons/break.html', break_=entry.break_, editable=editable,
can_manage_event=entry.event.can_manage(session.user), color_list=get_colors(),
event_locked=entry.event.is_locked,
is_session_timetable=is_session_timetable)
elif entry.contribution:
return render_template('events/timetable/balloons/contribution.html', contrib=entry.contribution,
editable=editable,
can_manage_event=entry.event.can_manage(session.user),
can_manage_contributions=sess.can_manage_contributions(session.user) if sess else True,
event_locked=entry.event.is_locked)
elif entry.session_block:
return render_template('events/timetable/balloons/block.html', block=entry.session_block, editable=editable,
can_manage_session=sess.can_manage(session.user) if sess else True,
can_manage_blocks=sess.can_manage_blocks(session.user) if sess else True,
color_list=get_colors(), event_locked=entry.event.is_locked,
is_session_timetable=is_session_timetable)
else:
raise ValueError("Invalid entry")
def render_session_timetable(session, timetable_layout=None, management=False):
if not session.start_dt:
# no scheduled sessions present
return ''
timetable_data = TimetableSerializer(session.event).serialize_session_timetable(session, without_blocks=True,
strip_empty_days=True)
event_info = serialize_event_info(session.event)
tpl = get_template_module('events/timetable/_timetable.html')
return tpl.render_timetable(timetable_data, event_info, timetable_layout=timetable_layout, management=management)
def get_session_block_entries(event, day):
"""Return a list of event top-level session blocks for the given `day`."""
return (event.timetable_entries
.filter(db.cast(TimetableEntry.start_dt.astimezone(event.tzinfo), db.Date) == day.date(),
TimetableEntry.type == TimetableEntryType.SESSION_BLOCK)
.all())
def shift_following_entries(entry, shift, session_=None):
"""Reschedule entries starting after the given entry by the given shift."""
query = entry.siblings_query.filter(TimetableEntry.start_dt >= entry.end_dt)
if session_ and not entry.parent:
query = query.filter(TimetableEntry.type == TimetableEntryType.SESSION_BLOCK,
TimetableEntry.session_block.has(session_id=session_.id))
entries = query.all()
if not entries:
return []
for sibling in entries:
sibling.move(sibling.start_dt + shift)
def get_timetable_offline_pdf_generator(event):
from indico.legacy.pdfinterface.conference import TimetablePDFFormat, TimeTablePlain
pdf_format = TimetablePDFFormat()
return TimeTablePlain(event, session.user, sortingCrit=None, ttPDFFormat=pdf_format, pagesize='A4',
fontsize='normal')
def get_time_changes_notifications(changes, tzinfo, entry=None):
notifications = []
for obj, change in changes.items():
if entry:
if entry.object == obj:
continue
if not isinstance(obj, Event) and obj.timetable_entry in entry.children:
continue
msg = None
if isinstance(obj, Event):
if 'start_dt' in change:
new_time = change['start_dt'][1]
msg = _("Event start time changed to {}")
elif 'end_dt' in change:
new_time = change['end_dt'][1]
msg = _("Event end time changed to {}")
else:
raise ValueError("Invalid change in event.")
elif isinstance(obj, SessionBlock):
if 'start_dt' in change:
new_time = change['start_dt'][1]
msg = _("Session block start time changed to {}")
elif 'end_dt' in change:
new_time = change['end_dt'][1]
msg = _("Session block end time changed to {}")
else:
raise ValueError("Invalid change in session block.")
if msg:
notifications.append(msg.format(format_time(new_time, timezone=tzinfo)))
return notifications
@memoize_request
def get_top_level_entries(event):
return event.timetable_entries.filter_by(parent_id=None).all()
@memoize_request
def get_nested_entries(event):
entries = event.timetable_entries.filter(TimetableEntry.parent_id.isnot(None)).all()
result = defaultdict(list)
for entry in entries:
result[entry.parent_id].append(entry)
return result
| mit | -1,825,261,157,495,425,300 | 46.063889 | 119 | 0.621496 | false | 4.064044 | true | false | false |
basak/glacier-cli | glacier_test.py | 1 | 7446 | #!/usr/bin/env python
# Copyright (c) 2013 Robie Basak
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
import sys
import unittest
import mock
from mock import Mock, patch, sentinel
import nose.tools
import glacier
EX_TEMPFAIL = 75
PY2 = (sys.version_info[0] == 2)
def patch_builtin(name, *args, **kwargs):
"""Helper to patch builtins easily for py2 and py3"""
target = '{b}.{n}'.format(b='__builtin__' if PY2 else 'builtins', n=name)
return patch(target, *args, **kwargs)
class TestCase(unittest.TestCase):
def init_app(self, args, memory_cache=False):
self.connection = Mock()
if memory_cache:
self.cache = glacier.Cache(0, db_path=':memory:')
else:
self.cache = Mock()
self.app = glacier.App(
args=args,
connection=self.connection,
cache=self.cache)
def run_app(self, args):
self.init_app(args)
self.app.main()
def test_vault_list(self):
self.init_app(['vault', 'list'])
mock_vault = Mock()
mock_vault.name = sentinel.vault_name
self.connection.list_vaults.return_value = [mock_vault]
print_mock = Mock()
with patch_builtin('print', print_mock):
self.app.main()
print_mock.assert_called_once_with(sentinel.vault_name, sep=u'\n')
def test_vault_create(self):
self.run_app(['vault', 'create', 'vault_name'])
self.connection.create_vault.assert_called_once_with('vault_name')
def test_archive_list(self):
self.init_app(['archive', 'list', 'vault_name'])
archive_list = [sentinel.archive_one, sentinel.archive_two]
self.cache.get_archive_list.return_value = archive_list
print_mock = Mock()
with patch_builtin('print', print_mock):
self.app.main()
print_mock.assert_called_once_with(*archive_list, sep="\n")
def test_archive_list_force_ids(self):
self.init_app(
['archive', 'list', '--force-ids', 'vault_name'],
memory_cache=True,
)
self.cache.add_archive('vault_name', 'archive_name_1', 'id_1')
self.cache.add_archive('vault_name', 'archive_name_1', 'id_2')
self.cache.add_archive('vault_name', 'archive_name_3', 'id_3')
print_mock = Mock()
with patch_builtin('print', print_mock):
self.app.main()
# print should have been called with a list of the items in some
# arbitrary order. Testing this correctly involves being agnostic with
# the order of args in *args. Does mock provide any other way of doing
# this other than by introspecting mock_calls like this?
nose.tools.assert_equals(print_mock.call_count, 1)
nose.tools.assert_equals(
sorted(print_mock.mock_calls[0][1]),
sorted([
u'id:id_1\tarchive_name_1',
u'id:id_2\tarchive_name_1',
u'id:id_3\tarchive_name_3',
]),
)
nose.tools.assert_equals(
print_mock.mock_calls[0][2],
{'sep': "\n"}
)
def test_archive_upload(self):
file_obj = Mock()
file_obj.name = 'filename'
file_obj.mode = 'rb'
open_mock = Mock(return_value=file_obj)
with patch_builtin('open', open_mock):
self.run_app(['archive', 'upload', 'vault_name', 'filename'])
self.connection.get_vault.assert_called_with('vault_name')
mock_vault = self.connection.get_vault.return_value
mock_vault.create_archive_from_file.assert_called_once_with(
file_obj=file_obj, description='filename')
def test_archive_stdin_upload(self):
self.run_app(['archive', 'upload', 'vault_name', '-'])
self.connection.get_vault.assert_called_once_with('vault_name')
vault = self.connection.get_vault.return_value
expected_file_obj = sys.stdin if PY2 else sys.stdin.buffer
vault.create_archive_from_file.assert_called_once_with(
file_obj=expected_file_obj, description='<stdin>')
def test_archive_retrieve_no_job(self):
self.init_app(['archive', 'retrieve', 'vault_name', 'archive_name'])
mock_vault = Mock()
mock_vault.list_jobs.return_value = []
self.connection.get_vault.return_value = mock_vault
mock_exit = Mock()
mock_print = Mock()
with patch('sys.exit', mock_exit):
with patch_builtin('print', mock_print):
self.app.main()
mock_exit.assert_called_once_with(EX_TEMPFAIL)
mock_print.assert_called_once_with(
u"glacier: queued retrieval job for archive 'archive_name'",
file=sys.stderr)
self.connection.get_vault.assert_called_once_with('vault_name')
mock_vault.retrieve_archive.assert_called_once_with(
self.cache.get_archive_id.return_value)
def test_archive_retrieve_with_job(self):
self.init_app(['archive', 'retrieve', 'vault_name', 'archive_name'])
self.cache.get_archive_id.return_value = sentinel.archive_id
mock_job = Mock(
archive_id=sentinel.archive_id,
completed=True,
completion_date='1970-01-01T00:00:00Z',
archive_size=1)
mock_vault = Mock()
mock_vault.list_jobs.return_value = [mock_job]
self.connection.get_vault.return_value = mock_vault
mock_open = mock.mock_open()
with patch_builtin('open', mock_open):
self.app.main()
self.cache.get_archive_id.assert_called_once_with(
'vault_name', 'archive_name')
mock_job.get_output.assert_called_once_with()
mock_job.get_output.return_value.read.assert_called_once_with()
mock_open.assert_called_once_with('archive_name', u'wb')
mock_open.return_value.write.assert_called_once_with(
mock_job.get_output.return_value.read.return_value)
def test_archive_delete(self):
self.run_app(['archive', 'delete', 'vault_name', 'archive_name'])
self.cache.get_archive_id.assert_called_once_with(
'vault_name', 'archive_name')
self.connection.get_vault.assert_called_with('vault_name')
mock_vault = self.connection.get_vault.return_value
mock_vault.delete_archive.assert_called_once_with(
self.cache.get_archive_id.return_value)
| mit | 7,930,861,665,525,968,000 | 39.688525 | 78 | 0.63054 | false | 3.626887 | true | false | false |
RHInception/re-worker-git | replugin/gitworker/__init__.py | 1 | 11607 | # -*- coding: utf-8 -*-
# Copyright © 2014 SEE AUTHORS FILE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Git worker.
"""
import git
import time
import os
import shutil
import subprocess
import uuid
from reworker.worker import Worker
class GitWorkerError(Exception):
"""
Base exception class for GitWorker errors.
"""
pass
class GitWorker(Worker):
"""
Worker which provides basic functionality with Git.
"""
#: allowed subcommands
subcommands = ('CherryPickMerge', 'Merge')
dynamic = []
# Subcommand methods
def cherry_pick_merge(self, body, corr_id, output):
# Get neede ic variables
params = body.get('parameters', {})
try:
commits = params['commits']
to_branch = params['to_branch']
temp_branch = params.get('temp_branch', 'mergebranch')
run_scripts = params.get('run_scripts', [])
repo = params['repo']
self.app_logger.info(
'Attempting to cherry pick the following commits on %s: %s' % (
repo, ",".join(commits)))
# Create a workspace
workspace = self._create_workspace()
# result_data is where we store the results to return to the bus
result_data = {
"cherry_pick": [],
}
# Create a git command wrapper
gitcmd = git.cmd.Git(workspace)
# Clone
location_type = 'local'
if (
repo.startswith('http://') or
repo.startswith('https://') or
repo.startswith('ssh://')):
location_type = 'remote'
output.info('Cloning %s %s' % (location_type, repo))
gitcmd.clone(repo, workspace)
local_repo = git.Repo(workspace)
output.info('Checking out branch %s for work' % temp_branch)
local_repo.git.checkout(b=temp_branch)
for commit in commits:
self.app_logger.info("Going to cherry pick %s now" % commit)
local_repo.git.cherry_pick(commit)
result_data['cherry_pick'].append(commit)
output.info('Cherry picked %s' % commit)
self.app_logger.info("Cherry picked %s successfully" % commit)
local_repo.git.fetch('origin', to_branch)
local_repo.git.checkout(to_branch)
local_repo.git.pull('origin', to_branch)
local_repo.git.merge(temp_branch, squash=True)
local_repo.git.commit(m="Commit for squash-merge of release: %s" % corr_id)
result_data['commit'] = local_repo.commit().hexsha
result_data['branch'] = to_branch
if run_scripts:
for script in run_scripts:
try:
self._config['scripts'][script]
self.app_logger.info('Executing %s' % script)
self.app_logger.debug('Running: ["%s"]' % (
script))
script_process = subprocess.Popen([
self._config['scripts'][script]],
shell=False,
cwd=workspace,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Give a little time ...
time.sleep(2)
# If we get a non 0 then it's a failure.
if script_process.returncode != 0:
# stop executing and bail out
raise GitWorkerError(str(script_process.stdout.read()))
result_data['commit'] = local_repo.commit().hexsha
self.app_logger.info('%s run finished' % script)
output.info('%s run finished' % script)
except KeyError, ke:
self.app_logger.warn(
'%s is not in the allowed scripts list. Skipped.' % script)
output.warn(
'%s is not in the allowed scripts list. Skipped.' % script)
local_repo.git.push("origin", to_branch, force=True)
# Remove the workspace after work is done (unless
# keep_workspace is True)
if not params.get('keep_workspace', False):
self._delete_workspace(workspace)
output.info('Cleaning up workspace.')
self.app_logger.info('Cherry picking succeeded.')
return {'status': 'completed', 'data': result_data}
except KeyError, ke:
raise GitWorkerError('Missing input %s' % ke)
except git.GitCommandError, gce:
raise GitWorkerError('Git error: %s' % gce)
def merge(self, body, corr_id, output):
"""
Merge a branch into another branch.
"""
params = body.get('parameters', {})
try:
from_branch = params['from_branch']
to_branch = params['to_branch']
repo = params['repo']
msg = 'Attempting to merge %s to %s' % (from_branch, to_branch)
self.app_logger.info(msg)
output.info(msg)
# Create a workspace
workspace = self._create_workspace()
# Create a git command wrapper
gitcmd = git.cmd.Git(workspace)
# Clone
location_type = 'local'
if (
repo.startswith('http://') or
repo.startswith('https://') or
repo.startswith('ssh://')):
location_type = 'remote'
output.info('Cloning %s %s' % (location_type, repo))
gitcmd.clone(repo, workspace)
local_repo = git.Repo(workspace)
output.info('Checking out branch %s to merge into' % to_branch)
# Make sure we have the data from the server
local_repo.git.fetch('origin', from_branch)
local_repo.git.fetch('origin', to_branch)
# Move onto the branch
local_repo.git.checkout(to_branch)
# Do the work
local_repo.git.merge("origin/" + from_branch)
output.info('Merged %s to %s successfully' % (
from_branch, to_branch))
self.app_logger.info("Merged %s to %s successfully" % (
from_branch, to_branch))
result_data = {
'commit': local_repo.commit().hexsha,
'from_branch': from_branch,
'to_branch': to_branch,
}
local_repo.git.push("origin", to_branch, force=False)
# Remove the workspace after work is done (unless
# keep_workspace is True)
if not params.get('keep_workspace', False):
self._delete_workspace(workspace)
output.info('Cleaning up workspace.')
self.app_logger.info('Merge succeeded.')
return {'status': 'completed', 'data': result_data}
except KeyError, ke:
raise GitWorkerError('Missing input %s' % ke)
except git.GitCommandError, gce:
raise GitWorkerError('Git error: %s' % gce)
def _create_workspace(self):
"""
Creates a workspace to clone in.
"""
workspace = os.path.sep.join([
self._config['workspace_dir'],
str(uuid.uuid4())])
self.app_logger.debug('Trying to make %s.' % workspace)
os.makedirs(workspace)
self.app_logger.info('Created workspace at %s' % workspace)
return workspace
def _delete_workspace(self, workspace):
"""
Deletes a workspace after worker is done.
"""
self.app_logger.debug('Attempting to delete workspace %s.' % workspace)
if workspace.startswith(self._config['workspace_dir']):
shutil.rmtree(workspace)
self.app_logger.info('Deleted workspace at %s' % workspace)
else:
self.app_logger.warn(
'Workspace %s is not inside %s. Not removing.' % (
workspace, self._config['workspace_dir']))
def process(self, channel, basic_deliver, properties, body, output):
"""
Processes GitWorker requests from the bus.
*Keys Requires*:
* subcommand: the subcommand to execute.
"""
# Ack the original message
self.ack(basic_deliver)
corr_id = str(properties.correlation_id)
try:
try:
subcommand = str(body['parameters']['subcommand'])
if subcommand not in self.subcommands:
raise KeyError()
except KeyError:
raise GitWorkerError(
'No valid subcommand given. Nothing to do!')
cmd_method = None
if subcommand == 'CherryPickMerge':
cmd_method = self.cherry_pick_merge
elif subcommand == 'Merge':
cmd_method = self.merge
else:
self.app_logger.warn(
'Could not find the implementation of subcommand %s' % (
subcommand))
raise GitWorkerError('No subcommand implementation')
result = cmd_method(body, corr_id, output)
# Send results back
self.send(
properties.reply_to,
corr_id,
result,
exchange=''
)
# Notify on result. Not required but nice to do.
self.notify(
'GitWorker Executed Successfully',
'GitWorker successfully executed %s. See logs.' % (
subcommand),
'completed',
corr_id)
# Send out responses
self.app_logger.info(
'GitWorker successfully executed %s for '
'correlation_id %s. See logs.' % (
subcommand, corr_id))
except GitWorkerError, fwe:
# If a GitWorkerError happens send a failure log it.
self.app_logger.error('Failure: %s' % fwe)
self.send(
properties.reply_to,
corr_id,
{'status': 'failed'},
exchange=''
)
self.notify(
'GitWorker Failed',
str(fwe),
'failed',
corr_id)
output.error(str(fwe))
def main(): # pragma: no cover
from reworker.worker import runner
runner(GitWorker)
if __name__ == '__main__': # pragma nocover
main()
| agpl-3.0 | -3,250,987,112,263,276,000 | 35.961783 | 87 | 0.521368 | false | 4.572892 | false | false | false |
abhijitbangera/ecommerce | src/products/views.py | 1 | 3497 | from django.shortcuts import render,get_object_or_404, redirect
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.http import Http404
from django.db.models import Q
from .forms import VariationInventoryFormSet
from django.contrib import messages
from .mixins import StaffRequiredMixin,LoginRequiredMixin
# Create your views here.
from .models import Product, Variation,Category
class CategoryListView(ListView):
model=Category
queryset=Category.objects.all()
template_name="products/product_list.html"
class CategoryDetailView(DetailView):
model=Category
def get_context_data(self,*args,**kwargs):
context=super(CategoryDetailView,self).get_context_data(*args,**kwargs)
obj=self.get_object()
product_set=obj.product_set.all()
default_products=obj.default_category.all()
products=(product_set | default_products).distinct()
context["products"]=products
return context
class VariationListView(StaffRequiredMixin,ListView):
model=Variation
queryset=Variation.objects.all()
def get_context_data(self,*args,**kwargs):
context=super(VariationListView,self).get_context_data(*args,**kwargs)
context['formset']=VariationInventoryFormSet(queryset=self.get_queryset())
return context
def get_queryset(self,*args,**kwargs):
product_pk=self.kwargs.get("pk")
if product_pk:
product=get_object_or_404(Product,pk=product_pk)
queryset=Variation.objects.filter(product=product)
return queryset
def post(self,request,*args,**kwargs):
formset=VariationInventoryFormSet(request.POST,request.FILES)
print (request.POST)
if formset.is_valid():
formset.save(commit=False)
for form in formset:
new_item=form.save(commit=False)
if new_item.title:
product_pk=self.kwargs.get("pk")
product=get_object_or_404(Product,pk=product_pk)
new_item.product=product
new_item.save()
messages.success(request,"updated successfully")
return redirect("products")
raise Http404
#--------------------------------------------------------------
#Class based view and function based view below
class ProductListView(ListView):
model=Product
queryset=Product.objects.all()
def get_context_data(self,*args,**kwargs):
context=super(ProductListView,self).get_context_data(*args,**kwargs)
print (context)
return context
def get_queryset(self,*args,**kwargs):
qs=super(ProductListView,self).get_queryset(*args,**kwargs)
query=self.request.GET.get("q")
if query:
qs=self.model.objects.filter(
Q(title__icontains=query) |
Q(description__icontains=query)
)
try:
qs2=self.model.objects.filter(Q(price=query))
qs=(qs | qs2).distinct()
except:
pass
return qs
import random
class ProductDetailView(DetailView):
model=Product
def get_context_data(self,*args,**kwargs):
context=super(ProductDetailView,self).get_context_data(*args,**kwargs)
instance=self.get_object()
context["related"]=sorted(Product.objects.get_related(instance)[:6],key= lambda x:random.random())
return context
def product_details_view_func(request,id):
# product_instance=Product.objects.get(id=id)
product_instance=get_object_or_404(Product,id=id)
try:
product_instance=Product.objects.get(id=id)
except Product.DoesNotExist:
raise Http404
except:
raise Http404
template="products/product_detail.html"
context={
"object":product_instance
}
return render(request,template,context)
#-------------------------------------------------------------- | mit | -5,411,964,018,029,951,000 | 30.232143 | 100 | 0.729482 | false | 3.411707 | false | false | false |
edggy/grAIte | server/script.py | 1 | 3927 | from os import path
from collections import deque
import string
import io
from constants import SCRIPT_PATH
import opcodes
class Script:
'''
A script is code to be run by each agent
It has code
'''
def __init__(self, scriptName):
self.ip = 0  # instruction pointer; initial value of 0 is an assumption (the original read an undefined `ip`)
self.data = []
self.keys = {}
self.keysBack = {}
with open(path.join(SCRIPT_PATH, scriptName), 'rb') as f:
tok = ''
lastIndex = None
for byte in f.read():
byte = byte.upper()
# Skip non-letter bytes
if byte not in string.uppercase:
continue
# Add letter to tok
tok += byte
# Check if tok is an opcode
if tok in opcodes.OP_CODES:
# Check if tok is the 'literal' opcode
if tok == opcodes.LITERAL_CODE:
# Parse literal number
num = ''
for digit in f.read():
if digit not in (string.digits + '-bx' + string.uppercase[:6] + ' +*/()' + '<>^~&|%'):
break
num += digit
f.seek(-1, io.SEEK_CUR)
try:
# Add literal to data
self.data.append(int(eval(num)))
except:
pass
else:
# Add opcode to data
self.data.append(tok)
if tok in opcodes.TICK_CODES:
if lastIndex is not None:
index = len(self.data)
self.keys[lastIndex] = index
self.keysBack[index] = lastIndex
lastIndex = index
# Reset tok
tok = ''
def nextOps(self):
'''
Gets the next list of op codes to be evaluated
'''
nextKey = self.keys[self.ip]
opStr = self.data[self.ip:nextKey-1]
self.ip = nextKey
return opStr
def execute(self, opStr):
'''
Executes a list of evaluated op codes
@return - The command to be executed in a tuple ('opcode', value)
'''
# JG MA 25 50 MA 2 0 -1
opstack = deque(opStr)
argStack = deque()
while len(opstack) > 0:
p = opstack.pop()
if p in opcodes.OP_CODES:
arity = opcodes.OP_CODES[p]
if len(argStack) > arity:
args = [argStack.pop() for i in range(arity)]
result = self.applyOpCode(p, args)
if result is not None:
argStack.append(result)
else:
argStack.append(p)
# opstack = JG MA 25 50 MA 2 0 -1
# argStack =
# opstack = JG MA 25 50 MA 2 0
# argStack = -1
# opstack = JG MA 25 50 MA 2
# argStack = -1 0
# opstack = JG MA 25 50 MA
# argStack = -1 0 2
# MA 2 0 = 2
# opstack = JG MA 25 50
# argStack = -1 2
# opstack = JG MA 25
# argStack = -1 2 50
# opstack = JG MA
# argStack = -1 2 50 25
# MA 50 25 = 75
# opstack = JG
# argStack = -1 2 75
# JG 75 2 -1 = None (sets ip to ip-1)
# opstack =
# argStack =
def applyOpCode(self, opcode, args):
pass
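# Speculative sketch only (the opcode semantics are not defined in this file).
# From the trace above, 'MA' behaves like addition and 'JG' like a conditional
# jump taking a relative offset; an implementation could start out roughly as:
#
#   if opcode == 'MA':
#       return args[0] + args[1]
#   if opcode == 'JG':
#       if args[0] > args[1]:
#           self.jump(args[2])   # hypothetical helper for a relative tick jump
#       return None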
| mit | -5,724,895,252,161,347,000 | 27.875 | 110 | 0.389865 | false | 4.658363 | false | false | false |
rfancn/myprojects | spload/handlers/ISPDownloadHandler.py | 1 | 2723 | import logging
import urllib
import xmlrpclib
from handlerbase import Abort
from handlerdl import DownloadHandler
from network.httprequest import BadHeader
from network.stringcookiejar import StringCookieJar
class ISPDownloadHandler(DownloadHandler):
def loadCookieFromSPAT(self):
""" Load cookies from center server """
cookieString = None
try:
s = xmlrpclib.ServerProxy("http://localhost:8888")
cookieString = s.get_cookie_str()
except Exception as e:
logging.error("Failed to get the cookie from center server because of: %s" % e)
return None
stringCookieJar = StringCookieJar()
stringCookieJar.load_from_str(cookieString, True, True)
return stringCookieJar
def setup(self):  # NOTE: the original method definition line is missing here; the name `setup` is an assumption
""" handles important things to do before starting """
cj = self.loadCookieFromSPAT()
if cj:
self.req.setCookieJar(cj)
else:
raise Abort("Failed to get cookie from Center server")
def abort(self):
return self.task.abort
def downloadISPAtt(self):
# try at most 5 times redirect
url = self.task.url
for i in range(5):
# if just_header set, then dict like http headers will be return
headers = self.httpfetch(url, just_header = True)
# self.load does not raise a BadHeader on 404 responses, do it here
if headers.has_key('code') and headers['code'] == 404:
raise BadHeader(404)
if 'location' in headers:
self.logDebug("Location: " + headers['location'])
url = urllib.unquote(headers['location'])
else:
break
# download the url
# we don't use the 'content-disposition' name specified in http header
self.httpdownload(url)
def preprocess(self, thread=None):
# load cookie from center server and set it into current request
self.loadCookieFromSPAT()
# mimic ack ISP downloading agreements page
# set the taskfile's status to be 'starting'
self.task.setStatus("starting")
def process(self):
"""main function"""
# preprocess function
self.preprocess()
# real download
try:
self.downloadISPAtt()
except BadHeader, e:
if e.code in (401, 403):
self.logging("Cookie expired, try to reload cookie from center server and retry again!")
self.loadCookieFromSPAT()
self.downloadISPAtt()
else:
raise
# postprocess
self.postprocess()
def postprocess(self):
self.load(self.task.url)
| mit | -7,459,862,449,835,864,000 | 30.298851 | 104 | 0.596768 | false | 4.630952 | false | false | false |
edelooff/newWeb | newweb/scripts/tables.py | 1 | 1309 | # Originally from: http://code.activestate.com/recipes/577202/#c4
# Written by Vasilij Pupkin (2012)
# Minor changes by Elmer de Looff (2012)
# Licensed under the MIT License (http://opensource.org/licenses/MIT
class ALIGN(object):
LEFT, RIGHT = '-', ''
class Column(list):
def __init__(self, name, data, align=ALIGN.LEFT):
list.__init__(self, data)
self.name = name
self.width = max(len(x) for x in self + [name])
self.format = ' %%%s%ds ' % (align, self.width)
class Table(object):
def __init__(self, *columns):
self.columns = columns
self.length = max(len(x) for x in columns)
def get_row(self, i=None):
for x in self.columns:
if i is None:
yield x.format % x.name
else:
yield x.format % x[i]
def get_line(self):
for x in self.columns:
yield '-' * (x.width + 2)
def join_n_wrap(self, char, elements):
return ' ' + char + char.join(elements) + char
def get_rows(self):
yield self.join_n_wrap('+', self.get_line())
yield self.join_n_wrap('|', self.get_row(None))
yield self.join_n_wrap('+', self.get_line())
for i in range(0, self.length):
yield self.join_n_wrap('|', self.get_row(i))
yield self.join_n_wrap('+', self.get_line())
def __str__(self):
return '\n'.join(self.get_rows())
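# Example usage (illustrative, not part of the original recipe):
#
#   names = Column('Name', ['alice', 'bob'])
#   ages = Column('Age', ['23', '31'], align=ALIGN.RIGHT)
#   print Table(names, ages)
#
# which prints an ASCII table with '+'-cornered borders and right-aligned ages.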
| isc | -1,103,684,903,766,414,100 | 28.088889 | 68 | 0.608862 | false | 3.051282 | false | false | false |
rezoo/twopy | twopy/board.py | 1 | 1589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from user import make_anonymous_user
from exeptions import HttpStatusError, RegexError
def make_subject_url(url):
if url.endswith("/"):
return url + "subject.txt"
else:
return url + "/subject.txt"
def parse_board(string):
if not isinstance(string, unicode):
raise TypeError("unsupported string type:" + str(type(string)))
thread_expressions = re.compile(
r"^(?P<dat>\d+\.dat)<>(?P<title>.*) \((?P<n_comments>\d*)\)$")
results = []
for thread_string in string.split("\n"):
thread_data = thread_expressions.search(thread_string)
if thread_data:
results.append({
"title": thread_data.group("title"),
"n_comments": int(thread_data.group("n_comments")),
"dat": thread_data.group("dat"),
})
elif len(thread_string) != 0:
raise RegexError(
"Regex unmatched in parsing the thread's data",
thread_expressions)
return results
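# Example of the expected subject.txt line format (sketch; the thread values
# below are made up):
#
#   parse_board(u"9876543210.dat<>Example thread (42)\n")
#   # -> [{'title': u'Example thread', 'n_comments': 42, 'dat': u'9876543210.dat'}]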
def retrieve_board(board_url, user=None):
my_user = user if user else make_anonymous_user()
subject_url = make_subject_url(board_url)
response = my_user.urlopen(subject_url, gzip=False)
if response.code == 200:
retrieved_string = unicode(response.read(), "Shift_JIS", "ignore")
print type(retrieved_string)
return parse_board(retrieved_string)
else:
message = "HTTP status is invalid: " + str(response.code)
raise HttpStatusError(message, response)
| mit | 2,028,491,065,550,726,100 | 32.104167 | 74 | 0.604783 | false | 3.819712 | false | false | false |
InformaticsMatters/pipelines | src/python/pipelines/rdkit/standardize.py | 1 | 3592 | #!/usr/bin/env python
# Copyright 2018 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from rdkit import DataStructs, rdBase
from rdkit.Chem.MolStandardize import rdMolStandardize
from pipelines_utils import parameter_utils, utils
from pipelines_utils_rdkit import rdkit_utils, mol_utils
### functions #########################################
#lfc = rdMolStandardize.LargestFragmentChooser()
uncharger = rdMolStandardize.Uncharger()
def standardize(mol, neutralize, fragment):
"""
:param mol: The molecule to standardize
:param neutralize: Boolean for whether to neutralize the molecule
:param fragment: The approach for choosing the largest fragment. Either 'hac' or 'mw'. If not specified the whole
molecule is used.
:return: The standardized molecule
"""
mol = rdMolStandardize.Cleanup(mol)
#mol = lfc.choose(mol)
# We use our own largest fragment picker as the RDKit one behaves slightly differently
if fragment:
mol = mol_utils.fragment(mol, fragment)
if neutralize:
mol = uncharger.uncharge(mol)
return mol
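# Illustrative direct use of standardize() (sketch, not part of the pipeline;
# per the --fragment-method help below, 'hac' keeps the fragment with the most
# heavy atoms):
#
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('[Na+].CC(=O)[O-]')        # sodium acetate (made-up input)
#   std = standardize(mol, neutralize=True, fragment='hac')
#   Chem.MolToSmiles(std)                               # -> 'CC(=O)O'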
### start main execution #########################################
def main():
### command line args definitions #########################################
parser = argparse.ArgumentParser(description='RDKit Standardize')
parser.add_argument('--fragment-method', choices=['hac', 'mw'], help='Approach to find biggest fragment if more than one (hac = biggest by heavy atom count, mw = biggest by mol weight)')
parser.add_argument('--neutralize', action='store_true', help='Neutralize the molecule')
parameter_utils.add_default_io_args(parser)
parser.add_argument('-q', '--quiet', action='store_true', help='Quiet mode')
parser.add_argument('--thin', action='store_true', help='Thin output mode')
args = parser.parse_args()
utils.log("Standardize Args: ", args)
# handle metadata
source = "standardize.py"
datasetMetaProps = {"source":source, "description": "Standardize using RDKit " + rdBase.rdkitVersion}
clsMappings = {}
fieldMetaProps = []
input,output,suppl,writer,output_base = rdkit_utils.\
default_open_input_output(args.input, args.informat, args.output,
'standardize', args.outformat,
thinOutput=False, valueClassMappings=clsMappings,
datasetMetaProps=datasetMetaProps,
fieldMetaProps=fieldMetaProps)
count = 0
total = 0
errors = 0
for mol in suppl:
count += 1
if mol is None:
errors += 1
continue
m = standardize(mol, args.neutralize, args.fragment_method)
writer.write(m)
total += 1
input.close()
writer.flush()
writer.close()
output.close()
if args.meta:
utils.write_metrics(output_base, {'__InputCount__':count, '__OutputCount__':total, '__ErrorCount__':errors, 'RDKitStandardize':total})
if __name__ == "__main__":
main()
| apache-2.0 | -3,940,250,534,355,485,000 | 33.873786 | 190 | 0.648942 | false | 3.947253 | false | false | false |
redhat-cip/dci-control-server | dci/worker/umb.py | 1 | 4780 | import datetime
import json
import os
import logging
from dci_umb.sender import send
logger = logging.getLogger(__name__)
def _get_architecture(job):
arch = "x86_64"
available_arches = ["x86_64", "ppc64le", "aarch64", "s390x"]
for available_arch in available_arches:
if available_arch in job["tags"]:
arch = available_arch
break
return arch
def _get_artifact(component):
return {
"compose_type": "nightly" if "nightly" in component["url"] else "rel-eng",
"id": component["name"],
"type": "productmd-compose",
}
def _build_generic_message(job, component, result, now):
test_name = result["name"]
job_id = str(job["id"])
job_url = "https://www.distributed-ci.io/jobs/%s/jobStates" % job_id
target = "topic://VirtualTopic.eng.dci.job.complete"
architecture = _get_architecture(job)
return {
"target": target,
"body": json.dumps(
{
"contact": {
"name": "DCI CI",
"team": "DCI",
"docs": "https://docs.distributed-ci.io/",
"email": "[email protected]",
"url": "https://distributed-ci.io/",
},
"run": {"url": job_url, "log": job_url},
"artifact": _get_artifact(component),
"pipeline": {"id": job_id, "name": "job id"},
"test": {
"category": "system",
"namespace": "dci",
"type": test_name,
"result": "passed" if job["status"] == "success" else "failed",
},
"system": [{"provider": "beaker", "architecture": architecture}],
"generated_at": "%sZ" % now.isoformat(),
"version": "0.1.0",
}
),
}
def _get_kernel_version(component):
if "tags" not in component:
return None
kernel_version = None
for tag in component["tags"]:
if "kernel:" in tag:
kernel_version = tag.replace("kernel:", "")
return kernel_version
def _build_cki_message(job, component, result):
job_url = "https://www.distributed-ci.io/jobs/%s/jobStates" % str(job["id"])
target = "topic://VirtualTopic.eng.dci.cki"
architecture = _get_architecture(job)
return {
"target": target,
"body": json.dumps(
{
"results": [
{
"test_arch": architecture,
"test_description": tc["classname"],
"test_log_url": [job_url],
"test_name": tc["name"],
"test_result": "PASS" if job["status"] == "success" else "FAIL",
"is_debug": False,
}
for tc in result["testcases"]
],
"summarized_result": "",
"team_email": "[email protected]",
"team_name": "DCI",
"kernel_version": _get_kernel_version(component),
"artifact": _get_artifact(component),
}
),
}
def build_umb_messages(event, now=None):
    if now is None:
        now = datetime.datetime.utcnow()
    logger.debug("Received event to send on UMB: %s" % event)
messages = []
job = event["job"]
for component in job["components"]:
if component["type"].lower() != "compose":
logger.debug(
'Ignoring event of type "%s". Only processing events of type "compose".'
% component["type"]
)
continue
for result in job["results"]:
if "cki-results" == result["name"].lower():
messages.append(_build_cki_message(job, component, result))
messages.append(_build_generic_message(job, component, result, now))
return messages
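# Minimal illustrative event shape accepted by build_umb_messages(), derived
# from the fields read above (all values are made up):
#
#   event = {"job": {"id": "abc123", "status": "success", "tags": ["x86_64"],
#                    "components": [{"type": "compose",
#                                    "name": "RHEL-8.4.0-20210503.1",
#                                    "url": "http://example.com/nightly/compose",
#                                    "tags": ["kernel:4.18.0-305"]}],
#                    "results": [{"name": "cki-results",
#                                 "testcases": [{"classname": "boot", "name": "boot-test"}]}]}}
#   messages = build_umb_messages(event)  # one CKI and one generic UMB message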
def send_event_on_umb(event):
messages = build_umb_messages(event)
key_file = os.getenv("UMB_KEY_FILE_PATH", "/etc/pki/tls/private/umb.key")
crt_file = os.getenv("UMB_CRT_FILE_PATH", "/etc/pki/tls/certs/umb.crt")
ca_file = os.getenv("UMB_CA_FILE_PATH", "/etc/pki/tls/certs/RH-IT-Root-CA.crt")
brokers = os.environ.get("UMB_BROKERS", "amqps://umb.api.redhat.com:5671").split()
for message in messages:
try:
send(
{
"key_file": key_file,
"crt_file": crt_file,
"ca_file": ca_file,
"brokers": brokers,
"target": message["target"],
"message": message["body"],
}
)
except Exception as e:
logger.exception(e)
| apache-2.0 | 8,607,258,574,056,000,000 | 33.388489 | 88 | 0.496234 | false | 3.914824 | true | false | false |
cdepman/falcon_api | site-packages/psycopg2cffi/_impl/exceptions.py | 1 | 3830 | import six
try:
StandardError = StandardError
except NameError:
StandardError = Exception
class OperationError(Exception):
pass
from psycopg2cffi._impl.libpq import libpq, ffi
class Warning(StandardError):
pass
class Error(StandardError):
pgerror = None
pgcode = None
cursor = None
_pgres = None
@property
def diag(self):
return Diagnostics(self)
def __del__(self):
if self._pgres:
libpq.PQclear(self._pgres)
self._pgres = None
def __reduce__(self):
t = super(Error, self).__reduce__()
if not isinstance(t, tuple):
return t
# note: in c implementation reduce returns a 2-items tuple;
# in python a 3-items tuple. Maybe the c exception doesn't have a dict?
if len(t) != 3:
return t
d = t[2].copy()
d.pop('cursor', None)
d.pop('_pgres', None)
return (t[0], t[1], d)
def __setstate__(self, state):
self.pgerror = state.get('pgerror')
self.pgcode = state.get('pgcode')
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class QueryCanceledError(OperationalError):
pass
class TransactionRollbackError(OperationalError):
pass
class Diagnostics(object):
def __init__(self, exc):
self._exc = exc
def _get_field(self, field):
from psycopg2cffi._impl.adapters import bytes_to_ascii
if self._exc and self._exc._pgres:
b = libpq.PQresultErrorField(self._exc._pgres, field)
if b:
b = ffi.string(b)
if six.PY3: # py2 tests insist on str here
b = bytes_to_ascii(b)
return b
@property
def severity(self):
return self._get_field(libpq.LIBPQ_DIAG_SEVERITY)
@property
def sqlstate(self):
return self._get_field(libpq.LIBPQ_DIAG_SQLSTATE)
@property
def message_primary(self):
return self._get_field(libpq.LIBPQ_DIAG_MESSAGE_PRIMARY)
@property
def message_detail(self):
return self._get_field(libpq.LIBPQ_DIAG_MESSAGE_DETAIL)
@property
def message_hint(self):
return self._get_field(libpq.LIBPQ_DIAG_MESSAGE_HINT)
@property
def statement_position(self):
return self._get_field(libpq.LIBPQ_DIAG_STATEMENT_POSITION)
@property
def internal_position(self):
return self._get_field(libpq.LIBPQ_DIAG_INTERNAL_POSITION)
@property
def internal_query(self):
return self._get_field(libpq.LIBPQ_DIAG_INTERNAL_QUERY)
@property
def context(self):
return self._get_field(libpq.LIBPQ_DIAG_CONTEXT)
@property
def schema_name(self):
return self._get_field(libpq.LIBPQ_DIAG_SCHEMA_NAME)
@property
def table_name(self):
return self._get_field(libpq.LIBPQ_DIAG_TABLE_NAME)
@property
def column_name(self):
return self._get_field(libpq.LIBPQ_DIAG_COLUMN_NAME)
@property
def datatype_name(self):
return self._get_field(libpq.LIBPQ_DIAG_DATATYPE_NAME)
@property
def constraint_name(self):
return self._get_field(libpq.LIBPQ_DIAG_CONSTRAINT_NAME)
@property
def source_file(self):
return self._get_field(libpq.LIBPQ_DIAG_SOURCE_FILE)
@property
def source_line(self):
return self._get_field(libpq.LIBPQ_DIAG_SOURCE_LINE)
@property
def source_function(self):
return self._get_field(libpq.LIBPQ_DIAG_SOURCE_FUNCTION)
| mit | 3,383,562,870,674,136,000 | 21.011494 | 79 | 0.632115 | false | 3.556175 | false | false | false |
163gal/Time-Line | libs_arm/wx/tools/XRCed/plugins/core.py | 4 | 26715 | # Name: core.py
# Purpose: Core components
# Author: Roman Rolinsky <[email protected]>
# Created: 31.05.2007
# RCS-ID: $Id: core.py 71860 2012-06-25 15:46:16Z ROL $
import wx
from wx.tools.XRCed import component, images, attribute, params, view
from wx.tools.XRCed.globals import TRACE,is_object,is_element,STD_NAME
import _bitmaps as bitmaps
TRACE('*** creating core components')
# Set panel images
component.Manager.panelImages['Windows'] = images.ToolPanel_Windows.GetImage()
component.Manager.panelImages['Menus'] = images.ToolPanel_Menus.GetImage()
component.Manager.panelImages['Sizers'] = images.ToolPanel_Sizers.GetImage()
component.Manager.panelImages['Panels'] = images.ToolPanel_Panels.GetImage()
component.Manager.panelImages['Gizmos'] = images.ToolPanel_Gizmos.GetImage()
### wxFrame
class Frame(component.Container):
def getChildObject(self, node, obj, index):
# Do not count toolbar and menubar
objects = filter(is_element, node.childNodes)
indexOffset = 0 # count non-window children
for i,o in enumerate(objects):
if o.getAttribute('class') == 'wxMenuBar':
if i == index: return obj.GetMenuBar()
elif i < index: indexOffset += 1
elif o.getAttribute('class') == 'wxToolBar':
if i == index: return obj.GetToolBar()
elif i < index: indexOffset += 1
return component.Container.getChildObject(self, node, obj, index - indexOffset)
c = Frame('wxFrame', ['frame','window','top_level'],
['pos', 'size', 'title', 'centered'],
image=images.TreeFrame.GetImage())
c.isTopLevel = True
c.addStyles('wxDEFAULT_FRAME_STYLE', 'wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
'wxSTAY_ON_TOP', 'wxSYSTEM_MENU',
'wxRESIZE_BORDER', 'wxCLOSE_BOX',
'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
'wxFRAME_NO_TASKBAR', 'wxFRAME_SHAPED', 'wxFRAME_TOOL_WINDOW',
'wxFRAME_FLOAT_ON_PARENT',
'wxTAB_TRAVERSAL')
c.addExStyles('wxFRAME_EX_CONTEXTHELP', 'wxFRAME_EX_METAL')
c.addEvents('EVT_SIZE', 'EVT_CLOSE', 'EVT_MENU_HIGHLIGHT', 'EVT_ICONIZE', 'EVT_MAXIMIZE',
'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'frame', 'wxFrame', 10)
component.Manager.setTool(c, 'Windows', bitmaps.wxFrame.GetBitmap(), (0,0))
### wxMDIParentFrame
class MDIParentFrame(component.Container):
def getChildObject(self, node, obj, index):
# Do not count toolbar and menubar
objects = filter(is_element, node.childNodes)
indexOffset = 0 # count non-window children
for i,o in enumerate(objects):
if o.getAttribute('class') == 'wxMenuBar':
if i == index: return obj.GetMenuBar()
elif i < index: indexOffset += 1
elif o.getAttribute('class') == 'wxToolBar':
if i == index: return obj.GetToolBar()
elif i < index: indexOffset += 1
return obj.GetClientWindow().GetChildren()[index]
c = MDIParentFrame('wxMDIParentFrame', ['mdi_parent_frame','top_level'],
['pos', 'size', 'title', 'centered'],
image=images.TreeFrame.GetImage())
c.isTopLevel = True
c.addStyles('wxDEFAULT_FRAME_STYLE', 'wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
'wxSTAY_ON_TOP', 'wxSYSTEM_MENU',
'wxRESIZE_BORDER', 'wxCLOSE_BOX',
'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
'wxFRAME_NO_TASKBAR', 'wxFRAME_SHAPED', 'wxFRAME_TOOL_WINDOW',
'wxFRAME_FLOAT_ON_PARENT', 'wxFRAME_NO_WINDOW_MENU',
'wxTAB_TRAVERSAL')
c.addExStyles('wxFRAME_EX_METAL')
c.addEvents('EVT_SIZE', 'EVT_CLOSE', 'EVT_MENU_HIGHLIGHT', 'EVT_ICONIZE', 'EVT_MAXIMIZE',
'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'MDI parent frame', 'wxMDIParentFrame', 11)
#component.Manager.setTool(c, 'Windows', bitmaps.wxFrame.GetBitmap(), (0,0))
### wxMDIChildFrame
class MDIChildFrame(component.Container):
def getChildObject(self, node, obj, index):
# Do not count toolbar and menubar
objects = filter(is_element, node.childNodes)
indexOffset = 0 # count non-window children
for i,o in enumerate(objects):
if o.getAttribute('class') == 'wxMenuBar':
if i == index: return obj.GetMenuBar()
elif i < index: indexOffset += 1
elif o.getAttribute('class') == 'wxToolBar':
if i == index: return obj.GetToolBar()
elif i < index: indexOffset += 1
return component.Container.getChildObject(self, node, obj, index - indexOffset)
c = MDIChildFrame('wxMDIChildFrame', ['mdi_child_frame','window'],
['pos', 'size', 'title', 'centered'],
image=images.TreeFrame.GetImage())
c.addStyles('wxDEFAULT_FRAME_STYLE', 'wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
'wxSTAY_ON_TOP', 'wxSYSTEM_MENU',
'wxRESIZE_BORDER', 'wxCLOSE_BOX',
'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
'wxFRAME_NO_TASKBAR', 'wxFRAME_SHAPED', 'wxFRAME_TOOL_WINDOW',
'wxFRAME_FLOAT_ON_PARENT', 'wxFRAME_NO_WINDOW_MENU',
'wxTAB_TRAVERSAL')
c.addExStyles('wxFRAME_EX_METAL')
c.addEvents('EVT_SIZE', 'EVT_CLOSE', 'EVT_MENU_HIGHLIGHT', 'EVT_ICONIZE', 'EVT_MAXIMIZE',
'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'MDI child frame', 'wxMDIChildFrame', 12)
#component.Manager.setTool(c, 'Windows', bitmaps.wxFrame.GetBitmap(), (0,0))
### wxDialog
c = component.Container('wxDialog', ['frame','window','top_level'],
['pos', 'size', 'title', 'centered', 'icon'],
image=images.TreeDialog.GetImage())
c.isTopLevel = True
c.setSpecial('icon', attribute.BitmapAttribute)
c.addStyles('wxDEFAULT_DIALOG_STYLE', 'wxCAPTION',
'wxSTAY_ON_TOP', 'wxSYSTEM_MENU',
'wxRESIZE_BORDER', 'wxCLOSE_BOX',
'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
'wxDIALOG_NO_PARENT', 'wxFRAME_SHAPED',
'wxTAB_TRAVERSAL')
c.addExStyles('wxDIALOG_EX_CONTEXTHELP', 'wxDIALOG_EX_METAL')
c.addEvents('EVT_INIT_DIALOG', 'EVT_SIZE', 'EVT_CLOSE',
'EVT_ICONIZE', 'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'dialog', 'wxDialog', 20)
component.Manager.setTool(c, 'Windows', bitmaps.wxDialog.GetBitmap(), (0,1))
### wxPanel
c = component.Container('wxPanel', ['window', 'top_level', 'control'],
['pos', 'size'],
image=images.TreePanel.GetImage())
c.addStyles('wxTAB_TRAVERSAL')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'panel', 'wxPanel', 30)
component.Manager.setMenu(c, 'container', 'panel', 'wxPanel', 10)
component.Manager.setTool(c, 'Windows', bitmaps.wxPanel.GetBitmap(), (0,2))
### wxWizard
class Wizard(component.Container):
genericStyles = genericExStyles = []
def makeTestWin(self, res, name):
wiz = wx.wizard.PreWizard()
res.LoadOnObject(wiz, view.frame, STD_NAME, self.klass)
# Find and select first page
firstPage = None
for w in wiz.GetChildren():
if isinstance(w, wx.wizard.WizardPage):
firstPage = w
break
if firstPage:
wiz.RunWizard(firstPage)
else:
wx.LogMessage('Wizard is empty')
wiz.Destroy()
return None, None
c = Wizard('wxWizard', ['wizard', 'top_level'],
['pos', 'title', 'bitmap'],
image=images.TreeWizard.GetImage())
c.addExStyles('wxWIZARD_EX_HELPBUTTON')
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'wizard', 'wxWizard', 40)
component.Manager.setTool(c, 'Windows', bitmaps.wxWizard.GetBitmap(), (1,0), (1,2))
### wxWizardPage
class WizardPage(component.Container):
def makeTestWin(self, res, name):
# Create single-page wizard
wiz = wx.wizard.Wizard(view.frame, title='Test Wizard')
print self.klass
page = wx.wizard.PrePyWizardPage()
print res.LoadOnObject(page, wiz, STD_NAME, self.klass)
# page = res.LoadObject(wiz, STD_NAME, self.klass)
print page
wiz.RunWizard(page)
wiz.Destroy()
return None, None
c = WizardPage('wxWizardPage', ['wizard_page', 'window'], ['bitmap'],
image=images.TreePanel.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'wizard page', 'wxWizardPage')
### wxWizardPageSimple
c = component.Container('wxWizardPageSimple', ['wizard_page', 'window'], ['bitmap'],
image=images.TreePanel.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'simple wizard page', 'wxWizardPageSimple')
### wxPropertySheetDialog
class ParamButtons(params.ParamBinaryOr):
'''Button flags.'''
values = ['wxOK', 'wxCANCEL', 'wxYES', 'wxNO', 'wxHELP', 'wxNO_DEFAULT']
c = component.SmartContainer('wxPropertySheetDialog', ['frame','book','window','top_level'],
['pos', 'size', 'title', 'centered', 'icon', 'buttons'],
params={'buttons': ParamButtons},
implicit_klass='propertysheetpage',
implicit_page='PropertySheetPage',
implicit_attributes=['label', 'selected', 'bitmap'],
implicit_params={'label': params.ParamText, 'selected': params.ParamBool},
image=images.TreeDialog.GetImage())
c.isTopLevel = True
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.setSpecial('icon', attribute.BitmapAttribute)
c.addStyles('wxDEFAULT_DIALOG_STYLE', 'wxCAPTION', 'wxFRAME_SHAPED',
'wxTAB_TRAVERSAL', 'wxSTAY_ON_TOP', 'wxSYSTEM_MENU',
'wxRESIZE_BORDER', 'wxCLOSE_BOX', 'wxMAXIMIZE_BOX', 'wxMINIMIZE_BOX',
'wxDIALOG_MODAL', 'wxDIALOG_MODELESS', 'wxDIALOG_NO_PARENT',
'wxTAB_TRAVERSAL')
c.addExStyles('wxDIALOG_EX_CONTEXTHELP', 'wxDIALOG_EX_METAL')
c.addEvents('EVT_INIT_DIALOG', 'EVT_SIZE', 'EVT_CLOSE',
'EVT_ICONIZE', 'EVT_MAXIMIZE', 'EVT_ACTIVATE', 'EVT_UPDATE_UI')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'propery sheet dialog', 'wxPropertySheetDialog', 50)
component.Manager.setTool(c, 'Windows', bitmaps.wxPropertySheetDialog.GetBitmap(), (1,1))
### wxBoxSizer
c = component.BoxSizer('wxBoxSizer', ['sizer'], ['orient'],
defaults={'orient': 'wxVERTICAL'},
images=[images.TreeSizerV.GetImage(), images.TreeSizerH.GetImage()])
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'box sizer', 'wxBoxSizer', 10)
component.Manager.setTool(c, 'Sizers', pos=(0,0))
### wxStaticBoxSizer
c = component.BoxSizer('wxStaticBoxSizer', ['sizer'], ['label', 'orient'],
defaults={'orient': 'wxVERTICAL'},
images=[images.TreeSizerV.GetImage(), images.TreeSizerH.GetImage()])
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'static box sizer', 'wxStaticBoxSizer', 20)
component.Manager.setTool(c, 'Sizers', pos=(0,2))
### wxGridSizer
c = component.Sizer('wxGridSizer', ['sizer'],
['cols', 'rows', 'vgap', 'hgap'],
defaults={'cols': '2', 'rows': '2'},
image=images.TreeSizerGrid.GetImage())
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'grid sizer', 'wxGridSizer', 30)
component.Manager.setTool(c, 'Sizers', pos=(0,1))
### wxFlexGridSizer
c = component.Sizer('wxFlexGridSizer', ['sizer'],
['cols', 'rows', 'vgap', 'hgap', 'growablecols', 'growablerows'],
defaults={'cols': '2', 'rows': '2'},
image=images.TreeSizerFlexGrid.GetImage())
c.setSpecial('growablecols', attribute.MultiAttribute)
c.setParamClass('growablecols', params.ParamIntList)
c.setSpecial('growablerows', attribute.MultiAttribute)
c.setParamClass('growablerows', params.ParamIntList)
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'flex grid sizer', 'wxFlexGridSizer', 40)
component.Manager.setTool(c, 'Sizers', pos=(1,0))
### wxGridBagSizer
c = component.Sizer('wxGridBagSizer', ['sizer'],
['vgap', 'hgap', 'growablecols', 'growablerows'],
image=images.TreeSizerGridBag.GetImage(),
implicit_attributes=['option', 'flag', 'border', 'minsize', 'ratio', 'cellpos', 'cellspan'])
c.setSpecial('growablecols', attribute.MultiAttribute)
c.setParamClass('growablecols', params.ParamIntList)
c.setSpecial('growablerows', attribute.MultiAttribute)
c.setParamClass('growablerows', params.ParamIntList)
c.setImplicitParamClass('cellpos', params.ParamPosSize)
c.setImplicitParamClass('cellspan', params.ParamPosSize)
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'grid bag sizer', 'wxGridBagSizer', 50)
component.Manager.setTool(c, 'Sizers', pos=(1,1))
### wxStdDialogButtonSizer
class StdDialogButtonSizer(component.Sizer):
def getChildObject(self, node, obj, index):
# This sizer orders buttons by fixed ordering, so we must
# get the ID to find them
try:
n = filter(is_element, node.childNodes)[index]
n = filter(is_element, n.childNodes)[0]
id = n.getAttribute('name')
except IndexError:
return None
items = filter(wx.SizerItem.IsWindow, obj.GetChildren())
for item in items:
w = item.GetWindow()
if w.GetName() == id: return w
return None
c = StdDialogButtonSizer('wxStdDialogButtonSizer', ['btnsizer'], [],
implicit_klass='button',
implicit_attributes=[])
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'dialog button sizer', 'wxStdDialogButtonSizer', 60)
#component.Manager.setTool(c, 'Sizers', pos=(0,2))
### spacer
c = component.SimpleComponent('spacer', ['spacer'], ['size', 'option', 'flag', 'border'],
image=images.TreeSpacer.GetImage())
c.hasName = False
component.Manager.register(c)
component.Manager.setMenu(c, 'sizer', 'spacer', 'spacer', 70)
component.Manager.setTool(c, 'Sizers', pos=(1,2))
################################################################################
# Containers
# wxPanel is already added
### wxScrolledWindow
c = component.Container('wxScrolledWindow', ['window', 'control'], ['pos', 'size'])
c.addStyles('wxHSCROLL', 'wxVSCROLL', 'wxTAB_TRAVERSAL')
c.addEvents('EVT_SCROLLWIN_TOP',
'EVT_SCROLLWIN_BOTTOM',
'EVT_SCROLLWIN_LINEUP',
'EVT_SCROLLWIN_LINEDOWN',
'EVT_SCROLLWIN_PAGEUP',
'EVT_SCROLLWIN_PAGEDOWN',
'EVT_SCROLLWIN_THUMBTRACK',
'EVT_SCROLLWIN_THUMBRELEASE')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'scrolled window', 'wxScrolledWindow', 20)
component.Manager.setTool(c, 'Panels', pos=(3,0))
### wxSplitterWindow
c = component.Container('wxSplitterWindow', ['book', 'window', 'control'],
['pos', 'size', 'orientation', 'sashpos', 'minsize', 'gravity'],
# note: no flt[0..1], so just leaving gravity as text
params={'orientation': params.ParamOrientation,
'sashpos': params.ParamUnit,
'minsize': params.ParamUnit},
image=images.TreeSplitterWindow.GetImage())
c.addStyles('wxSP_3D', 'wxSP_3DSASH', 'wxSP_3DBORDER', 'wxSP_BORDER',
'wxSP_FULLSASH', 'wxSP_NOBORDER', 'wxSP_PERMIT_UNSPLIT', 'wxSP_LIVE_UPDATE',
'wxSP_NO_XP_THEME')
c.addEvents('EVT_SPLITTER_SASH_POS_CHANGING', 'EVT_SPLITTER_SASH_POS_CHANGED',
'EVT_SPLITTER_UNSPLIT', 'EVT_SPLITTER_DCLICK')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'splitter window', 'wxSplitterWindow', 30)
component.Manager.setTool(c, 'Panels', pos=(2,3))
### wxNotebook
c = component.SmartContainer('wxNotebook', ['book', 'window', 'control'], ['pos', 'size'],
implicit_klass='notebookpage',
implicit_page='NotebookPage',
implicit_attributes=['label', 'selected', 'bitmap'],
implicit_params={'label': params.ParamText, 'selected': params.ParamBool},
image=images.TreeNotebook.GetImage())
c.addStyles('wxBK_DEFAULT', 'wxBK_TOP', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_BOTTOM',
'wxNB_FIXEDWIDTH', 'wxNB_MULTILINE', 'wxNB_NOPAGETHEME')
c.addEquivStyles({'wxBK_DEFAULT': 'wxNB_DEFAULT', 'wxBK_LEFT': 'wxNB_LEFT',
'wxBK_RIGHT': 'wxNB_RIGHT', 'wxBK_TOP': 'wxNB_TOP',
'wxBK_BOTTOM': 'wxNB_BOTTOM'})
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_NOTEBOOK_PAGE_CHANGED', 'EVT_NOTEBOOK_PAGE_CHANGING')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'notebook', 'Notebook control', 40)
component.Manager.setTool(c, 'Panels', pos=(1,0))
### wxChoicebook
c = component.SmartContainer('wxChoicebook', ['book', 'window', 'control'], ['pos', 'size'],
implicit_klass='choicebookpage',
implicit_page='ChoicebookPage',
implicit_attributes=['label', 'selected', 'bitmap'],
implicit_params={'label': params.ParamText, 'selected': params.ParamBool})
c.addStyles('wxBK_DEFAULT', 'wxBK_TOP', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_BOTTOM')
c.addEquivStyles({'wxBK_DEFAULT': 'wxCHB_DEFAULT', 'wxBK_LEFT': 'wxCHB_LEFT',
'wxBK_RIGHT': 'wxCHB_RIGHT', 'wxBK_TOP': 'wxCHB_TOP',
'wxBK_BOTTOM': 'wxCHB_BOTTOM'})
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_CHOICEBOOK_PAGE_CHANGED', 'EVT_CHOICEBOOK_PAGE_CHANGING')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'choicebook', 'wxChoicebook', 50)
component.Manager.setTool(c, 'Panels', pos=(1,3))
### wxListbook
class ListBook(component.SmartContainer):
def getChildObject(self, node, obj, index):
# Listbook's first child is ListView
return obj.GetChildren()[index+1]
c = ListBook('wxListbook', ['book', 'window', 'control'], ['pos', 'size'],
implicit_klass='listbookpage',
implicit_page='ListbookPage',
implicit_attributes=['label', 'selected', 'bitmap'],
implicit_params={'label': params.ParamText, 'selected': params.ParamBool})
c.addStyles('wxBK_DEFAULT', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_TOP', 'wxBK_BOTTOM')
c.addEquivStyles({'wxBK_DEFAULT': 'wxLB_DEFAULT', 'wxBK_LEFT': 'wxLB_LEFT',
'wxBK_RIGHT': 'wxLB_RIGHT', 'wxBK_TOP': 'wxLB_TOP',
'wxBK_BOTTOM': 'wxLB_BOTTOM'})
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_LISTBOOK_PAGE_CHANGED', 'EVT_LISTBOOK_PAGE_CHANGING')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'listbook', 'wxListbook', 60)
component.Manager.setTool(c, 'Panels', pos=(0,3))
### wxTreebook
class TreeBook(component.SmartContainer):
def getChildObject(self, node, obj, index):
# Listbook's first child is ListView
return obj.GetChildren()[index+1]
c = TreeBook('wxTreebook', ['book', 'window', 'control'], ['pos', 'size'],
implicit_klass='treebookpage',
implicit_page='TreebookPage',
implicit_attributes=['label', 'selected', 'bitmap', 'depth'],
implicit_params={'label': params.ParamText,
'selected': params.ParamBool,
'depth': params.ParamInt})
c.addStyles('wxBK_DEFAULT', 'wxBK_LEFT', 'wxBK_RIGHT', 'wxBK_TOP', 'wxBK_BOTTOM')
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_TREEBOOK_PAGE_CHANGED', 'EVT_TREEBOOK_PAGE_CHANGING',
'EVT_TREEBOOK_NODE_COLLAPSED', 'EVT_TREEBOOK_NODE_EXPANDED')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'treebook', 'wxTreebook', 70)
component.Manager.setTool(c, 'Panels', pos=(1,1), span=(1,2))
### wxCollapsiblePane
c = component.SmartContainer('wxCollapsiblePane', ['book', 'window', 'control'], ['pos', 'size', 'label', 'collapsed'],
implicit_klass='panewindow',
implicit_page='',
implicit_attributes=[])
c.addStyles('wxCP_NO_TLW_RESIZE', 'wxCP_DEFAULT_STYLE')
c.setParamClass('collapsed', params.ParamBool)
c.addEvents('EVT_COMMAND_COLLPANE_CHANGED')
component.Manager.register(c)
component.Manager.setMenu(c, 'container', 'collapsible pane', 'wxCollapsiblePane', 71)
################################################################################
# Menus
### wxMenuBar
class MenuBar(component.SimpleContainer):
isTestable = True
# Menubar should be shown in a normal frame
def makeTestWin(self, res, name):
frame = wx.Frame(None, -1, '%s: %s' % (self.klass, name), name=STD_NAME)
object = res.LoadMenuBarOnFrame(frame, STD_NAME)
return None, frame
def getRect(self, obj):
return None
c = MenuBar('wxMenuBar', ['menubar', 'top_level'], [],
image=images.TreeMenuBar.GetImage())
c.addStyles('wxMB_DOCKABLE')
c.addEvents('EVT_MENU', 'EVT_MENU_OPEN', 'EVT_MENU_CLOSE', 'EVT_MENU_HIGHLIGHT_ALL')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'menu bar', 'wxMenuBar', 40)
component.Manager.setMenu(c, 'bar', 'menu bar', 'wxMenuBar', 10)
component.Manager.setTool(c, 'Menus', pos=(1,0))
### wxMenu
c = component.SimpleContainer('wxMenu', ['menu', 'top_level'],
['label', 'help', 'enabled'],
image=images.TreeMenu.GetImage())
#c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addStyles('wxMENU_TEAROFF')
c.addEvents('EVT_MENU', 'EVT_MENU_OPEN', 'EVT_MENU_CLOSE', 'EVT_MENU_HIGHLIGHT_ALL')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'menu', 'wxMenu', 50)
component.Manager.setMenu(c, 'ROOT', 'menu', 'wxMenu', 20)
component.Manager.setTool(c, 'Menus', pos=(1,1), span=(2,1))
### wxMenuItem
c = component.SimpleComponent('wxMenuItem', ['menu_item'],
['label', 'bitmap', 'accel', 'help',
'checkable', 'radio', 'enabled', 'checked'],
image=images.TreeMenuItem.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.addEvents('EVT_MENU', 'EVT_MENU_HIGHLIGHT')
component.Manager.register(c)
component.Manager.setMenu(c, 'ROOT', 'menu item', 'wxMenuItem', 10)
component.Manager.setTool(c, 'Menus', pos=(1,2))
### wxToolBar
class ToolBar(component.SimpleContainer):
isTestable = True
# Toolbar should be shown in a normal frame
def makeTestWin(self, res, name):
frame = wx.Frame(None, -1, '%s: %s' % (self.klass, name), name=STD_NAME)
object = res.LoadToolBar(frame, STD_NAME)
return None, frame
def getRect(self, obj):
return None
c = ToolBar('wxToolBar', ['toolbar', 'top_level', 'control'],
['bitmapsize', 'margins', 'packing', 'separation',
'dontattachtoframe', 'pos', 'size'],
image=images.TreeToolBar.GetImage())
c.addStyles('wxTB_FLAT', 'wxTB_DOCKABLE', 'wxTB_VERTICAL', 'wxTB_HORIZONTAL',
'wxTB_3DBUTTONS','wxTB_TEXT', 'wxTB_NOICONS', 'wxTB_NODIVIDER',
'wxTB_NOALIGN', 'wxTB_HORZ_LAYOUT', 'wxTB_HORZ_TEXT',
'wxTB_TOP', 'wxTB_LEFT', 'wxTB_RIGHT', 'wxTB_BOTTOM')
c.setParamClass('dontattachtoframe', params.ParamBool)
c.setParamClass('bitmapsize', params.ParamPosSize)
c.setParamClass('margins', params.ParamPosSize)
c.setParamClass('packing', params.ParamUnit)
c.setParamClass('separation', params.ParamUnit)
c.renameDict = {'dontattachtoframe': "don't attach"}
c.addEvents('EVT_TOOL', 'EVT_TOOL_ENTER', 'EVT_TOOL_RCLICKED')
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'tool bar', 'wxToolBar', 50)
component.Manager.setMenu(c, 'bar', 'tool bar', 'wxToolBar', 20)
component.Manager.setTool(c, 'Menus', pos=(0,0))
### wxTool
c = component.SimpleComponent('tool', ['tool'],
['bitmap', 'bitmap2', 'radio', 'toggle',
'tooltip', 'longhelp', 'label'],
image=images.TreeTool.GetImage())
component.Manager.register(c)
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.setSpecial('bitmap2', attribute.BitmapAttribute)
c.setParamClass('bitmap2', params.ParamBitmap)
c.setParamClass('toggle', params.ParamBool)
c.addEvents('EVT_TOOL', 'EVT_TOOL_ENTER', 'EVT_TOOL_RCLICKED')
component.Manager.setMenu(c, 'ROOT', 'tool', 'wxTool', 10)
component.Manager.setTool(c, 'Menus', pos=(0,1))
### wxSeparator
c = component.SimpleComponent('separator', ['separator'], [],
image=images.TreeSeparator.GetImage())
c.hasName = False
component.Manager.register(c)
component.Manager.setMenu(c, 'ROOT', 'separator', 'separator', 20)
component.Manager.setTool(c, 'Menus', pos=(0,2))
### wxBreak
c = component.SimpleComponent('break', ['break'], [],
image=images.TreeSeparator.GetImage())
c.hasName = False
component.Manager.register(c)
component.Manager.setMenu(c, 'ROOT', 'break', 'break', 21)
### wxStatusBar
c = component.SimpleComponent('wxStatusBar', ['statusbar'], ['fields', 'widths', 'styles'])
c.addStyles('wxST_SIZEGRIP')
c.setParamClass('fields', params.ParamIntP)
component.Manager.register(c)
component.Manager.setMenu(c, 'bar', 'status bar', 'wxStatusBar', 30)
component.Manager.setTool(c, 'Menus', pos=(2,0))
################################################################################
### wxBitmap
c = component.SimpleComponent('wxBitmap', ['top_level'], ['object'])
c.renameDict = {'object': ''}
c.setSpecial('object', attribute.BitmapAttribute)
c.setParamClass('object', params.ParamBitmap)
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'bitmap', 'wxBitmap', 60)
### wxIcon
c = component.SimpleComponent('wxIcon', ['top_level'], ['object'])
c.renameDict = {'object': ''}
c.setSpecial('object', attribute.BitmapAttribute)
c.setParamClass('object', params.ParamBitmap)
component.Manager.register(c)
component.Manager.setMenu(c, 'TOP_LEVEL', 'icon', 'wxIcon', 70)
### wxXXX
#c = component.Component('wxXXX', ['control','tool'],
# ['pos', 'size', ...])
#c.addStyles(...)
#component.Manager.register(c)
#component.Manager.setMenu(c, 'control', 'XXX', 'wxXXX', NN)
| gpl-3.0 | 8,654,210,028,634,255,000 | 43.011532 | 119 | 0.641101 | false | 3.316574 | false | false | false |
bcoca/ansible-modules-extras | storage/netapp/netapp_e_facts.py | 27 | 6628 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
module: netapp_e_facts
version_added: '2.2'
short_description: Get facts about NetApp E-Series arrays
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage. This value must be unique for each array.
description:
- Return various information about NetApp E-Series storage arrays (eg, configuration, disks)
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = """
---
- name: Get array facts
netapp_e_facts:
array_id: "{{ netapp_array_id }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
"""
RETURN = """
msg: Gathered facts for <StorageArrayId>.
"""
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
data = None
except:
if ignore_errors:
pass
else:
raise
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
ssid=dict(required=True))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
p = module.params
ssid = p['ssid']
validate_certs = p['validate_certs']
api_usr = p['api_username']
api_pwd = p['api_password']
api_url = p['api_url']
facts = dict(ssid=ssid)
# fetch the list of storage-pool objects and look for one with a matching name
try:
(rc, resp) = request(api_url + "/storage-systems/%s/graph" % ssid,
headers=dict(Accept="application/json"),
url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs)
except:
error = get_exception()
module.fail_json(
msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (ssid, str(error)))
facts['snapshot_images'] = [
dict(
id=d['id'],
status=d['status'],
pit_capacity=d['pitCapacity'],
creation_method=d['creationMethod'],
reposity_cap_utilization=d['repositoryCapacityUtilization'],
active_cow=d['activeCOW'],
rollback_source=d['isRollbackSource']
) for d in resp['highLevelVolBundle']['pit']]
facts['netapp_disks'] = [
dict(
id=d['id'],
available=d['available'],
media_type=d['driveMediaType'],
status=d['status'],
usable_bytes=d['usableCapacity'],
tray_ref=d['physicalLocation']['trayRef'],
product_id=d['productID'],
firmware_version=d['firmwareVersion'],
serial_number=d['serialNumber'].lstrip()
) for d in resp['drive']]
facts['netapp_storage_pools'] = [
dict(
id=sp['id'],
name=sp['name'],
available_capacity=sp['freeSpace'],
total_capacity=sp['totalRaidedSpace'],
used_capacity=sp['usedSpace']
) for sp in resp['volumeGroup']]
all_volumes = list(resp['volume'])
# all_volumes.extend(resp['thinVolume'])
# TODO: exclude thin-volume repo volumes (how to ID?)
facts['netapp_volumes'] = [
dict(
id=v['id'],
name=v['name'],
parent_storage_pool_id=v['volumeGroupRef'],
capacity=v['capacity'],
is_thin_provisioned=v['thinProvisioned']
) for v in all_volumes]
features = [f for f in resp['sa']['capabilities']]
features.extend([f['capability'] for f in resp['sa']['premiumFeatures'] if f['isEnabled']])
features = list(set(features)) # ensure unique
features.sort()
facts['netapp_enabled_features'] = features
# TODO: include other details about the storage pool (size, type, id, etc)
result = dict(ansible_facts=facts, changed=False)
module.exit_json(msg="Gathered facts for %s." % ssid, **result)
if __name__ == "__main__":
main()
| gpl-3.0 | 2,120,178,646,795,601,200 | 31.975124 | 111 | 0.621454 | false | 3.846779 | false | false | false |
ric2b/Vivaldi-browser | chromium/third_party/blink/tools/blinkpy/web_tests/port/mac.py | 1 | 4464 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Chromium Mac implementation of the Port interface."""
import logging
from blinkpy.web_tests.port import base
_log = logging.getLogger(__name__)
class MacPort(base.Port):
SUPPORTED_VERSIONS = ('mac10.10', 'mac10.11', 'mac10.12', 'mac10.13', 'mac10.14', 'mac10.15', 'retina')
port_name = 'mac'
# FIXME: We treat Retina (High-DPI) devices as if they are running a
# different operating system version. This is lame and should be fixed.
# Note that the retina versions fallback to the non-retina versions and so
# no baselines are shared between retina versions; this keeps the fallback
# graph as a tree and maximizes the number of baselines we can share that
# way. We also currently only support Retina on 10.13.
FALLBACK_PATHS = {}
FALLBACK_PATHS['mac10.15'] = ['mac']
FALLBACK_PATHS['mac10.14'] = ['mac']
FALLBACK_PATHS['mac10.13'] = ['mac']
FALLBACK_PATHS['mac10.12'] = ['mac-mac10.12'] + FALLBACK_PATHS['mac10.13']
FALLBACK_PATHS['mac10.11'] = ['mac-mac10.11'] + FALLBACK_PATHS['mac10.12']
FALLBACK_PATHS['mac10.10'] = ['mac-mac10.10'] + FALLBACK_PATHS['mac10.11']
FALLBACK_PATHS['retina'] = ['mac-retina'] + FALLBACK_PATHS['mac10.13']
CONTENT_SHELL_NAME = 'Content Shell'
BUILD_REQUIREMENTS_URL = 'https://chromium.googlesource.com/chromium/src/+/master/docs/mac_build_instructions.md'
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name.endswith('mac'):
version = host.platform.os_version
if host.platform.is_highdpi():
version = 'retina'
return port_name + '-' + version
return port_name
def __init__(self, host, port_name, **kwargs):
super(MacPort, self).__init__(host, port_name, **kwargs)
self._version = port_name[port_name.index('mac-') + len('mac-'):]
assert self._version in self.SUPPORTED_VERSIONS
def check_build(self, needs_http, printer):
result = super(MacPort, self).check_build(needs_http, printer)
if result:
_log.error('For complete Mac build requirements, please see:')
_log.error('')
_log.error(' https://chromium.googlesource.com/chromium/src/+/master/docs/mac_build_instructions.md')
return result
def operating_system(self):
return 'mac'
#
# PROTECTED METHODS
#
def path_to_apache(self):
return self._path_from_chromium_base(
'third_party', 'apache-mac', 'bin', 'httpd')
def path_to_apache_config_file(self):
config_file_basename = 'apache2-httpd-%s-php7.conf' % (self._apache_version(),)
return self._filesystem.join(self.apache_config_directory(), config_file_basename)
def _path_to_driver(self, target=None):
return self._build_path_with_target(target, self.driver_name() + '.app', 'Contents', 'MacOS', self.driver_name())
| bsd-3-clause | -5,245,923,353,456,412,000 | 42.339806 | 121 | 0.689068 | false | 3.757576 | false | false | false |
cs01/pygdbmi | pygdbmi/StringStream.py | 1 | 2663 | class StringStream:
"""A simple class to hold text so that when passed
between functions, the object is passed by reference
and memory does not need to be repeatedly allocated for the string.
This class was written here to avoid adding a dependency
to the project.
"""
def __init__(self, raw_text, debug=False):
self.raw_text = raw_text
self.index = 0
self.len = len(raw_text)
def read(self, count):
"""Read count characters starting at self.index,
and return those characters as a string
"""
new_index = self.index + count
if new_index > self.len:
buf = self.raw_text[self.index :] # return to the end, don't fail
else:
buf = self.raw_text[self.index : new_index]
self.index = new_index
return buf
def seek(self, offset):
"""Advance the index of this StringStream by offset characters"""
self.index = self.index + offset
def advance_past_chars(self, chars):
"""Advance the index past specific chars
Args chars (list): list of characters to advance past
Return substring that was advanced past
"""
start_index = self.index
while True:
current_char = self.raw_text[self.index]
self.index += 1
if current_char in chars:
break
elif self.index == self.len:
break
return self.raw_text[start_index : self.index - 1]
def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None):
"""characters that gdb escapes that should not be
escaped by this parser
"""
if chars_to_remove_gdb_escape is None:
chars_to_remove_gdb_escape = ['"']
buf = ""
while True:
c = self.raw_text[self.index]
self.index += 1
if c == "\\":
# We are on a backslash and there is another character after the backslash
# to parse. Handle this case specially since gdb escaped it for us
# Get the next char that is being escaped
c2 = self.raw_text[self.index]
self.index += 1
# only store the escaped character in the buffer; don't store the backslash
# (don't leave it escaped)
buf += c2
elif c == '"':
# Quote is closed. Exit (and don't include the end quote).
break
else:
# capture this character, and keep capturing
buf += c
return buf
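# Illustrative usage (sketch, not part of the original module; the input
# string is a made-up example of gdb-escaped text):
if __name__ == "__main__":
    stream = StringStream('"has \\"escaped\\" quotes",rest')
    stream.read(1)  # consume the opening quote
    print(stream.advance_past_string_with_gdb_escapes())  # has "escaped" quotes
    print(stream.read(5))  # ,rest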
| mit | 6,418,003,625,747,280,000 | 31.876543 | 91 | 0.558017 | false | 4.498311 | false | false | false |
matuu/ligadefutbol | ligafutbol/gui/clubs_list.py | 1 | 4556 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/clubs_list.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_dialog_clubs(object):
def setupUi(self, dialog_clubs):
dialog_clubs.setObjectName("dialog_clubs")
dialog_clubs.resize(439, 486)
dialog_clubs.setStyleSheet("")
dialog_clubs.setModal(True)
self.gridLayoutWidget = QtWidgets.QWidget(dialog_clubs)
self.gridLayoutWidget.setGeometry(QtCore.QRect(9, 9, 421, 471))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(10)
self.gridLayout.setObjectName("gridLayout")
self.line_search_clubes = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.line_search_clubes.setObjectName("line_search_clubes")
self.gridLayout.addWidget(self.line_search_clubes, 0, 1, 1, 1)
self.btn_search_clubes = QtWidgets.QPushButton(self.gridLayoutWidget)
self.btn_search_clubes.setAutoFillBackground(False)
self.btn_search_clubes.setStyleSheet("")
self.btn_search_clubes.setDefault(False)
self.btn_search_clubes.setFlat(False)
self.btn_search_clubes.setObjectName("btn_search_clubes")
self.gridLayout.addWidget(self.btn_search_clubes, 0, 2, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.btn_new_club = QtWidgets.QPushButton(self.gridLayoutWidget)
self.btn_new_club.setAutoFillBackground(False)
self.btn_new_club.setStyleSheet("")
self.btn_new_club.setDefault(False)
self.btn_new_club.setFlat(False)
self.btn_new_club.setObjectName("btn_new_club")
self.verticalLayout.addWidget(self.btn_new_club)
self.btn_edit_club = QtWidgets.QPushButton(self.gridLayoutWidget)
self.btn_edit_club.setAutoFillBackground(False)
self.btn_edit_club.setStyleSheet("")
self.btn_edit_club.setFlat(False)
self.btn_edit_club.setObjectName("btn_edit_club")
self.verticalLayout.addWidget(self.btn_edit_club)
self.btn_delete_club = QtWidgets.QPushButton(self.gridLayoutWidget)
self.btn_delete_club.setAutoFillBackground(False)
self.btn_delete_club.setStyleSheet("")
self.btn_delete_club.setFlat(False)
self.btn_delete_club.setObjectName("btn_delete_club")
self.verticalLayout.addWidget(self.btn_delete_club)
self.btn_close_club = QtWidgets.QPushButton(self.gridLayoutWidget)
self.btn_close_club.setAutoFillBackground(False)
self.btn_close_club.setStyleSheet("")
self.btn_close_club.setFlat(False)
self.btn_close_club.setObjectName("btn_close_club")
self.verticalLayout.addWidget(self.btn_close_club)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.gridLayout.addLayout(self.verticalLayout, 1, 2, 1, 1)
self.table_list_clubes = QtWidgets.QTableView(self.gridLayoutWidget)
self.table_list_clubes.setEditTriggers(QtWidgets.QAbstractItemView.AnyKeyPressed|QtWidgets.QAbstractItemView.EditKeyPressed)
self.table_list_clubes.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.table_list_clubes.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.table_list_clubes.setObjectName("table_list_clubes")
self.gridLayout.addWidget(self.table_list_clubes, 1, 1, 1, 1)
self.retranslateUi(dialog_clubs)
QtCore.QMetaObject.connectSlotsByName(dialog_clubs)
def retranslateUi(self, dialog_clubs):
_translate = QtCore.QCoreApplication.translate
dialog_clubs.setWindowTitle(_translate("dialog_clubs", "Listado de clubes"))
self.btn_search_clubes.setText(_translate("dialog_clubs", "Buscar"))
self.btn_new_club.setText(_translate("dialog_clubs", "Nuevo"))
self.btn_edit_club.setText(_translate("dialog_clubs", "Editar"))
self.btn_delete_club.setText(_translate("dialog_clubs", "Eliminar"))
self.btn_close_club.setText(_translate("dialog_clubs", "Cerrar"))
from . import resources_rc
| gpl-3.0 | 4,354,716,603,889,257,500 | 53.238095 | 132 | 0.713345 | false | 3.491188 | false | false | false |
afl-mothership/afl-mothership | mothership/utils.py | 1 | 1589 | import datetime
from math import floor, log
def format_timedelta(value, time_format='{days} days {hours} hours {minutes} minutes'):
if hasattr(value, 'seconds'):
seconds = value.seconds + value.days * 24 * 3600
else:
seconds = int(value)
seconds_total = seconds
minutes = int(floor(seconds / 60))
minutes_total = minutes
seconds -= minutes * 60
hours = int(floor(minutes / 60))
hours_total = hours
minutes -= hours * 60
days = int(floor(hours / 24))
days_total = days
hours -= days * 24
years = int(floor(days / 365))
years_total = years
days -= years * 365
return time_format.format(**{
'seconds': seconds,
'seconds2': str(seconds).zfill(2),
'minutes': minutes,
'minutes2': str(minutes).zfill(2),
'hours': hours,
'hours2': str(hours).zfill(2),
'days': days,
'years': years,
'seconds_total': seconds_total,
'minutes_total': minutes_total,
'hours_total': hours_total,
'days_total': days_total,
'years_total': years_total,
})
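# Example (sketch): format_timedelta(datetime.timedelta(days=1, seconds=3930))
# returns '1 days 1 hours 5 minutes' with the default time_format.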
def format_timedelta_secs(secs, time_format='{days} days {hours} hours {minutes} minutes'):
return format_timedelta(datetime.timedelta(seconds=secs), time_format=time_format)
def pretty_size(n, b=1024, u='B', pre=[''] + [p + 'i' for p in 'KMGTPEZY']):
pow, n = min(int(log(max(n, 1), b)), len(pre) - 1), n
return "%.2f %s%s" % (n / b ** float(pow), pre[pow], u)
def pretty_size_dec(value):
return pretty_size(value, b=1000, u = '', pre = ['', 'Thousand', 'Million', 'Billion'])
def format_ago(current_time, ago):
	return (format_timedelta_secs(current_time - ago) + ' ago') if ago else 'none so far' | mit | -7,442,773,628,749,256,000 | 26.413793 | 91 | 0.651982 | false | 2.832442 | false | false | false
jensengrouppsu/rapid | rapid/pyqtgraph/flowchart/FlowchartCtrlTemplate_pyside.py | 50 | 3175 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/flowchart/FlowchartCtrlTemplate.ui'
#
# Created: Mon Dec 23 10:10:51 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(217, 499)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setVerticalSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.loadBtn = QtGui.QPushButton(Form)
self.loadBtn.setObjectName("loadBtn")
self.gridLayout.addWidget(self.loadBtn, 1, 0, 1, 1)
self.saveBtn = FeedbackButton(Form)
self.saveBtn.setObjectName("saveBtn")
self.gridLayout.addWidget(self.saveBtn, 1, 1, 1, 2)
self.saveAsBtn = FeedbackButton(Form)
self.saveAsBtn.setObjectName("saveAsBtn")
self.gridLayout.addWidget(self.saveAsBtn, 1, 3, 1, 1)
self.reloadBtn = FeedbackButton(Form)
self.reloadBtn.setCheckable(False)
self.reloadBtn.setFlat(False)
self.reloadBtn.setObjectName("reloadBtn")
self.gridLayout.addWidget(self.reloadBtn, 4, 0, 1, 2)
self.showChartBtn = QtGui.QPushButton(Form)
self.showChartBtn.setCheckable(True)
self.showChartBtn.setObjectName("showChartBtn")
self.gridLayout.addWidget(self.showChartBtn, 4, 2, 1, 2)
self.ctrlList = TreeWidget(Form)
self.ctrlList.setObjectName("ctrlList")
self.ctrlList.headerItem().setText(0, "1")
self.ctrlList.header().setVisible(False)
self.ctrlList.header().setStretchLastSection(False)
self.gridLayout.addWidget(self.ctrlList, 3, 0, 1, 4)
self.fileNameLabel = QtGui.QLabel(Form)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.fileNameLabel.setFont(font)
self.fileNameLabel.setText("")
self.fileNameLabel.setAlignment(QtCore.Qt.AlignCenter)
self.fileNameLabel.setObjectName("fileNameLabel")
self.gridLayout.addWidget(self.fileNameLabel, 0, 1, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.loadBtn.setText(QtGui.QApplication.translate("Form", "Load..", None, QtGui.QApplication.UnicodeUTF8))
self.saveBtn.setText(QtGui.QApplication.translate("Form", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.saveAsBtn.setText(QtGui.QApplication.translate("Form", "As..", None, QtGui.QApplication.UnicodeUTF8))
self.reloadBtn.setText(QtGui.QApplication.translate("Form", "Reload Libs", None, QtGui.QApplication.UnicodeUTF8))
self.showChartBtn.setText(QtGui.QApplication.translate("Form", "Flowchart", None, QtGui.QApplication.UnicodeUTF8))
from ..widgets.TreeWidget import TreeWidget
from ..widgets.FeedbackButton import FeedbackButton
| mit | -4,020,232,173,781,648,000 | 47.106061 | 122 | 0.695118 | false | 3.717799 | false | false | false |
BL-Labs/sample_generator_datatools | utils/marcutils.py | 1 | 6861 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pymarc, os, re
from collections import defaultdict
from settings import DATAROOT
YEAR_P = re.compile(r"(1[0-9]{3})")
def find_a_year(args):
for term in args:
d = YEAR_P.search(term)
if d != None:
return d.groups()[0]
return u""
def generate_marcfiles(reverse_order = False):
docfiles = sorted([x for x in os.listdir(DATAROOT) if x.startswith("19C_0")])
if reverse_order:
docfiles.reverse()
for docfile in docfiles:
docfilepath = os.path.join(DATAROOT, docfile)
yield (docfilepath, pymarc.parse_xml_to_array(docfilepath))
def get_language(marcdoc):
oh_eights = filter(lambda x: x.has_key("008"), marcdoc.as_dict()["fields"])
if len(oh_eights) == 1:
    return oh_eights[0]['008'][35:38]
elif len(oh_eights) > 1:
raise Exception("More than one 008 field found. Bigger problem likely")
else:
return ""
def get_subfield(field, marccode, subfield):
subfields = filter(lambda x: x.has_key(subfield), field[marccode].get('subfields', [{}]))
for subf in subfields:
yield subf[subfield]
def collate(record):
collated = defaultdict(list)
for field in record.get_fields():
collated[field.tag].append(field)
return collated
def _normalise_name(args):
name, date, relator = args
# If no relator, assume author?
# Spaces to "_"
# eg "SMITH, John", "1948-", "" ---> "author/SMITH,_John__1948-"
if not relator:
relator = u"author"
else:
relator = relator[0].lower()
if not name:
name = u""
else:
name = name[0]
if not date:
date = u""
else:
date = date[0]
return (name, date, relator)
def flatten_name(args):
name, date, relator = _normalise_name(args)
fname = u"{0}/{1}".format(relator, name)
if date:
fname = u"{0}/{1} {2}".format(relator, name, date)
return fname
def flatten_name_for_facet(args):
name, date, relator = _normalise_name(args)
fname = u"{0}/{1}".format(relator, name)
if date:
fname = u"{0}/{1}__{2}".format(relator, name, date)
return re.sub(u" ", u"_", fname)
def get_sysnum(collated_record):
if len(collated_record["001"]) == 1:
return collated_record["001"][0].value()
else:
return ""
def get_lang(collated_record):
if len(collated_record["008"]) == 1:
return collated_record["008"][0].value()[35:38]
else:
return ""
def _gather_names(namefield):
name = namefield.get_subfields("a")
date = namefield.get_subfields("d")
relator = namefield.get_subfields("e")
return (name, date, relator)
def get_raw_names(collated_record):
# personal 100 - $a name $d date $e relator
# Corp 110 - $a name $b subgroup
# alt name 700 - $a name $t title of previous/related work (ADD later maybe?)
names = {'100':[], '110':[]}
for nametype in names:
for namefield in collated_record.get(nametype, []):
names[nametype].append(_gather_names(namefield))
return names
def get_names(collated_record, facet = False):
names = get_raw_names(collated_record)
if facet:
return {'personal': map(flatten_name_for_facet, names['100']),
'corporate': map(flatten_name_for_facet, names['110'])}
else:
return {'personal': map(flatten_name, names['100']),
'corporate': map(flatten_name, names['110'])}
def get_titles(collated_record):
# A title can hide in 245 $a + $b, 240 and 130 on occasion.
# ~99.9% of records had a main title in 245
# and 240 + 130 coverage was below 15% so skipping for now
# Output is still as a list, in case this changes
if collated_record.get('245', u""):
maintitles = [x.value() for x in collated_record['245'] + collated_record['240'] + collated_record['130']]
return maintitles
else:
return u""
def get_pub_detail(collated_record):
# 260 $a Place of Publication/Distribution
# $b Name of Publisher/Distrib
# $c date of Pub
# $e Place of Manuf
# $f manufacturer
# $g Manuf date
# Near 95% coverage in the dataset
if collated_record.get("260", u""):
# Typically all contained in a single field.
pubfield = collated_record['260'][0]
pdtype = u"m"
place = pubfield.get_subfields("e")
date = pubfield.get_subfields("f")
maker = pubfield.get_subfields("g")
if pubfield.get_subfields("a"):
pdtype = u"p"
place = pubfield.get_subfields("a")
date = pubfield.get_subfields("c")
maker = pubfield.get_subfields("b")
def present_value(items):
if len(items[0]) == 1:
return u"{0}/{1}".format(items[1], items[0][0])
return u""
return map(present_value, [(place, pdtype), (maker, pdtype), (date, pdtype)])
return [u"", u"", u""]
def get_phys_desc(collated_record):
# $a - Extent (R)
# $b - Other physical details (NR)
# $c - Dimensions (R)
# $e - Accompanying material (NR)
# $f - Type of unit (R)
# $g - Size of unit (R)
# $3 - Materials specified (NR)
# $6 - Linkage (NR)
# $8 - Field link and sequence number (R)
# Lump it all in there?
def iter_subf(fields):
for x in fields:
for y in x.get_subfields("a", "b", "c", "e", "f", "g", "3", "6"):
yield y
if collated_record.get("300"):
return [y for y in iter_subf(collated_record["300"])]
return []
def get_general_note(collated_record):
if collated_record.get("500"):
return [x.value() for x in collated_record['500']]
return []
def get_domids(collated_record):
if collated_record.get("852"):
sfx = filter(lambda x: x.get_subfields("c") == [u"SFX"], collated_record["852"])
if sfx:
domids = [x.get_subfields("j")[0] for x in sfx if x.get_subfields("j") and x.get_subfields("j")[0].startswith("lsid")]
return domids
return []
def get_shelfmarks(collated_record):
# ignore SFX + lsid shelfmarks, as these are harvested by the get_domids part
marks = []
if collated_record.get("852"):
for sm in collated_record['852']:
if not(sm.get_subfields("c") == [u"SFX"] and sm.get_subfields("j")[0].startswith("lsid")):
marks.append(sm.value())
return marks
def get_solr_doc(collated_record):
names = get_names(collated_record)
pubplace, maker, pubdate = get_pub_detail(collated_record)
domids = get_domids(collated_record)
digital = False
if domids:
digital = True
year = find_a_year([pubdate, maker, pubplace])
doc = {'id': get_sysnum(collated_record),
'title': get_titles(collated_record),
'personal': names['personal'],
'corporate': names['corporate'],
'place': pubplace,
'maker': maker,
'date': pubdate,
'year': year,
'physdesc': get_phys_desc(collated_record),
'general': get_general_note(collated_record),
'domids': domids,
'shelfmarks': get_shelfmarks(collated_record),
'lang': get_lang(collated_record),
'digital': digital}
return doc
| mit | -6,400,332,855,468,944,000 | 30.045249 | 124 | 0.628334 | false | 3.033156 | false | false | false |
jjstwerff/alien_engine | source/rbtree.py | 1 | 3958 | """Simple tree-like structure"""
import copy
class DictIter(object):
"""Iterator through the tree"""
__slots__ = 'data', 'keys', 'index'
def __init__(self, data):
self.data = data
self.keys = sorted(data.keys())
self.index = -1 # ready to iterate on the next() call
def __next__(self):
""" Return the next item in the container
Once we go off the list we stay off even if the list changes
"""
self.index += 1
if self.index >= len(self.keys):
raise StopIteration
return self.data[self.keys[self.index]]
class RBDict(object):
"""Sorted dictionary"""
__slots__ = 'data', 'changed'
def __init__(self, initial=None, changes=False):
self.data = {}
if changes:
self.changed = {}
else:
self.changed = None
if initial:
for key, value in initial.items():
self[key] = value
def remember_changes(self, remember_changes=True):
"""Start or stop remembering changes on this Set"""
self.changed = {} if remember_changes else None
def changes(self):
"""Get the list of changes with old and new value, clear the changes"""
        if self.changed is None:
            raise AttributeError("No change recording supported on this Set")
res = [(
self.changed[chkey],
self.data[chkey] if chkey in self.data else None
) for chkey in sorted(self.changed.keys())]
self.changed.clear()
return res
def has_changes(self):
"""Return if there are changes in this rbtree"""
return self.changed is not None and len(self.changed) > 0
def restore(self, key):
"""Try to restore the old changed record"""
if self.changed is not None and key in self.changed:
self.data[key] = self.changed[key]
del self.changed[key]
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
if self.changed is not None and key not in self.changed:
if key in self.data:
raise ValueError("Remove an item before storing a changed one")
self.changed[key] = None
self.data[key] = value
def __delitem__(self, key):
if self.changed is not None and key not in self.changed:
if key in self.data:
self.changed[key] = copy.copy(self.data[key])
del self.data[key]
def get(self, key, default=None):
"""Get a key from the dictionary with a default"""
if key in self.data:
return self.data[key]
return default
def __iter__(self):
return DictIter(self.data)
def __len__(self):
return len(self.data)
def keys(self):
"""Return all keys"""
return sorted(self.data.keys())
def values(self):
"""Return all values"""
return [self.data[k] for k in self.keys()]
def items(self):
"""Return all items"""
return [(k, self.data[k]) for k in self.keys()]
def __contains__(self, key):
return key in self.data
def clear(self):
"""delete all entries"""
self.data.clear()
if self.changed is not None:
self.changed.clear()
def copy(self):
"""return shallow copy"""
# there may be a more efficient way of doing this
return RBDict(self)
def update(self, other):
"""Add all items from the supplied mapping to this one.
Will overwrite old entries with new ones."""
for key in other.keys():
self[key] = other[key]
def __repr__(self):
ls = ['{']
for k, v in self.items():
if len(ls) > 1:
ls.append(', ')
ls.append(k)
ls.append('=')
ls.append(str(v))
ls.append('}')
return ''.join(ls)
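# Minimal self-check sketch (illustrative addition, not part of the original module):
# it exercises the change-tracking workflow documented in changes()/restore() above.
if __name__ == '__main__':
    d = RBDict(changes=True)
    d['b'] = 2
    d['a'] = 1
    assert d.keys() == ['a', 'b']                   # keys always come back sorted
    assert d.changes() == [(None, 1), (None, 2)]    # new keys record an old value of None
    del d['a']                                      # deletion remembers a copy of the old value
    d['a'] = 10                                     # re-adding the same key is then allowed
    assert d.changes() == [(1, 10)]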
| gpl-3.0 | 8,465,472,074,384,940,000 | 28.984848 | 79 | 0.551794 | false | 4.114345 | false | false | false |
mach327/chirp_fork | chirp/drivers/ic208.py | 2 | 7770 | # Copyright 2013 Dan Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from chirp.drivers import icf
from chirp import chirp_common, errors, directory, bitwise
MEM_FORMAT = """
struct memory {
u24 freq;
u16 offset;
u8 power:2,
rtone:6;
u8 duplex:2,
ctone:6;
u8 unknown1:1,
dtcs:7;
u8 tuning_step:4,
unknown2:4;
u8 unknown3;
u8 alt_mult:1,
unknown4:1,
is_fm:1,
is_wide:1,
unknown5:2,
tmode:2;
u16 dtcs_polarity:2,
usealpha:1,
empty:1,
name1:6,
name2:6;
u24 name3:6,
name4:6,
name5:6,
name6:6;
};
struct memory memory[510];
struct {
u8 unknown1:1,
empty:1,
pskip:1,
skip:1,
bank:4;
} flags[512];
struct memory call[2];
"""
MODES = ["AM", "FM", "NFM", "NAM"]
TMODES = ["", "Tone", "TSQL", "DTCS"]
DUPLEX = ["", "", "-", "+"]
DTCS_POL = ["NN", "NR", "RN", "RR"]
STEPS = [5.0, 10.0, 12.5, 15, 20.0, 25.0, 30.0, 50.0, 100.0, 200.0]
POWER = [chirp_common.PowerLevel("High", watts=50),
chirp_common.PowerLevel("Low", watts=5),
chirp_common.PowerLevel("Mid", watts=15),
]
IC208_SPECIAL = []
for i in range(1, 6):
IC208_SPECIAL.append("%iA" % i)
IC208_SPECIAL.append("%iB" % i)
CHARSET = dict(zip([0x00, 0x08, 0x09, 0x0a, 0x0b, 0x0d, 0x0f], " ()*+-/") +
zip(range(0x10, 0x1a), "0123456789") +
[(0x1c, '|'), (0x1d, '=')] +
zip(range(0x21, 0x3b), "ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
CHARSET_REV = dict(zip(CHARSET.values(), CHARSET.keys()))
def get_name(_mem):
"""Decode the name from @_mem"""
def _get_char(val):
try:
return CHARSET[int(val)]
except KeyError:
return "*"
name_bytes = [_mem.name1, _mem.name2, _mem.name3,
_mem.name4, _mem.name5, _mem.name6]
name = ""
for val in name_bytes:
name += _get_char(val)
return name.rstrip()
def set_name(_mem, name):
"""Encode @name in @_mem"""
def _get_index(char):
try:
return CHARSET_REV[char]
except KeyError:
return CHARSET_REV["*"]
name = name.ljust(6)[:6]
_mem.usealpha = bool(name.strip())
# The element override calling convention makes this harder to automate.
# It's just six, so do it manually
_mem.name1 = _get_index(name[0])
_mem.name2 = _get_index(name[1])
_mem.name3 = _get_index(name[2])
_mem.name4 = _get_index(name[3])
_mem.name5 = _get_index(name[4])
_mem.name6 = _get_index(name[5])
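# Round-trip example (illustrative): set_name(_mem, "AB 1") stores the 6-bit indices
# 0x21, 0x22, 0x00, 0x11, 0x00, 0x00 (the name is space-padded to six characters),
# and get_name(_mem) maps them back through CHARSET and rstrips to "AB 1".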
@directory.register
class IC208Radio(icf.IcomCloneModeRadio):
"""Icom IC800"""
VENDOR = "Icom"
MODEL = "IC-208H"
_model = "\x26\x32\x00\x01"
_memsize = 0x2600
_endframe = "Icom Inc\x2e30"
_can_hispeed = True
_memories = []
_ranges = [(0x0000, 0x2600, 32)]
def get_features(self):
rf = chirp_common.RadioFeatures()
rf.memory_bounds = (1, 500)
rf.has_bank = True
rf.valid_tuning_steps = list(STEPS)
rf.valid_tmodes = list(TMODES)
rf.valid_modes = list(MODES)
rf.valid_duplexes = list(DUPLEX)
rf.valid_power_levels = list(POWER)
rf.valid_skips = ["", "S", "P"]
rf.valid_bands = [(118000000, 174000000),
(230000000, 550000000),
(810000000, 999995000)]
rf.valid_special_chans = ["C1", "C2"] + sorted(IC208_SPECIAL)
rf.valid_characters = "".join(CHARSET.values())
return rf
def get_raw_memory(self, number):
_mem, _flg, index = self._get_memory(number)
return repr(_mem) + repr(_flg)
def process_mmap(self):
self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
def _get_bank(self, loc):
_flg = self._memobj.flags[loc-1]
if _flg.bank >= 0x0A:
return None
else:
return _flg.bank
def _set_bank(self, loc, bank):
_flg = self._memobj.flags[loc-1]
if bank is None:
_flg.bank = 0x0A
else:
_flg.bank = bank
def _get_memory(self, number):
if isinstance(number, str):
if "A" in number or "B" in number:
index = 501 + IC208_SPECIAL.index(number)
_mem = self._memobj.memory[index - 1]
_flg = self._memobj.flags[index - 1]
else:
index = int(number[1]) - 1
_mem = self._memobj.call[index]
_flg = self._memobj.flags[510 + index]
index = index + -10
elif number <= 0:
index = 10 - abs(number)
_mem = self._memobj.call[index]
_flg = self._memobj.flags[index + 510]
else:
index = number
_mem = self._memobj.memory[number - 1]
_flg = self._memobj.flags[number - 1]
return _mem, _flg, index
def get_memory(self, number):
_mem, _flg, index = self._get_memory(number)
mem = chirp_common.Memory()
mem.number = index
if isinstance(number, str):
mem.extd_number = number
else:
mem.skip = _flg.pskip and "P" or _flg.skip and "S" or ""
if _flg.empty:
mem.empty = True
return mem
mult = _mem.alt_mult and 6250 or 5000
mem.freq = int(_mem.freq) * mult
mem.offset = int(_mem.offset) * 5000
mem.rtone = chirp_common.TONES[_mem.rtone]
mem.ctone = chirp_common.TONES[_mem.ctone]
mem.dtcs = chirp_common.DTCS_CODES[_mem.dtcs]
mem.dtcs_polarity = DTCS_POL[_mem.dtcs_polarity]
mem.duplex = DUPLEX[_mem.duplex]
mem.tmode = TMODES[_mem.tmode]
mem.mode = ((not _mem.is_wide and "N" or "") +
(_mem.is_fm and "FM" or "AM"))
mem.tuning_step = STEPS[_mem.tuning_step]
mem.name = get_name(_mem)
mem.power = POWER[_mem.power]
return mem
def set_memory(self, mem):
_mem, _flg, index = self._get_memory(mem.number)
if mem.empty:
_flg.empty = True
self._set_bank(mem.number, None)
return
if _flg.empty:
_mem.set_raw("\x00" * 16)
_flg.empty = False
_mem.alt_mult = chirp_common.is_fractional_step(mem.freq)
_mem.freq = mem.freq / (_mem.alt_mult and 6250 or 5000)
_mem.offset = mem.offset / 5000
_mem.rtone = chirp_common.TONES.index(mem.rtone)
_mem.ctone = chirp_common.TONES.index(mem.ctone)
_mem.dtcs = chirp_common.DTCS_CODES.index(mem.dtcs)
_mem.dtcs_polarity = DTCS_POL.index(mem.dtcs_polarity)
_mem.duplex = DUPLEX.index(mem.duplex)
_mem.tmode = TMODES.index(mem.tmode)
_mem.is_fm = "FM" in mem.mode
_mem.is_wide = mem.mode[0] != "N"
_mem.tuning_step = STEPS.index(mem.tuning_step)
set_name(_mem, mem.name)
try:
_mem.power = POWER.index(mem.power)
except Exception:
pass
if not isinstance(mem.number, str):
_flg.skip = mem.skip == "S"
_flg.pskip = mem.skip == "P"
| gpl-3.0 | 4,489,974,256,005,181,000 | 28.770115 | 76 | 0.553411 | false | 3.038717 | false | false | false |
sagemathinc/smc | src/dev/project/start_postgres.py | 5 | 2083 | #!/usr/bin/env python
"""
This is a script for starting postgres for development purposes
in an SMC project.
"""
import os, sys, time, util
path = os.path.split(os.path.realpath(__file__))[0]; os.chdir(path); sys.path.insert(0, path)
PG_DATA = os.path.join(path, "postgres_data")
if not os.path.exists(PG_DATA):
util.cmd("pg_ctl init -D '%s'"%PG_DATA)
# Lock down authentication so it is ONLY via unix socket
open(os.path.join(PG_DATA,'pg_hba.conf'), 'w').write(
"""
# This is safe since we only enable a socket protected by filesystem permissions:
local all all trust
# You can uncomment this and comment out the above if you want to test password auth.
#local all all md5
""")
# Make it so the socket is in this subdirectory, so that it is
# protected by UNIX permissions. This approach avoids any need
# for accounts/passwords for development and the Docker image.
conf = os.path.join(PG_DATA, 'postgresql.conf')
s = open(conf).read()
s += '\n'
# Move the default directory where the socket is from /tmp to right here.
socket_dir = os.path.join(PG_DATA, 'socket')
s += "unix_socket_directories = '%s'\nlisten_addresses=''\n"%socket_dir
os.makedirs(socket_dir)
util.cmd("chmod og-rwx '%s'"%PG_DATA) # just in case -- be paranoid...
open(conf,'w').write(s)
# Create script so that clients will know where socket dir is.
open("postgres-env", 'w').write("""#!/bin/sh
export PGUSER='smc'
export PGHOST='%s'
"""%socket_dir)
util.cmd('chmod +x postgres-env')
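# Clients can then pick these settings up before connecting (illustrative usage):
#   source ./postgres-env && psql
# since psql/libpq honor the PGUSER and PGHOST environment variables.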
# Start database running in background as daemon
util.cmd("postgres -D '%s' >%s/postgres.log 2>&1 &"%(PG_DATA, PG_DATA))
time.sleep(5)
# Create the smc user with no password (not needed since we are using local file permissions)
util.cmd("unset PGUSER; unset PGHOST; createuser -h '%s' -sE smc"%socket_dir)
# Stop database daemon
util.cmd("kill %s"%(open(os.path.join(PG_DATA, 'postmaster.pid')).read().split()[0]))
# Let it die and remove lock file.
time.sleep(3)
util.cmd("postgres -D '%s'"%PG_DATA)
| agpl-3.0 | -4,752,196,203,030,599,000 | 32.596774 | 97 | 0.666827 | false | 3.259781 | false | false | false |
codeforamerica/mdc-feedback | feedback/public/views.py | 1 | 2650 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
import ujson
import urllib
import datetime
today = datetime.date.today()
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from flask import (
Blueprint, request, render_template, flash,
current_app, abort
)
from flask.ext.login import current_user, login_user, logout_user
from feedback.extensions import login_manager
from feedback.user.models import User
blueprint = Blueprint('public', __name__, static_folder="../static")
@login_manager.user_loader
def load_user(id):
return User.query.filter_by(email=id).first()
@blueprint.route('/login', methods=['GET'])
def login():
return render_template("user/login.html", current_user=current_user, date=today.strftime('%B %d, %Y'),)
@blueprint.route('/logout', methods=['GET', 'POST'])
def logout():
logout_user()
if request.args.get('persona', None):
return 'OK'
else:
flash('You are logged out.', 'info')
return render_template('user/logout.html', date=today.strftime('%B %d, %Y'),)
@blueprint.route('/auth', methods=['POST'])
def auth():
'''
Endpoint from AJAX request for authentication from persona
'''
data = urllib.urlencode({
'assertion': request.form.get('assertion'),
'audience': current_app.config.get('BROWSERID_URL')
})
req = urllib2.Request('https://verifier.login.persona.org/verify', data)
response = ujson.loads(urllib2.urlopen(req).read())
current_app.logger.debug(
'LOGIN: status from persona: {}'.format(response))
if response.get('status') != 'okay':
current_app.logger.debug(
'REJECTEDUSER: User login rejected from persona. Messages: {}'.format(response))
abort(403)
next_url = request.args.get('next', None)
email = response.get('email')
user = User.query.filter(User.email == email).first()
domain = email.split('@')[1] if len(email.split('@')) > 1 else None
if user:
login_user(user)
flash('Logged in successfully!', 'alert-success')
current_app.logger.debug(
'LOGIN: User {} logged in successfully'.format(user.email))
return next_url if next_url else '/'
elif domain in current_app.config.get('CITY_DOMAINS'):
user = User.create(email=email)
login_user(user)
current_app.logger.debug(
'NEWUSER: New User {} successfully created'.format(user.email))
return '/'
else:
current_app.logger.debug(
'NOTINDB: User {} not in DB -- aborting!'.format(email))
abort(403)
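# For reference (illustrative sketch only, not part of this module): the login page is
# expected to POST the Persona assertion to this endpoint and follow the returned URL, e.g.
# $.post('/auth', {assertion: assertion}).done(function(next) { window.location = next; });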
| mit | 3,382,055,097,900,435,000 | 27.804348 | 107 | 0.641132 | false | 3.675451 | false | false | false |
wooey/Wooey | wooey/conf/project_template/settings/wooey_settings.py | 1 | 1333 | from .django_settings import *
from wooey.version import DJANGO_VERSION, DJ110
from django.utils.translation import ugettext_lazy as _
INSTALLED_APPS += (
# 'corsheaders',
'wooey',
)
if DJANGO_VERSION < DJ110:
MIDDLEWARE_CLASSES = list(MIDDLEWARE_CLASSES)
MIDDLEWARE_CLASSES.append('{{ project_name }}.middleware.ProcessExceptionMiddleware')
MIDDLEWARE_OBJ = MIDDLEWARE_CLASSES
else:
# Using Django 1.10 +
MIDDLEWARE = list(MIDDLEWARE)
MIDDLEWARE.append('{{ project_name }}.middleware.ProcessExceptionMiddleware')
MIDDLEWARE_OBJ = MIDDLEWARE
LANGUAGES = [
('de', _('German')),
('en', _('English')),
('fr', _('French')),
('ja', _('Japanese')),
('nl', _('Dutch')),
('zh-hans', _('Simplified Chinese')),
('ko', _('Korean')),
('es', _('Spanish'))
]
NEW_MIDDLEWARE = []
for i in MIDDLEWARE_OBJ:
NEW_MIDDLEWARE.append(i)
if i == 'django.contrib.sessions.middleware.SessionMiddleware':
NEW_MIDDLEWARE.append('django.middleware.locale.LocaleMiddleware')
NEW_MIDDLEWARE.append('{{ project_name }}.middleware.ProcessExceptionMiddleware')
if DJANGO_VERSION < DJ110:
MIDDLEWARE_CLASSES = NEW_MIDDLEWARE
else:
MIDDLEWARE = NEW_MIDDLEWARE
PROJECT_NAME = "{{ project_name }}"
WOOEY_CELERY_APP_NAME = 'wooey.celery'
WOOEY_CELERY_TASKS = 'wooey.tasks'
| bsd-3-clause | -3,964,541,111,412,052,500 | 28.622222 | 89 | 0.672168 | false | 3.545213 | false | false | false |
WMD-group/MacroDensity | examples/ActiveSpace.py | 1 | 3926 | #! /usr/bin/env python
import macrodensity as md
import math
import numpy as np
import matplotlib.pyplot as plt
import csv
from itertools import izip
#------------------------------------------------------------------
# Get the potential
# This section should not be altered
#------------------------------------------------------------------
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density('LOCPOT.slab')
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
cutoff_varience = 1E-4
hanksConstant = 4.89E-7
## Get the gradiens (Field), if required.
## Comment out if not required, due to compuational expense.
#grad_x,grad_y,grad_z = np.gradient(grid_pot[:,:,:],resolution_x,resolution_y,resolution_z)
#------------------------------------------------------------------
##------------------------------------------------------------------
## Get the equation for the plane
## This is the section for plotting on a user defined plane;
## uncomment commands if this is the option that you want.
##------------------------------------------------------------------
## Input section (define the plane with 3 points)
#a_point = [0, 0, 0]
#b_point = [1, 0, 1]
#c_point = [0, 1, 0]
## Convert the fractional points to grid points on the density surface
#a = pot.numbers_2_grid(a_point,NGX,NGY,NGZ)
#b = pot.numbers_2_grid(b_point,NGX,NGY,NGZ)
#c = pot.numbers_2_grid(c_point,NGX,NGY,NGZ)
#plane_coeff = pot.points_2_plane(a,b,c)
## Get the gradients
#XY = np.multiply(grad_x,grad_y)
#grad_mag = np.multiply(XY,grad_z)
## Create the plane
#xx,yy,grd = pot.create_plotting_mesh(NGX,NGY,NGZ,plane_coeff,grad_x)
## Plot the surface
#plt.contourf(xx,yy,grd,V)
#plt.show()
##------------------------------------------------------------------
##------------------------------------------------------------------
##------------------------------------------------------------------
## Plotting a planar average (Field/potential) throughout the material
##------------------------------------------------------------------
## FIELDS
#planar = pot.planar_average(grad_x,NGX,NGY,NGZ)
## POTENTIAL
#planar = pot.planar_average(grid_pot,NGX,NGY,NGZ)
## MACROSCOPIC AVERAGE
#macro = pot.macroscopic_average(planar,4.80,resolution_z)
#plt.plot(planar)
#plt.plot(macro)
#plt.savefig('Planar.eps')
#plt.show()
##------------------------------------------------------------------
##------------------------------------------------------------------
##------------------------------------------------------------------
# Getting the average potential in a single cube of arbitrary size
##------------------------------------------------------------------
## cube defines the size of the cube in units of mesh points (NGX/Y/Z)
cube = [2,2,2]
## origin defines the bottom left point of the cube the "0,0,0" point in fractional coordinates
origin = [0,0,0]
## Uncomment the lines below to do the business
vacuum = []
non_vacuum = []
beers = hanksConstant*NGX*NGY*NGZ
pinners = beers*1.5
print "You have time for %.1f oz of beer "%beers
print "... or %.1f oz of Pinner (TM)."%pinners
for i in range(0,NGX,cube[0]):
print float(i)/NGX
for j in range(0,NGY,cube[1]):
for k in range(0,NGZ,cube[2]):
origin = [float(i)/NGX,float(j)/NGY,float(k)/NGZ]
volume_average, cube_var = md.voulme_average(origin, cube, grid_pot, NGX, NGY, NGZ)
if cube_var <= cutoff_varience:
vacuum.append(origin)
else:
non_vacuum.append(origin)
print "Number of vacuum cubes: ", len(vacuum)
print "Number of non-vacuum cubes: ", len(non_vacuum)
print "Percentage of vacuum cubes: ",(float(len(vacuum))/(float(len(vacuum))+float(len(non_vacuum)))*100.)
print "Percentage of non-vacuum cubes: ",(float(len(non_vacuum))/(float(len(vacuum))+float(len(non_vacuum)))*100.)
| mit | -3,934,570,029,597,105,000 | 37.490196 | 114 | 0.546612 | false | 3.271667 | false | false | false |
mschmidt87/python-neo | neo/core/analogsignal.py | 5 | 20804 | # -*- coding: utf-8 -*-
'''
This module implements objects relating to analog signals,
:class:`BaseAnalogSignal` and its child :class:`AnalogSignal`.
:class:`AnalogSignalArray` is derived from :class:`BaseAnalogSignal` but is
defined in :module:`neo.core.analogsignalarray`.
:class:`IrregularlySampledSignal` is not derived from :class:`BaseAnalogSignal`
and is defined in :module:`neo.core.irregularlysampledsignal`.
:class:`BaseAnalogSignal` inherits from :class:`quantites.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo
def _get_sampling_rate(sampling_rate, sampling_period):
'''
Gets the sampling_rate from either the sampling_period or the
sampling_rate, or makes sure they match if both are specified
'''
if sampling_period is None:
if sampling_rate is None:
raise ValueError("You must provide either the sampling rate or " +
"sampling period")
elif sampling_rate is None:
sampling_rate = 1.0 / sampling_period
elif sampling_period != 1.0 / sampling_rate:
raise ValueError('The sampling_rate has to be 1/sampling_period')
if not hasattr(sampling_rate, 'units'):
raise TypeError("Sampling rate/sampling period must have units")
return sampling_rate
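# For example (illustrative): _get_sampling_rate(None, 0.5 * pq.ms) returns 2.0 / pq.ms,
# _get_sampling_rate(10 * pq.Hz, None) returns 10 * pq.Hz unchanged, and passing both a
# rate and an inconsistent period raises ValueError.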
def _new_BaseAnalogSignal(cls, signal, units=None, dtype=None, copy=True,
t_start=0*pq.s, sampling_rate=None,
sampling_period=None, name=None, file_origin=None,
description=None, channel_index=None,
annotations=None):
'''
A function to map BaseAnalogSignal.__new__ to function that
does not do the unit checking. This is needed for pickle to work.
'''
return cls(signal=signal, units=units, dtype=dtype, copy=copy,
t_start=t_start, sampling_rate=sampling_rate,
sampling_period=sampling_period, name=name,
file_origin=file_origin, description=description,
channel_index=channel_index,
**annotations)
class BaseAnalogSignal(BaseNeo, pq.Quantity):
'''
Base class for AnalogSignal and AnalogSignalArray
'''
_single_parent_objects = ('Segment', 'RecordingChannel')
_quantity_attr = 'signal'
_necessary_attrs = (('signal', pq.Quantity, 1),
('sampling_rate', pq.Quantity, 0),
('t_start', pq.Quantity, 0))
_recommended_attrs = ((('channel_index', int),) +
BaseNeo._recommended_attrs)
def __new__(cls, signal, units=None, dtype=None, copy=True,
t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
name=None, file_origin=None, description=None,
channel_index=None, **annotations):
'''
Constructs new :class:`BaseAnalogSignal` from data.
        This is called whenever a new :class:`BaseAnalogSignal` is created from
the constructor, but not when slicing.
__array_finalize__ is called on the new object.
'''
if units is None:
if hasattr(signal, "units"):
units = signal.units
else:
raise ValueError("Units must be specified")
elif isinstance(signal, pq.Quantity):
# could improve this test, what if units is a string?
if units != signal.units:
signal = signal.rescale(units)
obj = pq.Quantity.__new__(cls, signal, units=units, dtype=dtype,
copy=copy)
if t_start is None:
raise ValueError('t_start cannot be None')
obj._t_start = t_start
obj._sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
obj.channel_index = channel_index
obj.segment = None
obj.recordingchannel = None
return obj
def __init__(self, signal, units=None, dtype=None, copy=True,
t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
name=None, file_origin=None, description=None,
channel_index=None, **annotations):
'''
Initializes a newly constructed :class:`BaseAnalogSignal` instance.
'''
# This method is only called when constructing a new BaseAnalogSignal,
# not when slicing or viewing. We use the same call signature
# as __new__ for documentation purposes. Anything not in the call
# signature is stored in annotations.
# Calls parent __init__, which grabs universally recommended
# attributes and sets up self.annotations
BaseNeo.__init__(self, name=name, file_origin=file_origin,
description=description, **annotations)
def __reduce__(self):
'''
Map the __new__ function onto _new_BaseAnalogSignal, so that pickle
works
'''
return _new_BaseAnalogSignal, (self.__class__,
np.array(self),
self.units,
self.dtype,
True,
self.t_start,
self.sampling_rate,
self.sampling_period,
self.name,
self.file_origin,
self.description,
self.channel_index,
self.annotations)
def __array_finalize__(self, obj):
'''
This is called every time a new :class:`BaseAnalogSignal` is created.
It is the appropriate place to set default values for attributes
for :class:`BaseAnalogSignal` constructed by slicing or viewing.
User-specified values are only relevant for construction from
constructor, and these are set in __new__. Then they are just
copied over here.
'''
super(BaseAnalogSignal, self).__array_finalize__(obj)
self._t_start = getattr(obj, '_t_start', 0 * pq.s)
self._sampling_rate = getattr(obj, '_sampling_rate', None)
# The additional arguments
self.annotations = getattr(obj, 'annotations', None)
# Globally recommended attributes
self.name = getattr(obj, 'name', None)
self.file_origin = getattr(obj, 'file_origin', None)
self.description = getattr(obj, 'description', None)
self.channel_index = getattr(obj, 'channel_index', None)
def __repr__(self):
'''
Returns a string representing the :class:`BaseAnalogSignal`.
'''
return ('<%s(%s, [%s, %s], sampling rate: %s)>' %
(self.__class__.__name__,
super(BaseAnalogSignal, self).__repr__(), self.t_start,
self.t_stop, self.sampling_rate))
def __getslice__(self, i, j):
'''
Get a slice from :attr:`i` to :attr:`j`.
Doesn't get called in Python 3, :meth:`__getitem__` is called instead
'''
obj = super(BaseAnalogSignal, self).__getslice__(i, j)
obj.t_start = self.t_start + i * self.sampling_period
return obj
def __getitem__(self, i):
'''
Get the item or slice :attr:`i`.
'''
obj = super(BaseAnalogSignal, self).__getitem__(i)
if isinstance(obj, BaseAnalogSignal):
# update t_start and sampling_rate
slice_start = None
slice_step = None
if isinstance(i, slice):
slice_start = i.start
slice_step = i.step
elif isinstance(i, tuple) and len(i) == 2:
slice_start = i[0].start
slice_step = i[0].step
if slice_start:
obj.t_start = self.t_start + slice_start * self.sampling_period
if slice_step:
obj.sampling_period *= slice_step
return obj
# sampling_rate attribute is handled as a property so type checking can
# be done
@property
def sampling_rate(self):
'''
Number of samples per unit time.
(1/:attr:`sampling_period`)
'''
return self._sampling_rate
@sampling_rate.setter
def sampling_rate(self, rate):
'''
Setter for :attr:`sampling_rate`
'''
if rate is None:
raise ValueError('sampling_rate cannot be None')
elif not hasattr(rate, 'units'):
raise ValueError('sampling_rate must have units')
self._sampling_rate = rate
# sampling_period attribute is handled as a property on underlying rate
@property
def sampling_period(self):
'''
Interval between two samples.
(1/:attr:`sampling_rate`)
'''
return 1. / self.sampling_rate
@sampling_period.setter
def sampling_period(self, period):
'''
Setter for :attr:`sampling_period`
'''
if period is None:
raise ValueError('sampling_period cannot be None')
elif not hasattr(period, 'units'):
raise ValueError('sampling_period must have units')
self.sampling_rate = 1. / period
# t_start attribute is handled as a property so type checking can be done
@property
def t_start(self):
'''
Time when signal begins.
'''
return self._t_start
@t_start.setter
def t_start(self, start):
'''
Setter for :attr:`t_start`
'''
if start is None:
raise ValueError('t_start cannot be None')
self._t_start = start
@property
def duration(self):
'''
Signal duration
(:attr:`size` * :attr:`sampling_period`)
'''
return self.shape[0] / self.sampling_rate
@property
def t_stop(self):
'''
Time when signal ends.
(:attr:`t_start` + :attr:`duration`)
'''
return self.t_start + self.duration
@property
def times(self):
'''
The time points of each sample of the signal
(:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`)
'''
return self.t_start + np.arange(self.shape[0]) / self.sampling_rate
def rescale(self, units):
'''
Return a copy of the AnalogSignal(Array) converted to the specified
units
'''
to_dims = pq.quantity.validate_dimensionality(units)
if self.dimensionality == to_dims:
to_u = self.units
signal = np.array(self)
else:
to_u = pq.Quantity(1.0, to_dims)
from_u = pq.Quantity(1.0, self.dimensionality)
try:
cf = pq.quantity.get_conversion_factor(from_u, to_u)
except AssertionError:
raise ValueError('Unable to convert between units of "%s" \
and "%s"' % (from_u._dimensionality,
to_u._dimensionality))
signal = cf * self.magnitude
new = self.__class__(signal=signal, units=to_u,
sampling_rate=self.sampling_rate)
new._copy_data_complement(self)
new.annotations.update(self.annotations)
return new
def duplicate_with_new_array(self, signal):
'''
Create a new :class:`BaseAnalogSignal` with the same metadata
but different data
'''
#signal is the new signal
new = self.__class__(signal=signal, units=self.units,
sampling_rate=self.sampling_rate)
new._copy_data_complement(self)
new.annotations.update(self.annotations)
return new
def __eq__(self, other):
'''
Equality test (==)
'''
if (self.t_start != other.t_start or
self.sampling_rate != other.sampling_rate):
return False
return super(BaseAnalogSignal, self).__eq__(other)
def __ne__(self, other):
'''
Non-equality test (!=)
'''
return not self.__eq__(other)
def _check_consistency(self, other):
'''
Check if the attributes of another :class:`BaseAnalogSignal`
are compatible with this one.
'''
if isinstance(other, BaseAnalogSignal):
for attr in "t_start", "sampling_rate":
if getattr(self, attr) != getattr(other, attr):
raise ValueError("Inconsistent values of %s" % attr)
# how to handle name and annotations?
def _copy_data_complement(self, other):
'''
Copy the metadata from another :class:`BaseAnalogSignal`.
'''
for attr in ("t_start", "sampling_rate", "name", "file_origin",
"description", "channel_index", "annotations"):
setattr(self, attr, getattr(other, attr, None))
def _apply_operator(self, other, op, *args):
'''
Handle copying metadata to the new :class:`BaseAnalogSignal`
after a mathematical operation.
'''
self._check_consistency(other)
f = getattr(super(BaseAnalogSignal, self), op)
new_signal = f(other, *args)
new_signal._copy_data_complement(self)
return new_signal
def __add__(self, other, *args):
'''
Addition (+)
'''
return self._apply_operator(other, "__add__", *args)
def __sub__(self, other, *args):
'''
Subtraction (-)
'''
return self._apply_operator(other, "__sub__", *args)
def __mul__(self, other, *args):
'''
Multiplication (*)
'''
return self._apply_operator(other, "__mul__", *args)
def __truediv__(self, other, *args):
'''
Float division (/)
'''
return self._apply_operator(other, "__truediv__", *args)
def __div__(self, other, *args):
'''
Integer division (//)
'''
return self._apply_operator(other, "__div__", *args)
__radd__ = __add__
    __rmul__ = __mul__
def __rsub__(self, other, *args):
'''
Backwards subtraction (other-self)
'''
return self.__mul__(-1, *args) + other
def _repr_pretty_(self, pp, cycle):
'''
Handle pretty-printing the :class:`BaseAnalogSignal`.
'''
pp.text(" ".join([self.__class__.__name__,
"in",
str(self.units),
"with",
"x".join(map(str, self.shape)),
str(self.dtype),
"values",
]))
if self._has_repr_pretty_attrs_():
pp.breakable()
self._repr_pretty_attrs_(pp, cycle)
def _pp(line):
pp.breakable()
with pp.group(indent=1):
pp.text(line)
if hasattr(self, "channel_index"):
_pp("channel index: {0}".format(self.channel_index))
for line in ["sampling rate: {0}".format(self.sampling_rate),
"time: {0} to {1}".format(self.t_start, self.t_stop)
]:
_pp(line)
class AnalogSignal(BaseAnalogSignal):
'''
A continuous analog signal.
A representation of a continuous, analog signal acquired at time
:attr:`t_start` at a certain sampling rate.
Inherits from :class:`quantities.Quantity`, which in turn inherits from
:class:`numpy.ndarray`.
*Usage*::
>>> from neo.core import AnalogSignal
>>> from quantities import kHz, ms, nA, s, uV
>>> import numpy as np
>>>
>>> sig0 = AnalogSignal([1, 2, 3], sampling_rate=0.42*kHz,
... units='mV')
>>> sig1 = AnalogSignal([4, 5, 6]*nA, sampling_period=42*ms)
>>> sig2 = AnalogSignal(np.array([1.0, 2.0, 3.0]), t_start=42*ms,
... sampling_rate=0.42*kHz, units=uV)
>>> sig3 = AnalogSignal([1], units='V', day='Monday',
... sampling_period=1*s)
>>>
>>> sig3
<AnalogSignal(array([1]) * V, [0.0 s, 1.0 s], sampling rate: 1.0 1/s)>
>>> sig3.annotations['day']
'Monday'
>>> sig3[0]
array(1) * V
>>> sig3[::2]
<AnalogSignal(array([1]) * V, [0.0 s, 2.0 s], sampling rate: 0.5 1/s)>
*Required attributes/properties*:
:signal: (quantity array 1D, numpy array 1D, or list) The data itself.
:units: (quantity units) Required if the signal is a list or NumPy
array, not if it is a :class:`Quantity`
:sampling_rate: *or* :sampling_period: (quantity scalar) Number of
samples per unit time or
interval between two samples.
If both are specified, they are
checked for consistency.
*Recommended attributes/properties*:
:name: (str) A label for the dataset.
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
:t_start: (quantity scalar) Time when signal begins.
Default: 0.0 seconds
:channel_index: (int) You can use this to order :class:`AnalogSignal`
            objects in any way you want. :class:`AnalogSignalArray` and
:class:`Unit` objects can be given indexes as well so related
objects can be linked together.
*Optional attributes/properties*:
:dtype: (numpy dtype or str) Override the dtype of the signal array.
:copy: (bool) True by default.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:sampling_rate: (quantity scalar) Number of samples per unit time.
(1/:attr:`sampling_period`)
:sampling_period: (quantity scalar) Interval between two samples.
(1/:attr:`sampling_rate`)
:duration: (quantity scalar) Signal duration, read-only.
(:attr:`size` * :attr:`sampling_period`)
:t_stop: (quantity scalar) Time when signal ends, read-only.
(:attr:`t_start` + :attr:`duration`)
:times: (quantity 1D) The time points of each sample of the signal,
read-only.
(:attr:`t_start` + arange(:attr:`shape`)/:attr:`sampling_rate`)
*Slicing*:
:class:`AnalogSignal` objects can be sliced. When this occurs, a new
:class:`AnalogSignal` (actually a view) is returned, with the same
metadata, except that :attr:`sampling_period` is changed if
the step size is greater than 1, and :attr:`t_start` is changed if
the start index is greater than 0. Getting a single item
returns a :class:`~quantity.Quantity` scalar.
*Operations available on this object*:
== != + * /
'''
def __new__(cls, signal, units=None, dtype=None, copy=True,
t_start=0*pq.s, sampling_rate=None, sampling_period=None,
name=None, file_origin=None, description=None,
channel_index=None, **annotations):
'''
Constructs new :class:`AnalogSignal` from data.
        This is called whenever a new :class:`AnalogSignal` is created from
the constructor, but not when slicing.
'''
obj = BaseAnalogSignal.__new__(cls, signal, units, dtype, copy,
t_start, sampling_rate, sampling_period,
name, file_origin, description,
channel_index, **annotations)
return obj
def merge(self, other):
'''
Merging is not supported in :class:`AnalogSignal`.
'''
raise NotImplementedError('Cannot merge AnalogSignal objects')
| bsd-3-clause | 3,470,357,480,423,491,000 | 36.017794 | 79 | 0.55273 | false | 4.256137 | false | false | false |
majora2007/plexpy | lib/pygazelle/torrent_group.py | 27 | 7252 | from .torrent import Torrent
class InvalidTorrentGroupException(Exception):
pass
class TorrentGroup(object):
"""
Represents a Torrent Group (usually an album). Note that TorrentGroup.torrents may not be comprehensive if you
haven't called TorrentGroup.update_group_data()...it may have only been populated with filtered search results.
Check TorrentGroup.has_complete_torrent_list (boolean) to be sure.
"""
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.name = None
self.wiki_body = None
self.wiki_image = None
self.year = None
self.record_label = None
self.catalogue_number = None
self.tags = []
self.release_type = None
self.vanity_house = None
self.has_bookmarked = None
self.category = None
self.time = None
self.music_info = None
self.torrents = []
self.has_complete_torrent_list = False
self.parent_api.cached_torrent_groups[self.id] = self
def update_group_data(self):
response = self.parent_api.request(action='torrentgroup', id=self.id)
self.set_group_data(response)
def set_group_data(self, torrent_group_json_response):
"""
Takes parsed JSON response from 'torrentgroup' action on api, and updates relevant information.
To avoid problems, only pass in data from an API call that used this torrentgroup's ID as an argument.
"""
if self.id != torrent_group_json_response['group']['id']:
raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'artist' API call with a different id." +
" Should be %s, got %s" % (self.id, torrent_group_json_response['group']['groupId']) )
self.name = torrent_group_json_response['group']['name']
self.year = torrent_group_json_response['group']['year']
self.wiki_body = torrent_group_json_response['group']['wikiBody']
self.wiki_image = torrent_group_json_response['group']['wikiImage']
self.record_label = torrent_group_json_response['group']['recordLabel']
self.catalogue_number = torrent_group_json_response['group']['catalogueNumber']
self.release_type = torrent_group_json_response['group']['releaseType']
self.category = self.parent_api.get_category(torrent_group_json_response['group']['categoryId'],
torrent_group_json_response['group']['categoryName'])
self.time = torrent_group_json_response['group']['time']
self.vanity_house = torrent_group_json_response['group']['vanityHouse']
self.music_info = torrent_group_json_response['group']['musicInfo']
self.music_info['artists'] = [ self.parent_api.get_artist(artist['id'], artist['name'])
for artist in self.music_info['artists'] ]
self.music_info['with'] = [ self.parent_api.get_artist(artist['id'], artist['name'])
for artist in self.music_info['with'] ]
if 'torrents' in torrent_group_json_response:
self.torrents = []
for torrent_dict in torrent_group_json_response['torrents']:
torrent_dict['groupId'] = self.id
torrent = self.parent_api.get_torrent(torrent_dict['id'])
torrent.set_torrent_group_data(torrent_dict)
self.torrents.append(torrent)
self.has_complete_torrent_list = True
elif 'torrent' in torrent_group_json_response:
torrent = self.parent_api.get_torrent(torrent_group_json_response['torrent']['id'])
self.torrents.append(torrent)
def set_artist_group_data(self, artist_group_json_response):
"""
Takes torrentgroup section from parsed JSON response from 'artist' action on api, and updates relevant information.
"""
if self.id != artist_group_json_response['groupId']:
raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'artist' API call with a different id." +
" Should be %s, got %s" % (self.id, artist_group_json_response['groupId']) )
self.name = artist_group_json_response['groupName']
self.year = artist_group_json_response['groupYear']
self.record_label = artist_group_json_response['groupRecordLabel']
self.catalogue_number = artist_group_json_response['groupCatalogueNumber']
self.tags = []
for tag_name in artist_group_json_response['tags']:
tag = self.parent_api.get_tag(tag_name)
self.tags.append(tag)
self.release_type = artist_group_json_response['releaseType']
self.has_bookmarked = artist_group_json_response['hasBookmarked']
self.torrents = []
for torrent_dict in artist_group_json_response['torrent']:
torrent = self.parent_api.get_torrent(torrent_dict['id'])
torrent.set_torrent_artist_data(torrent_dict)
self.torrents.append(torrent)
self.has_complete_torrent_list = True
def set_torrent_search_data(self, search_json_response):
if self.id != search_json_response['groupId']:
raise InvalidTorrentGroupException("Tried to update a TorrentGroup's information from an 'browse'/search API call with a different id." +
" Should be %s, got %s" % (self.id, search_json_response['groupId']) )
self.name = search_json_response['groupName']
# purposefully ignoring search_json_response['artist']...the other data updates don't include it, would just get confusing
self.tags = []
for tag_name in search_json_response['tags']:
tag = self.parent_api.get_tag(tag_name)
self.tags.append(tag)
# some of the below keys aren't in things like comics...should probably watch out for this elsewhere
if 'bookmarked' in search_json_response.keys():
self.has_bookmarked = search_json_response['bookmarked']
if 'vanityHouse' in search_json_response.keys():
self.vanity_house = search_json_response['vanityHouse']
if 'groupYear' in search_json_response.keys():
self.year = search_json_response['groupYear']
if 'releaseType' in search_json_response.keys():
self.release_type = search_json_response['releaseType']
self.time = search_json_response['groupTime']
if 'torrentId' in search_json_response.keys():
search_json_response['torrents'] = [{'torrentId': search_json_response['torrentId']}]
new_torrents = []
for torrent_dict in search_json_response['torrents']:
torrent_dict['groupId'] = self.id
torrent = self.parent_api.get_torrent(torrent_dict['torrentId'])
new_torrents.append(torrent)
# torrent information gets populated in API search call, no need to duplicate that here
self.torrents = self.torrents + new_torrents
def __repr__(self):
return "TorrentGroup: %s - ID: %s" % (self.name, self.id)
| gpl-3.0 | -8,363,289,931,290,770,000 | 51.172662 | 149 | 0.628378 | false | 4.026652 | false | false | false |
Snergster/virl-salt | openstack/neutron/files/kilo/plugin.py | 1 | 72636 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from eventlet import greenthread
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as os_db_exception
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as const
from neutron.common import exceptions as exc
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dvr_mac_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
from neutron.db import netmtu_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.db import vlantransparent_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import portsecurity as psec
from neutron.extensions import providernet as provider
from neutron.extensions import securitygroup as ext_sg
from neutron.extensions import vlantransparent
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import rpc
LOG = log.getLogger(__name__)
MAX_BIND_TRIES = 10
# REVISIT(rkukura): Move this and other network_type constants to
# providernet.py?
TYPE_MULTI_SEGMENT = 'multi-segment'
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
dvr_mac_db.DVRDbMixin,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
addr_pair_db.AllowedAddressPairsMixin,
vlantransparent_db.Vlantransparent_db_mixin,
extradhcpopt_db.ExtraDhcpOptMixin,
netmtu_db.Netmtu_db_mixin):
"""Implement the Neutron L2 abstractions using modules.
Ml2Plugin is a Neutron plugin based on separately extensible sets
of network types and mechanisms for connecting to networks of
those types. The network types and mechanisms are implemented as
drivers loaded via Python entry points. Networks can be made up of
multiple segments (not yet fully implemented).
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# List of supported extensions
_supported_extension_aliases = ["provider", "external-net", "binding",
"quotas", "security-group", "agent",
"dhcp_agent_scheduler",
"multi-provider", "allowed-address-pairs",
"extra_dhcp_opt", "subnet_allocation",
"net-mtu", "vlan-transparent"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
aliases += self.extension_manager.extension_aliases()
sg_rpc.disable_security_group_extension_by_config(aliases)
vlantransparent.disable_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
# First load drivers, then initialize DB, then initialize drivers
self.type_manager = managers.TypeManager()
self.extension_manager = managers.ExtensionManager()
self.mechanism_manager = managers.MechanismManager()
super(Ml2Plugin, self).__init__()
self.type_manager.initialize()
self.extension_manager.initialize()
self.mechanism_manager.initialize()
self._setup_rpc()
# REVISIT(rkukura): Use stevedore for these?
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.start_periodic_dhcp_agent_status_check()
LOG.info(_LI("Modular L2 Plugin initialization complete"))
def _setup_rpc(self):
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
def start_rpc_listeners(self):
self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager),
securitygroups_rpc.SecurityGroupServerRpcCallback(),
dvr_rpc.DVRServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback(),
metadata_rpc.MetadataRpcCallback()]
self.topic = topics.PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
return self.conn.consume_in_threads()
def _filter_nets_provider(self, context, networks, filters):
return [network
for network in networks
if self.type_manager.network_matches_filters(network, filters)
]
def _check_mac_update_allowed(self, orig_port, port, binding):
unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED,
portbindings.VIF_TYPE_UNBOUND)
new_mac = port.get('mac_address')
mac_change = (new_mac is not None and
orig_port['mac_address'] != new_mac)
if (mac_change and binding.vif_type not in unplugged_types):
raise exc.PortBound(port_id=orig_port['id'],
vif_type=binding.vif_type,
old_mac=orig_port['mac_address'],
new_mac=port['mac_address'])
return mac_change
def _process_port_binding(self, mech_context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
changes = False
host = attrs and attrs.get(portbindings.HOST_ID)
original_host = binding.host
if (attributes.is_attr_set(host) and
original_host != host):
binding.host = host
changes = True
vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
if (attributes.is_attr_set(vnic_type) and
binding.vnic_type != vnic_type):
binding.vnic_type = vnic_type
changes = True
# treat None as clear of profile.
profile = None
if attrs and portbindings.PROFILE in attrs:
profile = attrs.get(portbindings.PROFILE) or {}
if profile not in (None, attributes.ATTR_NOT_SPECIFIED,
self._get_profile(binding)):
binding.profile = jsonutils.dumps(profile)
if len(binding.profile) > models.BINDING_PROFILE_LEN:
msg = _("binding:profile value too large")
raise exc.InvalidInput(error_message=msg)
changes = True
# Unbind the port if needed.
if changes:
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
mech_context._clear_binding_levels()
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
mech_context._clear_binding_levels()
binding.host = ''
self._update_port_dict_binding(port, binding)
return changes
def _bind_port_if_needed(self, context, allow_notify=False,
need_notify=False):
plugin_context = context._plugin_context
port_id = context._port['id']
# Since the mechanism driver bind_port() calls must be made
# outside a DB transaction locking the port state, it is
# possible (but unlikely) that the port's state could change
# concurrently while these calls are being made. If another
# thread or process succeeds in binding the port before this
# thread commits its results, the already committed results are
# used. If attributes such as binding:host_id,
# binding:profile, or binding:vnic_type are updated
# concurrently, this loop retries binding using the new
# values.
count = 0
while True:
# First, determine whether it is necessary and possible to
# bind the port.
binding = context._binding
if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND
or not binding.host):
# We either don't need to bind the port, or can't, so
# notify if needed and return.
if allow_notify and need_notify:
self._notify_port_updated(context)
return context
# Limit binding attempts to avoid any possibility of
# infinite looping and to ensure an error is logged
# instead. This does not need to be tunable because no
# more than a couple attempts should ever be required in
# normal operation. Log at info level if not 1st attempt.
count += 1
if count > MAX_BIND_TRIES:
LOG.error(_LE("Failed to commit binding results for %(port)s "
"after %(max)s tries"),
{'port': port_id, 'max': MAX_BIND_TRIES})
return context
if count > 1:
greenthread.sleep(0) # yield
LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
{'count': count, 'port': port_id})
# The port isn't already bound and the necessary
# information is available, so attempt to bind the port.
bind_context = self._bind_port(context)
# Now try to commit result of attempting to bind the port.
new_context, did_commit = self._commit_port_binding(
plugin_context, port_id, binding, bind_context)
if not new_context:
# The port has been deleted concurrently, so just
# return the unbound result from the initial
# transaction that completed before the deletion.
LOG.debug("Port %s has been deleted concurrently",
port_id)
return context
# Need to notify if we succeed and our results were
# committed.
if did_commit and (new_context._binding.vif_type !=
portbindings.VIF_TYPE_BINDING_FAILED):
need_notify = True
context = new_context
def _bind_port(self, orig_context):
# Construct a new PortContext from the one from the previous
# transaction.
port = orig_context._port
orig_binding = orig_context._binding
new_binding = models.PortBinding(
host=orig_binding.host,
vnic_type=orig_binding.vnic_type,
profile=orig_binding.profile,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vif_details=''
)
self._update_port_dict_binding(port, new_binding)
new_context = driver_context.PortContext(
self, orig_context._plugin_context, port,
orig_context._network_context._network, new_binding, None)
# Attempt to bind the port and return the context with the
# result.
self.mechanism_manager.bind_port(new_context)
return new_context
def _commit_port_binding(self, plugin_context, port_id, orig_binding,
new_context):
session = plugin_context.session
new_binding = new_context._binding
# After we've attempted to bind the port, we begin a
# transaction, get the current port state, and decide whether
# to commit the binding results.
#
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
port_db, cur_binding = db.get_locked_port_and_binding(session,
port_id)
if not port_db:
# The port has been deleted concurrently.
return (None, None)
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = new_context.network.current
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
# REVISIT(rkukura): The PortBinding instance from the
# ml2_port_bindings table, returned as cur_binding
# from db.get_locked_port_and_binding() above, is
# currently not used for DVR distributed ports, and is
# replaced here with the DVRPortBinding instance from
# the ml2_dvr_port_bindings table specific to the host
# on which the distributed port is being bound. It
# would be possible to optimize this code to avoid
# fetching the PortBinding instance in the DVR case,
# and even to avoid creating the unused entry in the
# ml2_port_bindings table. But the upcoming resolution
# for bug 1367391 will eliminate the
# ml2_dvr_port_bindings table, use the
# ml2_port_bindings table to store non-host-specific
# fields for both distributed and non-distributed
# ports, and introduce a new ml2_port_binding_hosts
# table for the fields that need to be host-specific
# in the distributed case. Since the PortBinding
# instance will then be needed, it does not make sense
# to optimize this code to avoid fetching it.
cur_binding = db.get_dvr_port_binding_by_host(
session, port_id, orig_binding.host)
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding, None,
original_port=oport)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
orig_binding.host == cur_binding.host and
orig_binding.vnic_type == cur_binding.vnic_type and
orig_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
db.clear_binding_levels(session, port_id, cur_binding.host)
db.set_binding_levels(session, new_context._binding_levels)
cur_context._binding_levels = new_context._binding_levels
# Update PortContext's port dictionary to reflect the
# updated binding state.
self._update_port_dict_binding(port, cur_binding)
# Update the port status if requested by the bound driver.
if (new_context._binding_levels and
new_context._new_port_status):
port_db.status = new_context._new_port_status
port['status'] = new_context._new_port_status
# Call the mechanism driver precommit methods, commit
# the results, and call the postcommit methods.
self.mechanism_manager.update_port_precommit(cur_context)
if commit:
self.mechanism_manager.update_port_postcommit(cur_context)
# Continue, using the port state as of the transaction that
# just finished, whether that transaction committed new
# results or discovered concurrent port state changes.
return (cur_context, commit)
def _update_port_dict_binding(self, port, binding):
port[portbindings.HOST_ID] = binding.host
port[portbindings.VNIC_TYPE] = binding.vnic_type
port[portbindings.PROFILE] = self._get_profile(binding)
port[portbindings.VIF_TYPE] = binding.vif_type
port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)
def _get_vif_details(self, binding):
if binding.vif_details:
try:
return jsonutils.loads(binding.vif_details)
except Exception:
LOG.error(_LE("Serialized vif_details DB value '%(value)s' "
"for port %(port)s is invalid"),
{'value': binding.vif_details,
'port': binding.port_id})
return {}
def _get_profile(self, binding):
if binding.profile:
try:
return jsonutils.loads(binding.profile)
except Exception:
LOG.error(_LE("Serialized profile DB value '%(value)s' for "
"port %(port)s is invalid"),
{'value': binding.profile,
'port': binding.port_id})
return {}
def _ml2_extend_port_dict_binding(self, port_res, port_db):
# None when called during unit tests for other plugins.
if port_db.port_binding:
self._update_port_dict_binding(port_res, port_db.port_binding)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_extend_port_dict_binding'])
# Register extend dict methods for network and port resources.
# Each mechanism driver that supports extend attribute for the resources
# can add those attribute to the result.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_md_extend_port_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])
def _ml2_md_extend_network_dict(self, result, netdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_network_dict(session, netdb, result)
def _ml2_md_extend_port_dict(self, result, portdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_port_dict(session, portdb, result)
def _ml2_md_extend_subnet_dict(self, result, subnetdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_subnet_dict(
session, subnetdb, result)
# Note - The following hook methods have "ml2" in their names so
# that they are not called twice during unit tests due to global
# registration of hooks in portbindings_db.py used by other
# plugins.
def _ml2_port_model_hook(self, context, original_model, query):
query = query.outerjoin(models.PortBinding,
(original_model.id ==
models.PortBinding.port_id))
return query
def _ml2_port_result_filter_hook(self, query, filters):
values = filters and filters.get(portbindings.HOST_ID, [])
if not values:
return query
return query.filter(models.PortBinding.host.in_(values))
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port,
"ml2_port_bindings",
'_ml2_port_model_hook',
None,
'_ml2_port_result_filter_hook')
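    # Out-of-tree customization (apparently VIRL-specific): for ports whose
    # device_owner is 'virl:coreos', return the binding host so updates can be
    # notified directly to that host; return None for all other ports.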
def _detect_faked_port(self, mech_context):
port = mech_context._port
host = port.get('binding:host_id')
owner = port.get('device_owner')
port = mech_context._original_port
if port:
if not host:
host = port.get('binding:host_id')
if not owner:
owner = port.get('device_owner')
return host if owner == 'virl:coreos' else None
def _notify_port_updated(self, mech_context):
port = mech_context._port
segment = mech_context.bottom_bound_segment
faked = self._detect_faked_port(mech_context)
if faked:
self.notifier.port_update(mech_context._plugin_context, port,
None, None, None, faked)
return
if not segment:
# REVISIT(rkukura): This should notify agent to unplug port
network = mech_context.network.current
LOG.debug("In _notify_port_updated(), no bound segment for "
"port %(port_id)s on network %(network_id)s",
{'port_id': port['id'], 'network_id': network['id']})
return
self.notifier.port_update(mech_context._plugin_context, port,
segment[api.NETWORK_TYPE],
segment[api.SEGMENTATION_ID],
segment[api.PHYSICAL_NETWORK])
def _delete_objects(self, context, resource, objects):
delete_op = getattr(self, 'delete_%s' % resource)
for obj in objects:
try:
delete_op(context, obj['result']['id'])
except KeyError:
LOG.exception(_LE("Could not find %s to delete."),
resource)
except Exception:
LOG.exception(_LE("Could not delete %(res)s %(id)s."),
{'res': resource,
'id': obj['result']['id']})
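    # Bulk-create helper shared by the network/subnet/port bulk operations:
    # all requested objects are created in a single DB transaction, then the
    # mechanism drivers' create_*_postcommit hooks run per object; if a
    # postcommit call fails, every object created in this batch is deleted.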
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True)
def _create_bulk_ml2(self, resource, context, request_items):
objects = []
collection = "%ss" % resource
items = request_items[collection]
try:
with context.session.begin(subtransactions=True):
obj_creator = getattr(self, '_create_%s_db' % resource)
for item in items:
attrs = item[resource]
result, mech_context = obj_creator(context, item)
objects.append({'mech_context': mech_context,
'result': result,
'attributes': attrs})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("An exception occurred while creating "
"the %(resource)s:%(item)s"),
{'resource': resource, 'item': item})
try:
postcommit_op = getattr(self.mechanism_manager,
'create_%s_postcommit' % resource)
for obj in objects:
postcommit_op(obj['mech_context'])
return objects
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
resource_ids = [res['result']['id'] for res in objects]
LOG.exception(_LE("mechanism_manager.create_%(res)s"
"_postcommit failed for %(res)s: "
"'%(failed_id)s'. Deleting "
"%(res)ss %(resource_ids)s"),
{'res': resource,
'failed_id': obj['result']['id'],
'resource_ids': ', '.join(resource_ids)})
self._delete_objects(context, resource, objects)
def _create_network_db(self, context, network):
net_data = network[attributes.NETWORK]
tenant_id = self._get_tenant_id_for_create(context, net_data)
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
result = super(Ml2Plugin, self).create_network(context, network)
self.extension_manager.process_create_network(context, net_data,
result)
self._process_l3_create(context, result, net_data)
net_data['id'] = result['id']
self.type_manager.create_network_segments(context, net_data,
tenant_id)
self.type_manager.extend_network_dict_provider(context, result)
mech_context = driver_context.NetworkContext(self, context,
result)
self.mechanism_manager.create_network_precommit(mech_context)
if net_data.get(api.MTU, 0) > 0:
res = super(Ml2Plugin, self).update_network(context,
result['id'], {'network': {api.MTU: net_data[api.MTU]}})
result[api.MTU] = res.get(api.MTU, 0)
return result, mech_context
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True)
def _create_network_with_retries(self, context, network):
return self._create_network_db(context, network)
def create_network(self, context, network):
result, mech_context = self._create_network_with_retries(context,
network)
try:
self.mechanism_manager.create_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_network_postcommit "
"failed, deleting network '%s'"), result['id'])
self.delete_network(context, result['id'])
return result
def create_network_bulk(self, context, networks):
objects = self._create_bulk_ml2(attributes.NETWORK, context, networks)
return [obj['result'] for obj in objects]
def update_network(self, context, id, network):
net_data = network[attributes.NETWORK]
provider._raise_if_updates_provider_attributes(net_data)
session = context.session
with session.begin(subtransactions=True):
original_network = super(Ml2Plugin, self).get_network(context, id)
updated_network = super(Ml2Plugin, self).update_network(context,
id,
network)
self.extension_manager.process_update_network(context, net_data,
updated_network)
self._process_l3_update(context, updated_network, net_data)
self.type_manager.extend_network_dict_provider(context,
updated_network)
mech_context = driver_context.NetworkContext(
self, context, updated_network,
original_network=original_network)
self.mechanism_manager.update_network_precommit(mech_context)
# TODO(apech) - handle errors raised by update_network, potentially
# by re-calling update_network with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_network_postcommit(mech_context)
return updated_network
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).get_network(context, id, None)
self.type_manager.extend_network_dict_provider(context, result)
return self._fields(result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(Ml2Plugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self.type_manager.extend_network_dict_provider(context, net)
nets = self._filter_nets_provider(context, nets, filters)
nets = self._filter_nets_l3(context, nets, filters)
return [self._fields(net, fields) for net in nets]
def _delete_ports(self, context, ports):
for port in ports:
try:
self.delete_port(context, port.id)
except (exc.PortNotFound, sa_exc.ObjectDeletedError):
context.session.expunge(port)
                # A concurrent port deletion can be performed by
                # release_dhcp_port, triggered by a concurrent subnet_delete.
LOG.info(_LI("Port %s was deleted concurrently"), port.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception auto-deleting port %s"),
port.id)
def _delete_subnets(self, context, subnets):
for subnet in subnets:
try:
self.delete_subnet(context, subnet.id)
except (exc.SubnetNotFound, sa_exc.ObjectDeletedError):
context.session.expunge(subnet)
LOG.info(_LI("Subnet %s was deleted concurrently"),
subnet.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception auto-deleting subnet %s"),
subnet.id)
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
# subnets from the DB without invoking the derived class's
# delete_port() or delete_subnet(), preventing mechanism
# drivers from being called. This approach should be revisited
# when the API layer is reworked during icehouse.
LOG.debug("Deleting network %s", id)
session = context.session
while True:
try:
# REVISIT: Serialize this operation with a semaphore
# to prevent deadlock waiting to acquire a DB lock
# held by another thread in the same process, leading
# to 'lock wait timeout' errors.
#
# Process L3 first, since, depending on the L3 plugin, it may
# involve locking the db-access semaphore, sending RPC
# notifications, and/or calling delete_port on this plugin.
# Additionally, a rollback may not be enough to undo the
# deletion of a floating IP with certain L3 backends.
self._process_l3_delete(context, id)
# Using query().with_lockmode isn't necessary. Foreign-key
# constraints prevent deletion if concurrent creation happens.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Ports to auto-delete: %s", ports)
only_auto_del = all(p.device_owner
in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
LOG.debug("Tenant-owned ports exist")
raise exc.NetworkInUse(net_id=id)
# Get subnets to auto-delete.
subnets = (session.query(models_v2.Subnet).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Subnets to auto-delete: %s", subnets)
if not (ports or subnets):
network = self.get_network(context, id)
mech_context = driver_context.NetworkContext(self,
context,
network)
self.mechanism_manager.delete_network_precommit(
mech_context)
self.type_manager.release_network_segments(session, id)
record = self._get_network(context, id)
LOG.debug("Deleting network record %s", record)
session.delete(record)
# The segment records are deleted via cascade from the
# network record, so explicit removal is not necessary.
LOG.debug("Committing transaction")
break
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
LOG.warning(_LW("A concurrent port creation has "
"occurred"))
continue
self._delete_ports(context, ports)
self._delete_subnets(context, subnets)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the network. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_network_postcommit"
" failed"))
self.notifier.network_delete(context, id)
def _create_subnet_db(self, context, subnet):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).create_subnet(context, subnet)
self.extension_manager.process_create_subnet(
context, subnet[attributes.SUBNET], result)
mech_context = driver_context.SubnetContext(self, context, result)
self.mechanism_manager.create_subnet_precommit(mech_context)
return result, mech_context
def create_subnet(self, context, subnet):
result, mech_context = self._create_subnet_db(context, subnet)
try:
self.mechanism_manager.create_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_subnet_postcommit "
"failed, deleting subnet '%s'"), result['id'])
self.delete_subnet(context, result['id'])
return result
def create_subnet_bulk(self, context, subnets):
objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets)
return [obj['result'] for obj in objects]
def update_subnet(self, context, id, subnet):
session = context.session
with session.begin(subtransactions=True):
original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
updated_subnet = super(Ml2Plugin, self).update_subnet(
context, id, subnet)
self.extension_manager.process_update_subnet(
context, subnet[attributes.SUBNET], updated_subnet)
mech_context = driver_context.SubnetContext(
self, context, updated_subnet, original_subnet=original_subnet)
self.mechanism_manager.update_subnet_precommit(mech_context)
# TODO(apech) - handle errors raised by update_subnet, potentially
# by re-calling update_subnet with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_subnet_postcommit(mech_context)
return updated_subnet
def delete_subnet(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
# function is not used because it deallocates the subnet's addresses
# from ports in the DB without invoking the derived class's
# update_port(), preventing mechanism drivers from being called.
# This approach should be revisited when the API layer is reworked
# during icehouse.
LOG.debug("Deleting subnet %s", id)
session = context.session
while True:
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock
# wait timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
record = self._get_subnet(context, id)
subnet = self._make_subnet_dict(record, None)
qry_allocated = (session.query(models_v2.IPAllocation).
filter_by(subnet_id=id).
join(models_v2.Port))
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
# Remove network owned ports, and delete IP allocations
# for IPv6 addresses which were automatically generated
# via SLAAC
if is_auto_addr_subnet:
self._subnet_check_ip_allocations_internal_router_ports(
context, id)
else:
qry_allocated = (
qry_allocated.filter(models_v2.Port.device_owner.
in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)))
allocated = qry_allocated.all()
# Delete all the IPAllocation that can be auto-deleted
if allocated:
map(session.delete, allocated)
LOG.debug("Ports to auto-deallocate: %s", allocated)
                # Check if there are more IP allocations, unless
                # is_auto_address_subnet is True, in which case the check is
                # unnecessary. Besides being wasteful for this class of subnet,
                # the extra check is also error-prone: when the isolation level
                # is set to READ COMMITTED, allocations made concurrently will
                # be returned by this query.
if not is_auto_addr_subnet:
if self._subnet_check_ip_allocations(context, id):
LOG.debug("Found IP allocations on subnet %s, "
"cannot delete", id)
raise exc.SubnetInUse(subnet_id=id)
# If allocated is None, then all the IPAllocation were
# correctly deleted during the previous pass.
if not allocated:
mech_context = driver_context.SubnetContext(self, context,
subnet)
self.mechanism_manager.delete_subnet_precommit(
mech_context)
LOG.debug("Deleting subnet record")
session.delete(record)
LOG.debug("Committing transaction")
break
for a in allocated:
if a.port_id:
# calling update_port() for each allocation to remove the
# IP from the port and call the MechanismDrivers
data = {attributes.PORT:
{'fixed_ips': [{'subnet_id': ip.subnet_id,
'ip_address': ip.ip_address}
for ip in a.ports.fixed_ips
if ip.subnet_id != id]}}
try:
self.update_port(context, a.port_id, data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception deleting fixed_ip "
"from port %s"), a.port_id)
try:
self.mechanism_manager.delete_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the subnet. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed"))
    # TODO(yalei) - will be simplified after security group and address pair
    # are converted to ext drivers too.
def _portsec_ext_port_create_processing(self, context, port_data, port):
attrs = port[attributes.PORT]
port_security = ((port_data.get(psec.PORTSECURITY) is None) or
port_data[psec.PORTSECURITY])
# allowed address pair checks
if attributes.is_attr_set(attrs.get(addr_pair.ADDRESS_PAIRS)):
if not port_security:
raise addr_pair.AddressPairAndPortSecurityRequired()
else:
# remove ATTR_NOT_SPECIFIED
attrs[addr_pair.ADDRESS_PAIRS] = []
if port_security:
self._ensure_default_security_group_on_port(context, port)
elif attributes.is_attr_set(attrs.get(ext_sg.SECURITYGROUPS)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
def _create_port_db(self, context, port):
attrs = port[attributes.PORT]
if not attrs.get('status'):
attrs['status'] = const.PORT_STATUS_DOWN
session = context.session
with session.begin(subtransactions=True):
dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, [])
result = super(Ml2Plugin, self).create_port(context, port)
self.extension_manager.process_create_port(context, attrs, result)
self._portsec_ext_port_create_processing(context, result, port)
            # Security group IDs must be looked up only after the port
            # security checks above have been applied.
sgids = self._get_security_groups_on_port(context, port)
self._process_port_create_security_group(context, result, sgids)
network = self.get_network(context, result['network_id'])
binding = db.add_port_binding(session, result['id'])
mech_context = driver_context.PortContext(self, context, result,
network, binding, None)
self._process_port_binding(mech_context, attrs)
result[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, result,
attrs.get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, result,
dhcp_opts)
self.mechanism_manager.create_port_precommit(mech_context)
return result, mech_context
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True)
def create_port(self, context, port):
result, mech_context = self._create_port_db(context, port)
# notify any plugin that is interested in port create events
kwargs = {'context': context, 'port': result}
registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
try:
self.mechanism_manager.create_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_port_postcommit "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
self.notify_security_groups_member_updated(context, result)
try:
bound_context = self._bind_port_if_needed(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("_bind_port_if_needed "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
return bound_context._port
def create_port_bulk(self, context, ports):
# Create ports which request fixed_ips first, to avoid conflicts
# with automatically assigned addresses from the pool
fixed_ports = list()
blank_ports = list()
fixed_indices = list()
for index, port in enumerate(ports['ports']):
fixed = port['port'].get('fixed_ips')
if fixed in (None, attributes.ATTR_NOT_SPECIFIED):
fixed = None
else:
for obj in fixed:
if obj.get('ip_address'):
break
else:
fixed = None
if fixed:
fixed_ports.append(port)
fixed_indices.append(index)
else:
blank_ports.append(port)
if fixed_ports and blank_ports:
ports['ports'] = fixed_ports + blank_ports
else:
fixed_indices = None
objects = self._create_bulk_ml2(attributes.PORT, context, ports)
# Recreate the original order of created objects
if fixed_indices:
reordered = [None] * len(objects)
fixed_iter = iter(fixed_indices)
fixed = next(fixed_iter)
blank = 0
for obj in objects:
# Fill in fixed ports while indices are not exhausted
if fixed is not None:
reordered[fixed] = obj
try:
fixed = next(fixed_iter)
except StopIteration:
fixed = None
continue
# Fill in blank spots for the rest
while reordered[blank] is not None:
blank += 1
reordered[blank] = obj
blank += 1
objects = reordered
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
results = [obj['result'] for obj in objects]
self.notify_security_groups_member_updated_bulk(context, results)
for obj in objects:
attrs = obj['attributes']
if attrs and attrs.get(portbindings.HOST_ID):
kwargs = {'context': context, 'port': obj['result']}
registry.notify(
resources.PORT, events.AFTER_CREATE, self, **kwargs)
try:
for obj in objects:
obj['bound_context'] = self._bind_port_if_needed(
obj['mech_context'])
return [obj['bound_context']._port for obj in objects]
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
resource_ids = [res['result']['id'] for res in objects]
LOG.error(_LE("_bind_port_if_needed failed. "
"Deleting all ports from create bulk '%s'"),
resource_ids)
self._delete_objects(context, attributes.PORT, objects)
    # TODO(yalei) - will be simplified after security group and address pair
    # are converted to ext drivers too.
def _portsec_ext_port_update_processing(self, updated_port, context, port,
id):
port_security = ((updated_port.get(psec.PORTSECURITY) is None) or
updated_port[psec.PORTSECURITY])
if port_security:
return
# check the address-pairs
if self._check_update_has_allowed_address_pairs(port):
# has address pairs in request
raise addr_pair.AddressPairAndPortSecurityRequired()
elif (not
self._check_update_deletes_allowed_address_pairs(port)):
# not a request for deleting the address-pairs
updated_port[addr_pair.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id))
# check if address pairs has been in db, if address pairs could
# be put in extension driver, we can refine here.
if updated_port[addr_pair.ADDRESS_PAIRS]:
raise addr_pair.AddressPairAndPortSecurityRequired()
# checks if security groups were updated adding/modifying
# security groups, port security is set
if self._check_update_has_security_groups(port):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
elif (not
self._check_update_deletes_security_groups(port)):
# Update did not have security groups passed in. Check
# that port does not have any security groups already on it.
filters = {'port_id': [id]}
security_groups = (
super(Ml2Plugin, self)._get_port_security_group_bindings(
context, filters)
)
if security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
def update_port(self, context, id, port):
attrs = port[attributes.PORT]
need_port_update_notify = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
raise exc.PortNotFound(port_id=id)
mac_address_updated = self._check_mac_update_allowed(
port_db, attrs, binding)
need_port_update_notify |= mac_address_updated
original_port = self._make_port_dict(port_db)
updated_port = super(Ml2Plugin, self).update_port(context, id,
port)
self.extension_manager.process_update_port(context, attrs,
updated_port)
self._portsec_ext_port_update_processing(updated_port, context,
port, id)
if original_port['device_id'] != updated_port['device_id']:
need_port_update_notify = True
if (psec.PORTSECURITY in attrs) and (
original_port[psec.PORTSECURITY] !=
updated_port[psec.PORTSECURITY]):
need_port_update_notify = True
if addr_pair.ADDRESS_PAIRS in attrs:
need_port_update_notify |= (
self.update_address_pairs_on_port(context, id, port,
original_port,
updated_port))
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
levels = db.get_binding_levels(session, id, binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding, levels,
original_port=original_port)
need_port_update_notify |= self._process_port_binding(
mech_context, attrs)
self.mechanism_manager.update_port_precommit(mech_context)
# Notifications must be sent after the above transaction is complete
kwargs = {
'context': context,
'port': updated_port,
'mac_address_updated': mac_address_updated,
'original_port': original_port,
}
registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
# TODO(apech) - handle errors raised by update_port, potentially
# by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_port_postcommit(mech_context)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
bound_context = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_context._port
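    # For DVR ports: drop any existing binding state (vif type/details and
    # binding levels) and record the host and router id supplied in the
    # request, leaving the port unbound until _bind_port_if_needed runs.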
def _process_dvr_port_binding(self, mech_context, context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
binding.vif_details = ''
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
if binding.host:
db.clear_binding_levels(session, port_id, binding.host)
binding.host = ''
self._update_port_dict_binding(port, binding)
binding.host = attrs and attrs.get(portbindings.HOST_ID)
binding.router_id = attrs and attrs.get('device_id')
def update_dvr_port_binding(self, context, id, port):
attrs = port[attributes.PORT]
host = attrs and attrs.get(portbindings.HOST_ID)
host_set = attributes.is_attr_set(host)
if not host_set:
LOG.error(_LE("No Host supplied to bind DVR Port %s"), id)
return
session = context.session
binding = db.get_dvr_port_binding_by_host(session, id, host)
device_id = attrs and attrs.get('device_id')
router_id = binding and binding.get('router_id')
update_required = (not binding or
binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or
router_id != device_id)
if update_required:
try:
with session.begin(subtransactions=True):
orig_port = self.get_port(context, id)
if not binding:
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=device_id)
network = self.get_network(context,
orig_port['network_id'])
levels = db.get_binding_levels(session, id, host)
mech_context = driver_context.PortContext(self,
context, orig_port, network,
binding, levels, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context,
attrs)
except (os_db_exception.DBReferenceError, exc.PortNotFound):
LOG.debug("DVR Port %s has been deleted concurrently", id)
return
self._bind_port_if_needed(mech_context)
def _pre_delete_port(self, context, port_id, port_check):
"""Do some preliminary operations before deleting the port."""
LOG.debug("Deleting port %s", port_id)
try:
# notify interested parties of imminent port deletion;
# a failure here prevents the operation from happening
kwargs = {
'context': context,
'port_id': port_id,
'port_check': port_check
}
registry.notify(
resources.PORT, events.BEFORE_DELETE, self, **kwargs)
except exceptions.CallbackFailure as e:
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise exc.ServicePortInUse(port_id=port_id, reason=e)
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_deadlock=True)
def delete_port(self, context, id, l3_port_check=True):
self._pre_delete_port(context, id, l3_port_check)
# TODO(armax): get rid of the l3 dependency in the with block
removed_routers = []
router_ids = []
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
is_dvr_enabled = utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
LOG.debug("The port '%s' was deleted", id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
bound_mech_contexts = []
device_owner = port['device_owner']
if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
levels = db.get_binding_levels(context.session, id,
bind.host)
mech_context = driver_context.PortContext(
self, context, port, network, bind, levels)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
else:
levels = db.get_binding_levels(context.session, id,
binding.host)
mech_context = driver_context.PortContext(
self, context, port, network, binding, levels)
if is_dvr_enabled and utils.is_dvr_serviced(device_owner):
removed_routers = l3plugin.dvr_deletens_if_no_port(
context, id)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s",
{"port_id": id, "owner": device_owner})
super(Ml2Plugin, self).delete_port(context, id)
self._post_delete_port(
context, port, router_ids, removed_routers, bound_mech_contexts)
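    # Post-deletion cleanup: emit the AFTER_DELETE registry notification, run
    # delete_port_postcommit for each bound mechanism context (DVR interface
    # ports contribute one context per host binding), then notify agents and
    # security groups.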
def _post_delete_port(
self, context, port, router_ids, removed_routers, bound_mech_contexts):
kwargs = {
'context': context,
'port': port,
'router_ids': router_ids,
'removed_routers': removed_routers
}
registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs)
try:
# Note that DVR Interface ports will have bindings on
# multiple hosts, and so will have multiple mech_contexts,
# while other ports typically have just one.
for mech_context in bound_mech_contexts:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for"
" port %s"), port['id'])
self.notifier.port_delete(context, port['id'])
self.notify_security_groups_member_updated(context, port)
def get_bound_port_context(self, plugin_context, port_id, host=None,
cached_networks=None):
session = plugin_context.session
with session.begin(subtransactions=True):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter(models_v2.Port.id.startswith(port_id)).
one())
except sa_exc.NoResultFound:
LOG.debug("No ports have port_id starting with %s",
port_id)
return
except sa_exc.MultipleResultsFound:
LOG.error(_LE("Multiple ports have port_id starting with %s"),
port_id)
return
port = self._make_port_dict(port_db)
network = (cached_networks or {}).get(port['network_id'])
if not network:
network = self.get_network(plugin_context, port['network_id'])
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
LOG.error(_LE("Binding info for DVR port %s not found"),
port_id)
return None
levels = db.get_binding_levels(session, port_db.id, host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding, levels)
else:
                # Since eager loads are disabled in the port_db query, the
                # related port_binding attribute can disappear if the port is
                # deleted concurrently; this is not an error condition.
binding = port_db.port_binding
if not binding:
LOG.info(_LI("Binding info for port %s was not found, "
"it might have been deleted already."),
port_id)
return
levels = db.get_binding_levels(session, port_db.id,
port_db.port_binding.host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding, levels)
return self._bind_port_if_needed(port_context)
def update_port_status(self, context, port_id, status, host=None):
"""
Returns port_id (non-truncated uuid) if the port exists.
Otherwise returns None.
"""
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_LW("Port %(port)s updated up by agent not found"),
{'port': port_id})
return None
if (port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
levels = db.get_binding_levels(session, port.id,
port.port_binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
levels, original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
return
binding['status'] = status
binding.update(binding)
updated = True
if (updated and
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_LW("Port %s not found during update"),
port_id)
return
original_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
levels = db.get_binding_levels(session, port_id, host)
mech_context = (driver_context.PortContext(
self, context, updated_port, network,
binding, levels, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
db.delete_dvr_port_binding_if_stale(session, binding)
return port['id']
def port_bound_to_host(self, context, port_id, host):
port = db.get_port(context.session, port_id)
if not port:
LOG.debug("No Port match for: %s", port_id)
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, port_id)
for b in bindings:
if b.host == host:
return True
LOG.debug("No binding found for DVR port %s", port['id'])
return False
else:
port_host = db.get_port_binding_host(context.session, port_id)
return (port_host == host)
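    # Map agent-reported device names back to ports (including their security
    # groups) and attach the original device string to each returned port dict.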
def get_ports_from_devices(self, devices):
port_ids_to_devices = dict((self._device_to_port_id(device), device)
for device in devices)
port_ids = port_ids_to_devices.keys()
ports = db.get_ports_and_sgs(port_ids)
for port in ports:
# map back to original requested id
port_id = next((port_id for port_id in port_ids
if port['id'].startswith(port_id)), None)
port['device'] = port_ids_to_devices.get(port_id)
return ports
def _device_to_port_id(self, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
if device.startswith(const.TAP_DEVICE_PREFIX):
return device[len(const.TAP_DEVICE_PREFIX):]
else:
# REVISIT(irenab): Consider calling into bound MD to
# handle the get_device_details RPC, then remove the 'else' clause
if not uuidutils.is_uuid_like(device):
port = db.get_port_from_device_mac(device)
if port:
return port.id
return device
| gpl-2.0 | -8,146,864,215,293,628,000 | 46.598952 | 79 | 0.570599 | false | 4.460575 | false | false | false |
31415us/trajectory | py/env/lib/python2.7/site-packages/setuptools/tests/test_egg_info.py | 1 | 2207 | import os
import sys
import tempfile
import shutil
import unittest
import pkg_resources
from setuptools.command import egg_info
from setuptools import svn_utils
ENTRIES_V10 = pkg_resources.resource_string(__name__, 'entries-v10')
"An entries file generated with svn 1.6.17 against the legacy Setuptools repo"
class TestEggInfo(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.test_dir, '.svn'))
self.old_cwd = os.getcwd()
os.chdir(self.test_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.test_dir)
def _write_entries(self, entries):
fn = os.path.join(self.test_dir, '.svn', 'entries')
entries_f = open(fn, 'wb')
entries_f.write(entries)
entries_f.close()
def test_version_10_format(self):
"""
"""
#keeping this set for 1.6 is a good check on the get_svn_revision
#to ensure I return using svnversion what would had been returned
version_str = svn_utils.SvnInfo.get_svn_version()
version = [int(x) for x in version_str.split('.')[:2]]
if version != [1,6]:
if hasattr(self, 'skipTest'):
self.skipTest('')
else:
sys.stderr.write('\n Skipping due to SVN Version\n')
return
self._write_entries(ENTRIES_V10)
rev = egg_info.egg_info.get_svn_revision()
self.assertEqual(rev, '89000')
def test_version_10_format_legacy_parser(self):
"""
"""
path_variable = None
for env in os.environ:
if env.lower() == 'path':
path_variable = env
if path_variable is None:
self.skipTest('Cannot figure out how to modify path')
old_path = os.environ[path_variable]
os.environ[path_variable] = ''
try:
self._write_entries(ENTRIES_V10)
rev = egg_info.egg_info.get_svn_revision()
finally:
os.environ[path_variable] = old_path
self.assertEqual(rev, '89000')
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| mit | -5,422,201,353,999,255,000 | 28.824324 | 78 | 0.596738 | false | 3.740678 | true | false | false |
HarveyHunt/tyled | tyled/tyled.py | 1 | 4665 | #!/usr/bin/env python3
import argparse
import re
import logging
from PIL import Image
from tyled.effects import apply_effects, apply_filters
from tyled.patterns import apply_pattern
def main(args):
out = Image.new('RGBA', (args.width, args.height), args.background)
tiles = []
if args.tiles:
for tile in args.tiles.split(','):
tile = Image.open(tile).convert('RGBA')
logging.debug('Opened tile {0}'.format(tile))
check_tile(tile)
tiles.append(tile)
elif args.xcolours:
colours = parse_xresources(args.xcolours)
tiles = generate_tiles(colours, args.size)
elif args.colours:
colours = args.colours.split(',')
tiles = generate_tiles(colours, args.size)
else:
        raise ValueError('No list of tiles or colour information has been provided')
if args.tile_filters:
tiles = apply_filters(tiles, args.tile_filters)
out = apply_pattern(out, tiles, args.pattern)
if args.out_filters:
out = apply_filters(out, args.out_filters)
if args.effects:
out = apply_effects(out, args.effects)
out.save(args.out)
if args.show:
out.show()
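# Build one solid-colour RGBA tile of the requested "WIDTHxHEIGHT" size for
# each colour string (e.g. "#ff0000").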
def generate_tiles(colours, size):
size = tuple([int(x) for x in size.lower().split('x')])
tiles = []
for colour in colours:
tiles.append(Image.new('RGBA', size, colour))
logging.debug('Generated tile with colour {0}'.format(colour))
return tiles
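# Collect hex colours from an Xresources file: any non-comment line whose key
# contains "color", "foreground" or "background" and whose value is a 6-digit
# hex colour is picked up. A minimal fragment it would match (assumed example):
#   *color0:     #1d2021
#   *foreground: #ebdbb2
#   *background: #282828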
def parse_xresources(filename):
colours = []
colour_re = re.compile('.*?(color[^:]+|foreground|background):\s*(#[\da-z]{6})')
with open(filename, 'r') as xc:
for line in xc.readlines():
if line.startswith('!'):
continue
match = colour_re.search(line.lower())
if match:
_, colour = match.groups()
logging.debug('Found colour {0} in file {1}'.format(colour, filename))
colours.append(colour)
return colours
def check_tile(tile):
if tile.size[0] > 40:
        logging.warning('Tile image is larger than 40x40, making it into a thumbnail')
tile.thumbnail((40, 40))
def init():
parser = argparse.ArgumentParser(description='A lightweight image tiler written in Python.',
conflict_handler='resolve')
parser.add_argument('-t', '--tiles', type=str, help='A comma separated list '
'of tile images')
parser.add_argument('-o', '--out', type=str, help='The name of the image used as output',
required=True)
parser.add_argument('-bg', '--background', type=str, default='#000000',
help='The background colour that will be displayed where the tile has alpha')
parser.add_argument('-w', '--width', type=int, required=True)
parser.add_argument('-h', '--height', type=int, required=True)
parser.add_argument('-of', '--out-filters', type=str, help='A comma '
'separated list of filters to be applied to the output image. Args are colon '
'separated and dictate how many times to apply the filter')
parser.add_argument('-tf', '--tile-filters', type=str, help='A comma '
'separated list of filters to be applied to the tile image. Args are colon '
'separated and dictate how many times to apply the filter')
parser.add_argument('-e', '--effects', type=str, help='A comma '
'separated list of effects to be applied to the output image. Args are'
'colon separated e.g. effect_foo:1:2:3')
parser.add_argument('-sh', '--show', action='store_true',
help='Show the image upon completion')
parser.add_argument('-xc', '--xcolours', type=str, help='The path to the '
'file which contains the xcolours to be used')
parser.add_argument('-p', '--pattern', type=str, help='The pattern that '
'the tile should be arranged in', default='grid')
parser.add_argument('-c', '--colours', type=str, help='The colours that '
'should be used for generating tiles.')
parser.add_argument('-s', '--size', type=str, help='The size of the tiles that will be '
'generated if colours are passed.', default='10x10')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.xcolours and args.tiles:
raise ValueError('Xcolours and tile image can\'t both be set')
if args.xcolours and args.colours:
raise ValueError('Xcolours and colours can\'t both be set')
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
main(args)
if __name__ == '__main__':
init()
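# A typical invocation might look like this (hypothetical file names, assuming
# the tyled package is importable):
#   python -m tyled.tyled -t tile.png -o wallpaper.png -w 1920 -h 1080 -p grid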
| gpl-3.0 | -7,621,474,389,457,671,000 | 36.926829 | 96 | 0.624009 | false | 3.826907 | false | false | false |
tensorflow/tflite-support | tensorflow_lite_support/metadata/python/tests/metadata_test.py | 2 | 36500 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_lite_support.metadata.metadata."""
import enum
import os
from absl.testing import parameterized
import six
import tensorflow as tf
import flatbuffers
from tensorflow.python.platform import resource_loader
from tensorflow_lite_support.metadata import metadata_schema_py_generated as _metadata_fb
from tensorflow_lite_support.metadata import schema_py_generated as _schema_fb
from tensorflow_lite_support.metadata.python import metadata as _metadata
class Tokenizer(enum.Enum):
BERT_TOKENIZER = 0
SENTENCE_PIECE = 1
class TensorType(enum.Enum):
INPUT = 0
OUTPUT = 1
def _read_file(file_name, mode="rb"):
with open(file_name, mode) as f:
return f.read()
class MetadataTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(MetadataTest, self).setUp()
self._invalid_model_buf = None
self._invalid_file = "not_existed_file"
self._model_buf = self._create_model_buf()
self._model_file = self.create_tempfile().full_path
with open(self._model_file, "wb") as f:
f.write(self._model_buf)
self._metadata_file = self._create_metadata_file()
self._metadata_file_with_version = self._create_metadata_file_with_version(
self._metadata_file, "1.0.0")
self._file1 = self.create_tempfile("file1").full_path
self._file2 = self.create_tempfile("file2").full_path
self._file2_content = b"file2_content"
with open(self._file2, "wb") as f:
f.write(self._file2_content)
self._file3 = self.create_tempfile("file3").full_path
def _create_model_buf(self):
# Create a model with two inputs and one output, which matches the metadata
# created by _create_metadata_file().
metadata_field = _schema_fb.MetadataT()
subgraph = _schema_fb.SubGraphT()
subgraph.inputs = [0, 1]
subgraph.outputs = [2]
metadata_field.name = "meta"
buffer_field = _schema_fb.BufferT()
model = _schema_fb.ModelT()
model.subgraphs = [subgraph]
# Creates the metadata and buffer fields for testing purposes.
model.metadata = [metadata_field, metadata_field]
model.buffers = [buffer_field, buffer_field, buffer_field]
model_builder = flatbuffers.Builder(0)
model_builder.Finish(
model.Pack(model_builder),
_metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
return model_builder.Output()
def _create_metadata_file(self):
associated_file1 = _metadata_fb.AssociatedFileT()
associated_file1.name = b"file1"
associated_file2 = _metadata_fb.AssociatedFileT()
associated_file2.name = b"file2"
self.expected_recorded_files = [
six.ensure_str(associated_file1.name),
six.ensure_str(associated_file2.name)
]
input_meta = _metadata_fb.TensorMetadataT()
output_meta = _metadata_fb.TensorMetadataT()
output_meta.associatedFiles = [associated_file2]
subgraph = _metadata_fb.SubGraphMetadataT()
# Create a model with two inputs and one output.
subgraph.inputTensorMetadata = [input_meta, input_meta]
subgraph.outputTensorMetadata = [output_meta]
model_meta = _metadata_fb.ModelMetadataT()
model_meta.name = "Mobilenet_quantized"
model_meta.associatedFiles = [associated_file1]
model_meta.subgraphMetadata = [subgraph]
b = flatbuffers.Builder(0)
b.Finish(
model_meta.Pack(b),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_file = self.create_tempfile().full_path
with open(metadata_file, "wb") as f:
f.write(b.Output())
return metadata_file
def _create_model_buffer_with_wrong_identifier(self):
wrong_identifier = b"widn"
model = _schema_fb.ModelT()
model_builder = flatbuffers.Builder(0)
model_builder.Finish(model.Pack(model_builder), wrong_identifier)
return model_builder.Output()
def _create_metadata_buffer_with_wrong_identifier(self):
# Creates a metadata with wrong identifier
wrong_identifier = b"widn"
metadata = _metadata_fb.ModelMetadataT()
metadata_builder = flatbuffers.Builder(0)
metadata_builder.Finish(metadata.Pack(metadata_builder), wrong_identifier)
return metadata_builder.Output()
def _populate_metadata_with_identifier(self, model_buf, metadata_buf,
identifier):
# For testing purposes only. MetadataPopulator cannot populate metadata with
# wrong identifiers.
model = _schema_fb.ModelT.InitFromObj(
_schema_fb.Model.GetRootAsModel(model_buf, 0))
buffer_field = _schema_fb.BufferT()
buffer_field.data = metadata_buf
model.buffers = [buffer_field]
# Creates a new metadata field.
metadata_field = _schema_fb.MetadataT()
metadata_field.name = _metadata.MetadataPopulator.METADATA_FIELD_NAME
metadata_field.buffer = len(model.buffers) - 1
model.metadata = [metadata_field]
b = flatbuffers.Builder(0)
b.Finish(model.Pack(b), identifier)
return b.Output()
def _create_metadata_file_with_version(self, metadata_file, min_version):
# Creates a new metadata file with the specified min_version for testing
# purposes.
metadata_buf = bytearray(_read_file(metadata_file))
metadata = _metadata_fb.ModelMetadataT.InitFromObj(
_metadata_fb.ModelMetadata.GetRootAsModelMetadata(metadata_buf, 0))
metadata.minParserVersion = min_version
b = flatbuffers.Builder(0)
b.Finish(
metadata.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_file_with_version = self.create_tempfile().full_path
with open(metadata_file_with_version, "wb") as f:
f.write(b.Output())
return metadata_file_with_version
class MetadataPopulatorTest(MetadataTest):
def _create_bert_tokenizer(self):
vocab_file_name = "bert_vocab"
vocab = _metadata_fb.AssociatedFileT()
vocab.name = vocab_file_name
vocab.type = _metadata_fb.AssociatedFileType.VOCABULARY
tokenizer = _metadata_fb.ProcessUnitT()
tokenizer.optionsType = _metadata_fb.ProcessUnitOptions.BertTokenizerOptions
tokenizer.options = _metadata_fb.BertTokenizerOptionsT()
tokenizer.options.vocabFile = [vocab]
return tokenizer, [vocab_file_name]
def _create_sentence_piece_tokenizer(self):
sp_model_name = "sp_model"
vocab_file_name = "sp_vocab"
sp_model = _metadata_fb.AssociatedFileT()
sp_model.name = sp_model_name
vocab = _metadata_fb.AssociatedFileT()
vocab.name = vocab_file_name
vocab.type = _metadata_fb.AssociatedFileType.VOCABULARY
tokenizer = _metadata_fb.ProcessUnitT()
tokenizer.optionsType = (
_metadata_fb.ProcessUnitOptions.SentencePieceTokenizerOptions)
tokenizer.options = _metadata_fb.SentencePieceTokenizerOptionsT()
tokenizer.options.sentencePieceModel = [sp_model]
tokenizer.options.vocabFile = [vocab]
return tokenizer, [sp_model_name, vocab_file_name]
def _create_tokenizer(self, tokenizer_type):
if tokenizer_type is Tokenizer.BERT_TOKENIZER:
return self._create_bert_tokenizer()
elif tokenizer_type is Tokenizer.SENTENCE_PIECE:
return self._create_sentence_piece_tokenizer()
else:
raise ValueError(
"The tokenizer type, {0}, is unsupported.".format(tokenizer_type))
def _create_tempfiles(self, file_names):
tempfiles = []
for name in file_names:
tempfiles.append(self.create_tempfile(name).full_path)
return tempfiles
def _create_model_meta_with_subgraph_meta(self, subgraph_meta):
model_meta = _metadata_fb.ModelMetadataT()
model_meta.subgraphMetadata = [subgraph_meta]
b = flatbuffers.Builder(0)
b.Finish(
model_meta.Pack(b),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
return b.Output()
def testToValidModelFile(self):
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
self.assertIsInstance(populator, _metadata.MetadataPopulator)
def testToInvalidModelFile(self):
with self.assertRaises(IOError) as error:
_metadata.MetadataPopulator.with_model_file(self._invalid_file)
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testToValidModelBuffer(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
self.assertIsInstance(populator, _metadata.MetadataPopulator)
def testToInvalidModelBuffer(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataPopulator.with_model_buffer(self._invalid_model_buf)
self.assertEqual("model_buf cannot be empty.", str(error.exception))
def testToModelBufferWithWrongIdentifier(self):
model_buf = self._create_model_buffer_with_wrong_identifier()
with self.assertRaises(ValueError) as error:
_metadata.MetadataPopulator.with_model_buffer(model_buf)
self.assertEqual(
"The model provided does not have the expected identifier, and "
"may not be a valid TFLite model.", str(error.exception))
def testSinglePopulateAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
populator.load_associated_files([self._file1])
populator.populate()
packed_files = populator.get_packed_associated_file_list()
expected_packed_files = [os.path.basename(self._file1)]
self.assertEqual(set(packed_files), set(expected_packed_files))
def testRepeatedPopulateAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_associated_files([self._file1, self._file2])
# Loads file2 multiple times.
populator.load_associated_files([self._file2])
populator.populate()
packed_files = populator.get_packed_associated_file_list()
expected_packed_files = [
os.path.basename(self._file1),
os.path.basename(self._file2)
]
self.assertLen(packed_files, 2)
self.assertEqual(set(packed_files), set(expected_packed_files))
# Check if the model buffer read from file is the same as that read from
# get_model_buffer().
model_buf_from_file = _read_file(self._model_file)
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testPopulateInvalidAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
with self.assertRaises(IOError) as error:
populator.load_associated_files([self._invalid_file])
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testPopulatePackedAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
populator.load_associated_files([self._file1])
populator.populate()
with self.assertRaises(ValueError) as error:
populator.load_associated_files([self._file1])
populator.populate()
self.assertEqual(
"File, '{0}', has already been packed.".format(
os.path.basename(self._file1)), str(error.exception))
def testLoadAssociatedFileBuffers(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
file_buffer = _read_file(self._file1)
populator.load_associated_file_buffers({self._file1: file_buffer})
populator.populate()
packed_files = populator.get_packed_associated_file_list()
expected_packed_files = [os.path.basename(self._file1)]
self.assertEqual(set(packed_files), set(expected_packed_files))
def testRepeatedLoadAssociatedFileBuffers(self):
file_buffer1 = _read_file(self._file1)
file_buffer2 = _read_file(self._file2)
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_associated_file_buffers({
self._file1: file_buffer1,
self._file2: file_buffer2
})
# Loads file2 multiple times.
populator.load_associated_file_buffers({self._file2: file_buffer2})
populator.populate()
packed_files = populator.get_packed_associated_file_list()
expected_packed_files = [
os.path.basename(self._file1),
os.path.basename(self._file2)
]
self.assertEqual(set(packed_files), set(expected_packed_files))
# Check if the model buffer read from file is the same as that read from
# get_model_buffer().
model_buf_from_file = _read_file(self._model_file)
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testLoadPackedAssociatedFileBuffersFails(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
file_buffer = _read_file(self._file1)
populator.load_associated_file_buffers({self._file1: file_buffer})
populator.populate()
# Load file1 again should fail.
with self.assertRaises(ValueError) as error:
populator.load_associated_file_buffers({self._file1: file_buffer})
populator.populate()
self.assertEqual(
"File, '{0}', has already been packed.".format(
os.path.basename(self._file1)), str(error.exception))
def testGetPackedAssociatedFileList(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
packed_files = populator.get_packed_associated_file_list()
self.assertEqual(packed_files, [])
def testPopulateMetadataFileToEmptyModelFile(self):
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1, self._file2])
populator.populate()
model_buf_from_file = _read_file(self._model_file)
model = _schema_fb.Model.GetRootAsModel(model_buf_from_file, 0)
# self._model_file already has two elements in the metadata field, so the
# populated TFLite metadata will be the third element.
metadata_field = model.Metadata(2)
self.assertEqual(
six.ensure_str(metadata_field.Name()),
six.ensure_str(_metadata.MetadataPopulator.METADATA_FIELD_NAME))
buffer_index = metadata_field.Buffer()
buffer_data = model.Buffers(buffer_index)
metadata_buf_np = buffer_data.DataAsNumpy()
metadata_buf = metadata_buf_np.tobytes()
expected_metadata_buf = bytearray(
_read_file(self._metadata_file_with_version))
self.assertEqual(metadata_buf, expected_metadata_buf)
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(self.expected_recorded_files))
    # Up to now, we've verified the model buffer that was read from the file.
    # Next, check that get_model_buffer() returns the same model buffer.
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testPopulateMetadataFileWithoutAssociatedFiles(self):
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1])
    # self._file2 is supposed to be populated as well, since it is recorded in the metadata.
with self.assertRaises(ValueError) as error:
populator.populate()
self.assertEqual(("File, '{0}', is recorded in the metadata, but has "
"not been loaded into the populator.").format(
os.path.basename(self._file2)), str(error.exception))
def testPopulateMetadataBufferWithWrongIdentifier(self):
metadata_buf = self._create_metadata_buffer_with_wrong_identifier()
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer(metadata_buf)
self.assertEqual(
"The metadata buffer does not have the expected identifier, and may not"
" be a valid TFLite Metadata.", str(error.exception))
def _assert_golden_metadata(self, model_file):
model_buf_from_file = _read_file(model_file)
model = _schema_fb.Model.GetRootAsModel(model_buf_from_file, 0)
# There are two elements in model.Metadata array before the population.
# Metadata should be packed to the third element in the array.
metadata_field = model.Metadata(2)
self.assertEqual(
six.ensure_str(metadata_field.Name()),
six.ensure_str(_metadata.MetadataPopulator.METADATA_FIELD_NAME))
buffer_index = metadata_field.Buffer()
buffer_data = model.Buffers(buffer_index)
metadata_buf_np = buffer_data.DataAsNumpy()
metadata_buf = metadata_buf_np.tobytes()
expected_metadata_buf = bytearray(
_read_file(self._metadata_file_with_version))
self.assertEqual(metadata_buf, expected_metadata_buf)
def testPopulateMetadataFileToModelWithMetadataAndAssociatedFiles(self):
# First, creates a dummy metadata different from self._metadata_file. It
# needs to have the same input/output tensor numbers as self._model_file.
# Populates it and the associated files into the model.
input_meta = _metadata_fb.TensorMetadataT()
output_meta = _metadata_fb.TensorMetadataT()
subgraph = _metadata_fb.SubGraphMetadataT()
# Create a model with two inputs and one output.
subgraph.inputTensorMetadata = [input_meta, input_meta]
subgraph.outputTensorMetadata = [output_meta]
model_meta = _metadata_fb.ModelMetadataT()
model_meta.subgraphMetadata = [subgraph]
b = flatbuffers.Builder(0)
b.Finish(
model_meta.Pack(b),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_buf = b.Output()
# Populate the metadata.
populator1 = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator1.load_metadata_buffer(metadata_buf)
populator1.load_associated_files([self._file1, self._file2])
populator1.populate()
# Then, populate the metadata again.
populator2 = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator2.load_metadata_file(self._metadata_file)
populator2.populate()
# Test if the metadata is populated correctly.
self._assert_golden_metadata(self._model_file)
def testPopulateMetadataFileToModelFileWithMetadataAndBufFields(self):
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1, self._file2])
populator.populate()
# Tests if the metadata is populated correctly.
self._assert_golden_metadata(self._model_file)
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(self.expected_recorded_files))
    # Up to now, we've verified the model buffer that was read from the file.
    # Next, check that get_model_buffer() returns the same model buffer.
model_buf_from_file = _read_file(self._model_file)
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testPopulateInvalidMetadataFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
with self.assertRaises(IOError) as error:
populator.load_metadata_file(self._invalid_file)
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testPopulateInvalidMetadataBuffer(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer([])
self.assertEqual("The metadata to be populated is empty.",
str(error.exception))
def testGetModelBufferBeforePopulatingData(self):
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
model_buf = populator.get_model_buffer()
expected_model_buf = self._model_buf
self.assertEqual(model_buf, expected_model_buf)
def testLoadMetadataBufferWithNoSubgraphMetadataThrowsException(self):
# Create a dummy metadata without Subgraph.
model_meta = _metadata_fb.ModelMetadataT()
builder = flatbuffers.Builder(0)
builder.Finish(
model_meta.Pack(builder),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
meta_buf = builder.Output()
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer(meta_buf)
self.assertEqual(
"The number of SubgraphMetadata should be exactly one, but got 0.",
str(error.exception))
def testLoadMetadataBufferWithWrongInputMetaNumberThrowsException(self):
# Create a dummy metadata with no input tensor metadata, while the expected
# number is 2.
output_meta = _metadata_fb.TensorMetadataT()
    subgraph_meta = _metadata_fb.SubGraphMetadataT()
    subgraph_meta.outputTensorMetadata = [output_meta]
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.subgraphMetadata = [subgraph_meta]
builder = flatbuffers.Builder(0)
builder.Finish(
model_meta.Pack(builder),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
meta_buf = builder.Output()
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer(meta_buf)
self.assertEqual(
("The number of input tensors (2) should match the number of "
"input tensor metadata (0)"), str(error.exception))
def testLoadMetadataBufferWithWrongOutputMetaNumberThrowsException(self):
# Create a dummy metadata with no output tensor metadata, while the expected
# number is 1.
input_meta = _metadata_fb.TensorMetadataT()
    subgraph_meta = _metadata_fb.SubGraphMetadataT()
    subgraph_meta.inputTensorMetadata = [input_meta, input_meta]
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.subgraphMetadata = [subgraph_meta]
builder = flatbuffers.Builder(0)
builder.Finish(
model_meta.Pack(builder),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
meta_buf = builder.Output()
populator = _metadata.MetadataPopulator.with_model_buffer(self._model_buf)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer(meta_buf)
self.assertEqual(
("The number of output tensors (1) should match the number of "
"output tensor metadata (0)"), str(error.exception))
def testLoadMetadataAndAssociatedFilesShouldSucceeds(self):
# Create a src model with metadata and two associated files.
src_model_buf = self._create_model_buf()
populator_src = _metadata.MetadataPopulator.with_model_buffer(src_model_buf)
populator_src.load_metadata_file(self._metadata_file)
populator_src.load_associated_files([self._file1, self._file2])
populator_src.populate()
# Create a model to be populated with the metadata and files from
# src_model_buf.
dst_model_buf = self._create_model_buf()
populator_dst = _metadata.MetadataPopulator.with_model_buffer(dst_model_buf)
populator_dst.load_metadata_and_associated_files(
populator_src.get_model_buffer())
populator_dst.populate()
# Tests if the metadata and associated files are populated correctly.
dst_model_file = self.create_tempfile().full_path
with open(dst_model_file, "wb") as f:
f.write(populator_dst.get_model_buffer())
self._assert_golden_metadata(dst_model_file)
recorded_files = populator_dst.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(self.expected_recorded_files))
@parameterized.named_parameters(
{
"testcase_name": "InputTensorWithBert",
"tensor_type": TensorType.INPUT,
"tokenizer_type": Tokenizer.BERT_TOKENIZER
}, {
"testcase_name": "OutputTensorWithBert",
"tensor_type": TensorType.OUTPUT,
"tokenizer_type": Tokenizer.BERT_TOKENIZER
}, {
"testcase_name": "InputTensorWithSentencePiece",
"tensor_type": TensorType.INPUT,
"tokenizer_type": Tokenizer.SENTENCE_PIECE
}, {
"testcase_name": "OutputTensorWithSentencePiece",
"tensor_type": TensorType.OUTPUT,
"tokenizer_type": Tokenizer.SENTENCE_PIECE
})
def testGetRecordedAssociatedFileListWithSubgraphTensor(
self, tensor_type, tokenizer_type):
# Creates a metadata with the tokenizer in the tensor process units.
tokenizer, expected_files = self._create_tokenizer(tokenizer_type)
# Create the tensor with process units.
tensor = _metadata_fb.TensorMetadataT()
tensor.processUnits = [tokenizer]
    # Create the subgraph with the tensor.
subgraph = _metadata_fb.SubGraphMetadataT()
dummy_tensor_meta = _metadata_fb.TensorMetadataT()
subgraph.outputTensorMetadata = [dummy_tensor_meta]
if tensor_type is TensorType.INPUT:
subgraph.inputTensorMetadata = [tensor, dummy_tensor_meta]
subgraph.outputTensorMetadata = [dummy_tensor_meta]
elif tensor_type is TensorType.OUTPUT:
subgraph.inputTensorMetadata = [dummy_tensor_meta, dummy_tensor_meta]
subgraph.outputTensorMetadata = [tensor]
else:
raise ValueError(
"The tensor type, {0}, is unsupported.".format(tensor_type))
# Create a model metadata with the subgraph metadata
meta_buffer = self._create_model_meta_with_subgraph_meta(subgraph)
# Creates the tempfiles.
tempfiles = self._create_tempfiles(expected_files)
# Creates the MetadataPopulator object.
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_buffer(meta_buffer)
populator.load_associated_files(tempfiles)
populator.populate()
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(expected_files))
@parameterized.named_parameters(
{
"testcase_name": "InputTensorWithBert",
"tensor_type": TensorType.INPUT,
"tokenizer_type": Tokenizer.BERT_TOKENIZER
}, {
"testcase_name": "OutputTensorWithBert",
"tensor_type": TensorType.OUTPUT,
"tokenizer_type": Tokenizer.BERT_TOKENIZER
}, {
"testcase_name": "InputTensorWithSentencePiece",
"tensor_type": TensorType.INPUT,
"tokenizer_type": Tokenizer.SENTENCE_PIECE
}, {
"testcase_name": "OutputTensorWithSentencePiece",
"tensor_type": TensorType.OUTPUT,
"tokenizer_type": Tokenizer.SENTENCE_PIECE
})
def testGetRecordedAssociatedFileListWithSubgraphProcessUnits(
self, tensor_type, tokenizer_type):
# Creates a metadata with the tokenizer in the subgraph process units.
tokenizer, expected_files = self._create_tokenizer(tokenizer_type)
# Create the subgraph with process units.
subgraph = _metadata_fb.SubGraphMetadataT()
if tensor_type is TensorType.INPUT:
subgraph.inputProcessUnits = [tokenizer]
elif tensor_type is TensorType.OUTPUT:
subgraph.outputProcessUnits = [tokenizer]
else:
raise ValueError(
"The tensor type, {0}, is unsupported.".format(tensor_type))
# Creates the input and output tensor meta to match self._model_file.
dummy_tensor_meta = _metadata_fb.TensorMetadataT()
subgraph.inputTensorMetadata = [dummy_tensor_meta, dummy_tensor_meta]
subgraph.outputTensorMetadata = [dummy_tensor_meta]
# Create a model metadata with the subgraph metadata
meta_buffer = self._create_model_meta_with_subgraph_meta(subgraph)
# Creates the tempfiles.
tempfiles = self._create_tempfiles(expected_files)
# Creates the MetadataPopulator object.
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_buffer(meta_buffer)
populator.load_associated_files(tempfiles)
populator.populate()
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(expected_files))
def testPopulatedFullPathAssociatedFileShouldSucceed(self):
# Create AssociatedFileT using the full path file name.
associated_file = _metadata_fb.AssociatedFileT()
associated_file.name = self._file1
# Create model metadata with the associated file.
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.associatedFiles = [associated_file]
# Creates the input and output tensor metadata to match self._model_file.
dummy_tensor = _metadata_fb.TensorMetadataT()
subgraph.inputTensorMetadata = [dummy_tensor, dummy_tensor]
subgraph.outputTensorMetadata = [dummy_tensor]
md_buffer = self._create_model_meta_with_subgraph_meta(subgraph)
# Populate the metadata to a model.
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_buffer(md_buffer)
populator.load_associated_files([self._file1])
populator.populate()
# The recorded file name in metadata should only contain file basename; file
# directory should not be included.
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set([os.path.basename(self._file1)]))
class MetadataDisplayerTest(MetadataTest):
def setUp(self):
super(MetadataDisplayerTest, self).setUp()
self._model_with_meta_file = (
self._create_model_with_metadata_and_associated_files())
def _create_model_with_metadata_and_associated_files(self):
model_buf = self._create_model_buf()
model_file = self.create_tempfile().full_path
with open(model_file, "wb") as f:
f.write(model_buf)
populator = _metadata.MetadataPopulator.with_model_file(model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1, self._file2])
populator.populate()
return model_file
def testLoadModelBufferMetadataBufferWithWrongIdentifierThrowsException(self):
model_buf = self._create_model_buffer_with_wrong_identifier()
metadata_buf = self._create_metadata_buffer_with_wrong_identifier()
model_buf = self._populate_metadata_with_identifier(
model_buf, metadata_buf,
_metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(model_buf)
self.assertEqual(
"The metadata buffer does not have the expected identifier, and may not"
" be a valid TFLite Metadata.", str(error.exception))
def testLoadModelBufferModelBufferWithWrongIdentifierThrowsException(self):
model_buf = self._create_model_buffer_with_wrong_identifier()
metadata_file = self._create_metadata_file()
wrong_identifier = b"widn"
metadata_buf = bytearray(_read_file(metadata_file))
model_buf = self._populate_metadata_with_identifier(model_buf, metadata_buf,
wrong_identifier)
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(model_buf)
self.assertEqual(
"The model provided does not have the expected identifier, and "
"may not be a valid TFLite model.", str(error.exception))
def testLoadModelFileInvalidModelFileThrowsException(self):
with self.assertRaises(IOError) as error:
_metadata.MetadataDisplayer.with_model_file(self._invalid_file)
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testLoadModelFileModelWithoutMetadataThrowsException(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_file(self._model_file)
self.assertEqual("The model does not have metadata.", str(error.exception))
def testLoadModelFileModelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_file(
self._model_with_meta_file)
self.assertIsInstance(displayer, _metadata.MetadataDisplayer)
def testLoadModelBufferInvalidModelBufferThrowsException(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(_read_file(self._file1))
self.assertEqual("model_buffer cannot be empty.", str(error.exception))
def testLoadModelBufferModelWithOutMetadataThrowsException(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(self._create_model_buf())
self.assertEqual("The model does not have metadata.", str(error.exception))
def testLoadModelBufferModelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_buffer(
_read_file(self._model_with_meta_file))
self.assertIsInstance(displayer, _metadata.MetadataDisplayer)
def testGetAssociatedFileBufferShouldSucceed(self):
# _model_with_meta_file contains file1 and file2.
displayer = _metadata.MetadataDisplayer.with_model_file(
self._model_with_meta_file)
actual_content = displayer.get_associated_file_buffer("file2")
self.assertEqual(actual_content, self._file2_content)
def testGetAssociatedFileBufferFailsWithNonExistentFile(self):
# _model_with_meta_file contains file1 and file2.
displayer = _metadata.MetadataDisplayer.with_model_file(
self._model_with_meta_file)
non_existent_file = "non_existent_file"
with self.assertRaises(ValueError) as error:
displayer.get_associated_file_buffer(non_existent_file)
self.assertEqual(
"The file, {}, does not exist in the model.".format(non_existent_file),
str(error.exception))
def testGetMetadataBufferShouldSucceed(self):
displayer = _metadata.MetadataDisplayer.with_model_file(
self._model_with_meta_file)
actual_buffer = displayer.get_metadata_buffer()
actual_json = _metadata.convert_to_json(actual_buffer)
# Verifies the generated json file.
golden_json_file_path = resource_loader.get_path_to_datafile(
"testdata/golden_json.json")
with open(golden_json_file_path, "r") as f:
expected = f.read()
self.assertEqual(actual_json, expected)
def testGetMetadataJsonModelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_file(
self._model_with_meta_file)
actual = displayer.get_metadata_json()
# Verifies the generated json file.
golden_json_file_path = resource_loader.get_path_to_datafile(
"testdata/golden_json.json")
expected = _read_file(golden_json_file_path, "r")
self.assertEqual(actual, expected)
def testGetPackedAssociatedFileListModelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_file(
self._model_with_meta_file)
packed_files = displayer.get_packed_associated_file_list()
expected_packed_files = [
os.path.basename(self._file1),
os.path.basename(self._file2)
]
self.assertLen(
packed_files, 2,
"The following two associated files packed to the model: {0}; {1}"
.format(expected_packed_files[0], expected_packed_files[1]))
self.assertEqual(set(packed_files), set(expected_packed_files))
class MetadataUtilTest(MetadataTest):
def test_convert_to_json_should_succeed(self):
metadata_buf = _read_file(self._metadata_file_with_version)
metadata_json = _metadata.convert_to_json(metadata_buf)
# Verifies the generated json file.
golden_json_file_path = resource_loader.get_path_to_datafile(
"testdata/golden_json.json")
expected = _read_file(golden_json_file_path, "r")
self.assertEqual(metadata_json, expected)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -3,651,751,983,085,810,700 | 41.44186 | 89 | 0.71389 | false | 3.722211 | true | false | false |
benelot/bullet-gym | pybulletgym/envs/gym_forward_walkers.py | 1 | 10842 | from distutils.command.config import config
from pybulletgym.envs.scene_abstract import SingleRobotEmptyScene
from pybulletgym.envs.scene_stadium import SinglePlayerStadiumScene
from gym_mujoco_xml_env import PybulletMujocoXmlEnv
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import os, sys
class PybulletForwardWalkersBase(PybulletMujocoXmlEnv):
def __init__(self, fn, robot_name, action_dim, obs_dim, power):
PybulletMujocoXmlEnv.__init__(self, fn, robot_name, action_dim, obs_dim)
self.power = power
self.camera_x = 0
self.walk_target_x = 1e3 # kilometer away
self.walk_target_y = 0
def create_single_player_scene(self):
self.stadium_scene = SinglePlayerStadiumScene(gravity=9.8, timestep=0.0165/4, frame_skip=4)
return self.stadium_scene
def robot_specific_reset(self):
for j in self.ordered_joints:
j.reset_current_position(self.np_random.uniform( low=-0.1, high=0.1 ), 0)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self.stadium_scene.ground_plane_mjcf)
self.feet = [self.parts[f] for f in self.foot_list]
self.feet_contact = np.array([0.0 for f in self.foot_list], dtype=np.float32)
self.ground_ids = set([(self.parts[f].bodies[self.parts[f].bodyIndex], self.parts[f].bodyPartIndex) for f in self.foot_ground_object_names])
self.scene.actor_introduce(self)
self.initial_z = None
def move_robot(self, init_x, init_y, init_z):
"Used by multiplayer stadium to move sideways, to another running lane."
self.cpp_robot.query_position()
pose = self.cpp_robot.root_part.pose()
pose.move_xyz(init_x, init_y, init_z) # Works because robot loads around (0,0,0), and some robots have z != 0 that is left intact
self.cpp_robot.set_pose(pose)
def apply_action(self, a):
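        # Torque for joint n is power * per-joint power_coef * the action value clipped to [-1, 1].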
assert( np.isfinite(a).all() )
for n,j in enumerate(self.ordered_joints):
j.set_motor_torque( self.power*j.power_coef*float(np.clip(a[n], -1, +1)) )
def calc_state(self):
j = np.array([j.current_relative_position() for j in self.ordered_joints], dtype=np.float32).flatten()
# even elements [0::2] position, scaled to -1..+1 between limits
# odd elements [1::2] angular speed, scaled to show -1..+1
self.joint_speeds = j[1::2]
self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)
body_pose = self.robot_body.pose()
parts_xyz = np.array( [p.pose().xyz() for p in self.parts.values()] ).flatten()
self.body_xyz = body_pose.xyz()
#self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(), body_pose.xyz()[2]) # torso z is more informative than mean z
self.body_rpy = body_pose.rpy()
z = self.body_xyz[2]
r, p, yaw = self.body_rpy
(qx, qy, qz, qw) = body_pose.orientation()
if self.initial_z==None:
self.initial_z = z
self.walk_target_theta = np.arctan2( self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0] )
self.walk_target_dist = np.linalg.norm( [self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]] )
angle_to_target = self.walk_target_theta - yaw
# rot_speed = np.array(
# [[np.cos(-yaw), -np.sin(-yaw), 0],
# [np.sin(-yaw), np.cos(-yaw), 0],
# [ 0, 0, 1]]
# )
# vx, vy, vz = np.dot(rot_speed, self.robot_body.speed()) # rotate speed back to body point of view
(vx, vy, vz) = self.robot_body.speed()
more = np.array([
z-self.initial_z,
# np.sin(angle_to_target), np.cos(angle_to_target),
            0.1*vx, 0.1*vy, 0.1*vz,    # 0.1 is just scaling typical speed into -1..+1, no physical sense here
# r, p
qx,qy,qz,qw #TODO: Update this for flagrun after pull-requesting
], dtype=np.float32)
# # 8 + 34 + 2
return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
def calc_potential(self):
# progress in potential field is speed*dt, typical speed is about 2-3 meter per second, this potential will change 2-3 per frame (not per second),
# all rewards have rew/frame units and close to 1.0
return - self.walk_target_dist / self.scene.dt
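    # Per-frame cost coefficients below; _step() sums them with the alive bonus
    # and the potential-based progress term.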
    electricity_cost = -2.0  # cost for using motors -- this parameter should be carefully tuned against reward for making progress, other values less important
stall_torque_cost = -0.1 # cost for running electric current through a motor even at zero rotational speed, small
foot_collision_cost = -1.0 # touches another leg, or other objects, that cost makes robot avoid smashing feet into itself
foot_ground_object_names = set(["floor"]) # to distinguish ground and other objects
joints_at_limit_cost = -0.1 # discourage stuck joints
def _step(self, a):
if not self.scene.multiplayer: # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
self.apply_action(a)
self.scene.global_step()
state = self.calc_state() # also calculates self.joints_at_limit
alive = float(self.alive_bonus(state[0]+self.initial_z, self.body_rpy[1])) # state[0] is body height above ground, body_rpy[1] is pitch
done = alive < 0
if not np.isfinite(state).all():
print("~INF~", state)
done = True
potential_old = self.potential
self.potential = self.calc_potential()
progress = float(self.potential - potential_old)
feet_collision_cost = 0.0
for i,f in enumerate(self.feet):
contact_ids = set((x[2], x[4]) for x in f.contact_list())
#print("CONTACT OF '%s' WITH %s" % (f.name, ",".join(contact_names)) )
self.feet_contact[i] = 1.0 if (self.ground_ids & contact_ids) else 0.0
if contact_ids - self.ground_ids:
feet_collision_cost += self.foot_collision_cost
electricity_cost = self.electricity_cost * float(np.abs(a*self.joint_speeds).mean()) # let's assume we have DC motor with controller, and reverse current braking
electricity_cost += self.stall_torque_cost * float(np.square(a).mean())
joints_at_limit_cost = float(self.joints_at_limit_cost * self.joints_at_limit)
self.rewards = [
alive,
progress,
electricity_cost,
joints_at_limit_cost,
feet_collision_cost
]
self.HUD(state, a, done)
return state, sum(self.rewards), bool(done), {}
def camera_adjust(self):
x, y, z = self.body_xyz
self.camera_x = 0.98*self.camera_x + (1-0.98)*x
self.camera.move_and_look_at(self.camera_x, y-2.0, 1.4, x, y, 1.0)
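# The concrete walkers below differ only in the MJCF model they load, their
# action/observation dimensions, per-joint motor power and the alive-bonus
# condition that terminates an episode.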
class PybulletHopper(PybulletForwardWalkersBase):
foot_list = ["foot"]
def __init__(self):
PybulletForwardWalkersBase.__init__(self, "hopper.xml", "torso", action_dim=3, obs_dim=15, power=0.75)
def alive_bonus(self, z, pitch):
return +1 if z > 0.8 and abs(pitch) < 1.0 else -1
class PybulletWalker2d(PybulletForwardWalkersBase):
foot_list = ["foot", "foot_left"]
def __init__(self):
PybulletForwardWalkersBase.__init__(self, "walker2d.xml", "torso", action_dim=6, obs_dim=22, power=0.40)
def alive_bonus(self, z, pitch):
return +1 if z > 0.8 and abs(pitch) < 1.0 else -1
def robot_specific_reset(self):
PybulletForwardWalkersBase.robot_specific_reset(self)
for n in ["foot_joint", "foot_left_joint"]:
self.jdict[n].power_coef = 30.0
class PybulletHalfCheetah(PybulletForwardWalkersBase):
foot_list = ["ffoot", "fshin", "fthigh", "bfoot", "bshin", "bthigh"] # track these contacts with ground
def __init__(self):
PybulletForwardWalkersBase.__init__(self, "half_cheetah.xml", "torso", action_dim=6, obs_dim=26, power=0.90)
def alive_bonus(self, z, pitch):
        # Terminate the episode on contacts other than the feet: this rules out the many odd gaits that walk on knees
return +1 if np.abs(pitch) < 1.0 and not self.feet_contact[1] and not self.feet_contact[2] and not self.feet_contact[4] and not self.feet_contact[5] else -1
def robot_specific_reset(self):
PybulletForwardWalkersBase.robot_specific_reset(self)
self.jdict["bthigh"].power_coef = 120.0
self.jdict["bshin"].power_coef = 90.0
self.jdict["bfoot"].power_coef = 60.0
self.jdict["fthigh"].power_coef = 140.0
self.jdict["fshin"].power_coef = 60.0
self.jdict["ffoot"].power_coef = 30.0
class PybulletAnt(PybulletForwardWalkersBase):
foot_list = ['front_left_foot', 'front_right_foot', 'left_back_foot', 'right_back_foot']
def __init__(self):
PybulletForwardWalkersBase.__init__(self, "ant.xml", "torso", action_dim=8, obs_dim=28, power=2.5)
def alive_bonus(self, z, pitch):
return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
## 3d Humanoid ##
class PybulletHumanoid(PybulletForwardWalkersBase):
self_collision = True
foot_list = ["right_foot", "left_foot"] # "left_hand", "right_hand"
def __init__(self):
PybulletForwardWalkersBase.__init__(self, 'humanoid_symmetric.xml', 'torso', action_dim=17, obs_dim=44, power=0.082)
# 17 joints, 4 of them important for walking (hip, knee), others may as well be turned off, 17/4 = 4.25
self.electricity_cost = 4.25*PybulletForwardWalkersBase.electricity_cost
self.stall_torque_cost = 4.25*PybulletForwardWalkersBase.stall_torque_cost
def robot_specific_reset(self):
PybulletForwardWalkersBase.robot_specific_reset(self)
self.motor_names = ["abdomen_z", "abdomen_y", "abdomen_x"]
self.motor_power = [100, 100, 100]
self.motor_names += ["right_hip_x", "right_hip_z", "right_hip_y", "right_knee"]
self.motor_power += [100, 100, 300, 200]
self.motor_names += ["left_hip_x", "left_hip_z", "left_hip_y", "left_knee"]
self.motor_power += [100, 100, 300, 200]
self.motor_names += ["right_shoulder1", "right_shoulder2", "right_elbow"]
self.motor_power += [75, 75, 75]
self.motor_names += ["left_shoulder1", "left_shoulder2", "left_elbow"]
self.motor_power += [75, 75, 75]
self.motors = [self.jdict[n] for n in self.motor_names]
# if self.random_yaw: # TODO: Make leaning work as soon as the rest works
# cpose = cpp_household.Pose()
# yaw = self.np_random.uniform(low=-3.14, high=3.14)
# if self.random_lean and self.np_random.randint(2)==0:
# cpose.set_xyz(0, 0, 1.4)
# if self.np_random.randint(2)==0:
# pitch = np.pi/2
# cpose.set_xyz(0, 0, 0.45)
# else:
# pitch = np.pi*3/2
# cpose.set_xyz(0, 0, 0.25)
# roll = 0
# cpose.set_rpy(roll, pitch, yaw)
# else:
# cpose.set_xyz(0, 0, 1.4)
# cpose.set_rpy(0, 0, yaw) # just face random direction, but stay straight otherwise
# self.cpp_robot.set_pose_and_speed(cpose, 0,0,0)
self.initial_z = 0.8
random_yaw = False
random_lean = False
def apply_action(self, a):
assert( np.isfinite(a).all() )
force_gain = 1
for i, m, power in zip(range(17), self.motors, self.motor_power):
m.set_motor_torque( float(force_gain * power*self.power*a[i]) )
#m.set_motor_torque(float(force_gain * power * self.power * np.clip(a[i], -1, +1)))
def alive_bonus(self, z, pitch):
return +2 if z > 0.78 else -1 # 2 here because 17 joints produce a lot of electricity cost just from policy noise, living must be better than dying
| mit | -6,181,381,771,974,843,000 | 43.987552 | 166 | 0.682808 | false | 2.625182 | false | false | false |
Nan93/wxbot-clock | wxbot.py | 1 | 50928 | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import traceback
import webbrowser
import pyqrcode
import requests
import mimetypes
import json
import xml.dom.minidom
import urllib
import time
import re
import random
from traceback import format_exc
from requests.exceptions import ConnectionError, ReadTimeout
import HTMLParser
UNKONWN = 'unkonwn'
SUCCESS = '200'
SCANED = '201'
TIMEOUT = '408'
def show_image(file_path):
"""
    Display an image file in a cross-platform way.
    :param file_path: path of the image file
"""
if sys.version_info >= (3, 3):
from shlex import quote
else:
from pipes import quote
if sys.platform == "darwin":
command = "open -a /Applications/Preview.app %s&" % quote(file_path)
os.system(command)
else:
webbrowser.open(os.path.join(os.getcwd(),'temp',file_path))
class SafeSession(requests.Session):
def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None,
timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None,
json=None):
for i in range(3):
try:
return super(SafeSession, self).request(method, url, params, data, headers, cookies, files, auth,
timeout,
allow_redirects, proxies, hooks, stream, verify, cert, json)
except Exception as e:
print e.message, traceback.format_exc()
continue
class WXBot:
"""WXBot功能类"""
def __init__(self):
self.DEBUG = False
self.uuid = ''
self.base_uri = ''
self.base_host = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
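        # pseudo device id in the format the web client uses: 'e' followed by 15 random digits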
self.device_id = 'e' + repr(random.random())[2:17]
self.base_request = {}
self.sync_key_str = ''
self.sync_key = []
self.sync_host = ''
        # directory for locally cached files
self.temp_pwd = os.path.join(os.getcwd(),'temp')
        if not os.path.exists(self.temp_pwd):
os.makedirs(self.temp_pwd)
self.session = SafeSession()
self.session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5'})
self.conf = {'qr': 'png'}
        self.my_account = {}  # the currently logged-in account
        # all related accounts: contacts, official accounts, group chats and special accounts
        self.member_list = []
        # members of every group chat, {'group_id1': [member1, member2, ...], ...}
        self.group_members = {}
        # all accounts, {'group_member':{'id':{'type':'group_member', 'info':{}}, ...}, 'normal_member':{'id':{}, ...}}
        self.account_info = {'group_member': {}, 'normal_member': {}}
        self.contact_list = []  # contacts
        self.public_list = []  # official (public) accounts
        self.group_list = []  # group chats
        self.special_list = []  # special accounts
        self.encry_chat_room_id_list = []  # EncryChatRoomId of each group chat, needed when fetching group member avatars
self.file_index = 0
@staticmethod
def to_unicode(string, encoding='utf-8'):
"""
        Convert a string to unicode.
        :param string: the string to be converted
        :param encoding: encoding used to decode a byte string
        :return: the converted unicode string
"""
if isinstance(string, str):
return string.decode(encoding)
elif isinstance(string, unicode):
return string
else:
raise Exception('Unknown Type')
def get_contact(self):
"""获取当前账户的所有相关账号(包括联系人、公众号、群聊、特殊账号)"""
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' \
% (self.pass_ticket, self.skey, int(time.time()))
r = self.session.post(url, data='{}')
r.encoding = 'utf-8'
if self.DEBUG:
with open(os.path.join(self.temp_pwd,'contacts.json'), 'w') as f:
f.write(r.text.encode('utf-8'))
dic = json.loads(r.text)
self.member_list = dic['MemberList']
special_users = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail',
'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle',
'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp',
'blogapp', 'facebookapp', 'masssendapp', 'meishiapp',
'feedsapp', 'voip', 'blogappweixin', 'weixin', 'brandsessionholder',
'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c',
'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11',
'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
self.contact_list = []
self.public_list = []
self.special_list = []
self.group_list = []
for contact in self.member_list:
            if contact['VerifyFlag'] & 8 != 0:  # official (public) account
self.public_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'public', 'info': contact}
            elif contact['UserName'] in special_users:  # special account
self.special_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'special', 'info': contact}
            elif contact['UserName'].find('@@') != -1:  # group chat
self.group_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'group', 'info': contact}
            elif contact['UserName'] == self.my_account['UserName']:  # myself
self.account_info['normal_member'][contact['UserName']] = {'type': 'self', 'info': contact}
else:
self.contact_list.append(contact)
self.account_info['normal_member'][contact['UserName']] = {'type': 'contact', 'info': contact}
self.batch_get_group_members()
for group in self.group_members:
for member in self.group_members[group]:
if member['UserName'] not in self.account_info:
self.account_info['group_member'][member['UserName']] = \
{'type': 'group_member', 'info': member, 'group': group}
if self.DEBUG:
with open(os.path.join(self.temp_pwd,'contact_list.json'), 'w') as f:
f.write(json.dumps(self.contact_list))
with open(os.path.join(self.temp_pwd,'special_list.json'), 'w') as f:
f.write(json.dumps(self.special_list))
with open(os.path.join(self.temp_pwd,'group_list.json'), 'w') as f:
f.write(json.dumps(self.group_list))
with open(os.path.join(self.temp_pwd,'public_list.json'), 'w') as f:
f.write(json.dumps(self.public_list))
with open(os.path.join(self.temp_pwd,'member_list.json'), 'w') as f:
f.write(json.dumps(self.member_list))
with open(os.path.join(self.temp_pwd,'group_users.json'), 'w') as f:
f.write(json.dumps(self.group_members))
with open(os.path.join(self.temp_pwd,'account_info.json'), 'w') as f:
f.write(json.dumps(self.account_info))
return True
def batch_get_group_members(self):
"""批量获取所有群聊成员信息"""
url = self.base_uri + '/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.base_request,
"Count": len(self.group_list),
"List": [{"UserName": group['UserName'], "EncryChatRoomId": ""} for group in self.group_list]
}
r = self.session.post(url, data=json.dumps(params))
r.encoding = 'utf-8'
dic = json.loads(r.text)
group_members = {}
encry_chat_room_id = {}
for group in dic['ContactList']:
gid = group['UserName']
members = group['MemberList']
group_members[gid] = members
encry_chat_room_id[gid] = group['EncryChatRoomId']
self.group_members = group_members
self.encry_chat_room_id_list = encry_chat_room_id
def get_group_member_name(self, gid, uid):
"""
        Get the name info of a given member of a group chat.
        :param gid: group id
        :param uid: id of the group member
        :return: name info, e.g. {"display_name": "test_user", "nickname": "test", "remark_name": "for_test" }
"""
if gid not in self.group_members:
return None
group = self.group_members[gid]
for member in group:
if member['UserName'] == uid:
names = {}
if 'RemarkName' in member and member['RemarkName']:
names['remark_name'] = member['RemarkName']
if 'NickName' in member and member['NickName']:
names['nickname'] = member['NickName']
if 'DisplayName' in member and member['DisplayName']:
names['display_name'] = member['DisplayName']
return names
return None
def get_contact_info(self, uid):
return self.account_info['normal_member'].get(uid)
def get_group_member_info(self, uid):
return self.account_info['group_member'].get(uid)
def get_contact_name(self, uid):
info = self.get_contact_info(uid)
if info is None:
return None
info = info['info']
name = {}
if 'RemarkName' in info and info['RemarkName']:
name['remark_name'] = info['RemarkName']
if 'NickName' in info and info['NickName']:
name['nickname'] = info['NickName']
if 'DisplayName' in info and info['DisplayName']:
name['display_name'] = info['DisplayName']
if len(name) == 0:
return None
else:
return name
@staticmethod
def get_contact_prefer_name(name):
if name is None:
return None
if 'remark_name' in name:
return name['remark_name']
if 'nickname' in name:
return name['nickname']
if 'display_name' in name:
return name['display_name']
return None
@staticmethod
def get_group_member_prefer_name(name):
if name is None:
return None
if 'remark_name' in name:
return name['remark_name']
if 'display_name' in name:
return name['display_name']
if 'nickname' in name:
return name['nickname']
return None
def get_user_type(self, wx_user_id):
"""
        Get the relationship between a given account and the current user.
        :param wx_user_id: account id
        :return: relationship to the current account
"""
for account in self.contact_list:
if wx_user_id == account['UserName']:
return 'contact'
for account in self.public_list:
if wx_user_id == account['UserName']:
return 'public'
for account in self.special_list:
if wx_user_id == account['UserName']:
return 'special'
for account in self.group_list:
if wx_user_id == account['UserName']:
return 'group'
for group in self.group_members:
for member in self.group_members[group]:
if member['UserName'] == wx_user_id:
return 'group_member'
return 'unknown'
def is_contact(self, uid):
for account in self.contact_list:
if uid == account['UserName']:
return True
return False
def is_public(self, uid):
for account in self.public_list:
if uid == account['UserName']:
return True
return False
def is_special(self, uid):
for account in self.special_list:
if uid == account['UserName']:
return True
return False
def handle_msg_all(self, msg):
"""
        Handle every incoming message; subclass WXBot and override this method.
        msg:
            msg_id -> message id
            msg_type_id -> message type id
            user -> the account that sent the message
            content -> message content
        :param msg: the received message
"""
pass
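    # Illustrative sketch (not part of the original code): a subclass overrides
    # handle_msg_all with its own logic, for example a simple echo bot:
    #
    #   class EchoBot(WXBot):
    #       def handle_msg_all(self, msg):
    #           # msg_type_id 4 = message from a contact, content type 0 = plain text
    #           if msg['msg_type_id'] == 4 and msg['content']['type'] == 0:
    #               self.send_msg_by_uid(msg['content']['data'], msg['user']['id'])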
@staticmethod
def proc_at_info(msg):
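        # Web WeChat appends U+2005 (a four-per-em space) after every "@name"
        # mention in group text; splitting on it recovers the full text, the
        # text without the mentions, and a structured list of segments.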
if not msg:
return '', []
segs = msg.split(u'\u2005')
str_msg_all = ''
str_msg = ''
infos = []
if len(segs) > 1:
for i in range(0, len(segs) - 1):
segs[i] += u'\u2005'
pm = re.search(u'@.*\u2005', segs[i]).group()
if pm:
name = pm[1:-1]
string = segs[i].replace(pm, '')
str_msg_all += string + '@' + name + ' '
str_msg += string
if string:
infos.append({'type': 'str', 'value': string})
infos.append({'type': 'at', 'value': name})
else:
infos.append({'type': 'str', 'value': segs[i]})
str_msg_all += segs[i]
str_msg += segs[i]
str_msg_all += segs[-1]
str_msg += segs[-1]
infos.append({'type': 'str', 'value': segs[-1]})
else:
infos.append({'type': 'str', 'value': segs[-1]})
str_msg_all = msg
str_msg = msg
return str_msg_all.replace(u'\u2005', ''), str_msg.replace(u'\u2005', ''), infos
def extract_msg_content(self, msg_type_id, msg):
"""
content_type_id:
0 -> Text
1 -> Location
3 -> Image
4 -> Voice
5 -> Recommend
6 -> Animation
7 -> Share
8 -> Video
9 -> VideoCall
10 -> Redraw
11 -> Empty
99 -> Unknown
        :param msg_type_id: message type id
        :param msg: the raw message structure
        :return: the parsed message content
"""
mtype = msg['MsgType']
content = HTMLParser.HTMLParser().unescape(msg['Content'])
msg_id = msg['MsgId']
msg_content = {}
if msg_type_id == 0:
return {'type': 11, 'data': ''}
elif msg_type_id == 2: # File Helper
return {'type': 0, 'data': content.replace('<br/>', '\n')}
        elif msg_type_id == 3:  # group chat
sp = content.find('<br/>')
uid = content[:sp]
content = content[sp:]
content = content.replace('<br/>', '')
uid = uid[:-1]
name = self.get_contact_prefer_name(self.get_contact_name(uid))
if not name:
name = self.get_group_member_prefer_name(self.get_group_member_name(msg['FromUserName'], uid))
if not name:
name = 'unknown'
msg_content['user'] = {'id': uid, 'name': name}
else: # Self, Contact, Special, Public, Unknown
pass
msg_prefix = (msg_content['user']['name'] + ':') if 'user' in msg_content else ''
if mtype == 1:
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
r = self.session.get(content)
r.encoding = 'gbk'
data = r.text
pos = self.search_content('title', data, 'xml')
msg_content['type'] = 1
msg_content['data'] = pos
msg_content['detail'] = data
if self.DEBUG:
print ' %s[Location] %s ' % (msg_prefix, pos)
else:
msg_content['type'] = 0
if msg_type_id == 3 or (msg_type_id == 1 and msg['ToUserName'][:2] == '@@'): # Group text message
msg_infos = self.proc_at_info(content)
str_msg_all = msg_infos[0]
str_msg = msg_infos[1]
detail = msg_infos[2]
msg_content['data'] = str_msg_all
msg_content['detail'] = detail
msg_content['desc'] = str_msg
else:
msg_content['data'] = content
if self.DEBUG:
try:
print ' %s[Text] %s' % (msg_prefix, msg_content['data'])
except UnicodeEncodeError:
print ' %s[Text] (illegal text).' % msg_prefix
elif mtype == 3:
msg_content['type'] = 3
msg_content['data'] = self.get_msg_img_url(msg_id)
msg_content['img'] = self.session.get(msg_content['data']).content.encode('hex')
if self.DEBUG:
image = self.get_msg_img(msg_id)
print ' %s[Image] %s' % (msg_prefix, image)
elif mtype == 34:
msg_content['type'] = 4
msg_content['data'] = self.get_voice_url(msg_id)
msg_content['voice'] = self.session.get(msg_content['data']).content.encode('hex')
if self.DEBUG:
voice = self.get_voice(msg_id)
print ' %s[Voice] %s' % (msg_prefix, voice)
elif mtype == 37:
msg_content['type'] = 37
msg_content['data'] = msg['RecommendInfo']
if self.DEBUG:
print ' %s[useradd] %s' % (msg_prefix,msg['RecommendInfo']['NickName'])
elif mtype == 42:
msg_content['type'] = 5
info = msg['RecommendInfo']
msg_content['data'] = {'nickname': info['NickName'],
'alias': info['Alias'],
'province': info['Province'],
'city': info['City'],
'gender': ['unknown', 'male', 'female'][info['Sex']]}
if self.DEBUG:
print ' %s[Recommend]' % msg_prefix
print ' -----------------------------'
print ' | NickName: %s' % info['NickName']
print ' | Alias: %s' % info['Alias']
print ' | Local: %s %s' % (info['Province'], info['City'])
print ' | Gender: %s' % ['unknown', 'male', 'female'][info['Sex']]
print ' -----------------------------'
elif mtype == 47:
msg_content['type'] = 6
msg_content['data'] = self.search_content('cdnurl', content)
if self.DEBUG:
print ' %s[Animation] %s' % (msg_prefix, msg_content['data'])
elif mtype == 49:
msg_content['type'] = 7
if msg['AppMsgType'] == 3:
app_msg_type = 'music'
elif msg['AppMsgType'] == 5:
app_msg_type = 'link'
elif msg['AppMsgType'] == 7:
app_msg_type = 'weibo'
else:
app_msg_type = 'unknown'
msg_content['data'] = {'type': app_msg_type,
'title': msg['FileName'],
'desc': self.search_content('des', content, 'xml'),
'url': msg['Url'],
'from': self.search_content('appname', content, 'xml'),
                                   'content': msg.get('Content')  # some official accounts push 3-4 links plus one big picture at once; Url only holds the first link, Content contains all of them
}
if self.DEBUG:
print ' %s[Share] %s' % (msg_prefix, app_msg_type)
print ' --------------------------'
print ' | title: %s' % msg['FileName']
print ' | desc: %s' % self.search_content('des', content, 'xml')
print ' | link: %s' % msg['Url']
print ' | from: %s' % self.search_content('appname', content, 'xml')
                print ' | content: %s' % (msg.get('Content')[:20] if msg.get('Content') else "unknown")
print ' --------------------------'
elif mtype == 62:
msg_content['type'] = 8
msg_content['data'] = content
if self.DEBUG:
print ' %s[Video] Please check on mobiles' % msg_prefix
elif mtype == 53:
msg_content['type'] = 9
msg_content['data'] = content
if self.DEBUG:
print ' %s[Video Call]' % msg_prefix
elif mtype == 10002:
msg_content['type'] = 10
msg_content['data'] = content
if self.DEBUG:
print ' %s[Redraw]' % msg_prefix
elif mtype == 10000: # unknown, maybe red packet, or group invite
msg_content['type'] = 12
msg_content['data'] = msg['Content']
if self.DEBUG:
print ' [Unknown]'
else:
msg_content['type'] = 99
msg_content['data'] = content
if self.DEBUG:
print ' %s[Unknown]' % msg_prefix
return msg_content
def handle_msg(self, r):
"""
        Internal helper that processes raw WeChat messages.
msg_type_id:
0 -> Init
1 -> Self
2 -> FileHelper
3 -> Group
4 -> Contact
5 -> Public
6 -> Special
99 -> Unknown
        :param r: the raw WeChat message payload
"""
for msg in r['AddMsgList']:
user = {'id': msg['FromUserName'], 'name': 'unknown'}
if msg['MsgType'] == 51: # init message
msg_type_id = 0
user['name'] = 'system'
elif msg['MsgType'] == 37: # friend request
msg_type_id = 37
pass
# content = msg['Content']
# username = content[content.index('fromusername='): content.index('encryptusername')]
# username = username[username.index('"') + 1: username.rindex('"')]
# print u'[Friend Request]'
# print u' Nickname:' + msg['RecommendInfo']['NickName']
                # print u'   Greeting message: ' + msg['RecommendInfo']['Content']
                # # print u'Ticket:'+msg['RecommendInfo']['Ticket'] # the Ticket is needed when accepting the friend request
                # print u'   WeChat ID: ' + username  # if the user never set a WeChat ID, Tencent auto-generates one, but it cannot be found through search
elif msg['FromUserName'] == self.my_account['UserName']: # Self
msg_type_id = 1
user['name'] = 'self'
elif msg['ToUserName'] == 'filehelper': # File Helper
msg_type_id = 2
user['name'] = 'file_helper'
elif msg['FromUserName'][:2] == '@@': # Group
msg_type_id = 3
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
elif self.is_contact(msg['FromUserName']): # Contact
msg_type_id = 4
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
elif self.is_public(msg['FromUserName']): # Public
msg_type_id = 5
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
elif self.is_special(msg['FromUserName']): # Special
msg_type_id = 6
user['name'] = self.get_contact_prefer_name(self.get_contact_name(user['id']))
else:
msg_type_id = 99
user['name'] = 'unknown'
if not user['name']:
user['name'] = 'unknown'
user['name'] = HTMLParser.HTMLParser().unescape(user['name'])
            CreateTime = msg.get('CreateTime')
if self.DEBUG and msg_type_id != 0:
print u'[MSG] %s:' % user['name']
content = self.extract_msg_content(msg_type_id, msg)
message = {'msg_type_id': msg_type_id,
'msg_id': msg['MsgId'],
'content': content,
'to_user_id': msg['ToUserName'],
'user': user,
'time':CreateTime}
self.handle_msg_all(message)
def schedule(self):
"""
        Hook for scheduled/periodic work; override it in a subclass if needed.
        It is called between message-handling rounds, so do not block it for long.
"""
pass
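    # Illustrative sketch (assumption, not original code): a clock/reminder bot
    # could override schedule() to fire timed messages, e.g.
    #
    #   def schedule(self):
    #       # self.next_alarm and self.alarm_uid are hypothetical attributes
    #       # that the subclass itself would have to define
    #       if time.time() >= self.next_alarm:
    #           self.send_msg_by_uid(u'time is up', self.alarm_uid)
    #           self.next_alarm += 24 * 3600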
def proc_msg(self):
self.test_sync_check()
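        # Long-poll loop: sync_check() blocks until the server reports activity;
        # retcode reflects the login state and selector tells which kind of
        # update should be pulled with sync().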
while True:
check_time = time.time()
try:
[retcode, selector] = self.sync_check()
# print '[DEBUG] sync_check:', retcode, selector
                if retcode == '1100':  # logged out from the phone WeChat client
break
                elif retcode == '1101':  # web WeChat logged in from another device
break
elif retcode == '0':
                    if selector == '2':  # new message
r = self.sync()
if r is not None:
self.handle_msg(r)
                    elif selector == '3':  # unknown
r = self.sync()
if r is not None:
self.handle_msg(r)
                    elif selector == '4':  # contact list updated
r = self.sync()
if r is not None:
self.get_contact()
                    elif selector == '6':  # possibly a red packet
r = self.sync()
if r is not None:
self.handle_msg(r)
                    elif selector == '7':  # WeChat was operated on the phone
r = self.sync()
if r is not None:
self.handle_msg(r)
                    elif selector == '0':  # nothing new
pass
else:
print '[DEBUG] sync_check:', retcode, selector
r = self.sync()
if r is not None:
self.handle_msg(r)
else:
print '[DEBUG] sync_check:', retcode, selector
time.sleep(10)
self.schedule()
except:
print '[ERROR] Except in proc_msg'
print format_exc()
check_time = time.time() - check_time
if check_time < 0.8:
time.sleep(1 - check_time)
def apply_useradd_requests(self,RecommendInfo):
url = self.base_uri + '/webwxverifyuser?r='+str(int(time.time()))+'&lang=zh_CN'
params = {
"BaseRequest": self.base_request,
"Opcode": 3,
"VerifyUserListSize": 1,
"VerifyUserList": [
{
"Value": RecommendInfo['UserName'],
"VerifyUserTicket": RecommendInfo['Ticket'] }
],
"VerifyContent": "",
"SceneListCount": 1,
"SceneList": [
33
],
"skey": self.skey
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def add_groupuser_to_friend_by_uid(self,uid,VerifyContent):
"""
Proactively greet a member inside a group and submit a friend request.
uid: the group member's uid; VerifyContent: the greeting text attached to the request.
Use this API with extreme caution! Your account may be banned - use it at your own risk!
"""
if self.is_contact(uid):
return True
url = self.base_uri + '/webwxverifyuser?r='+str(int(time.time()))+'&lang=zh_CN'
params ={
"BaseRequest": self.base_request,
"Opcode": 2,
"VerifyUserListSize": 1,
"VerifyUserList": [
{
"Value": uid,
"VerifyUserTicket": ""
}
],
"VerifyContent": VerifyContent,
"SceneListCount": 1,
"SceneList": [
33
],
"skey": self.skey
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def add_friend_to_group(self,uid,group_name):
"""
Add a friend to a group chat.
"""
gid = ''
# look up the group id by its name; a group that is not saved to contacts cannot be found here
for group in self.group_list:
if group['NickName'] == group_name:
gid = group['UserName']
if gid == '':
return False
# use the group id to check whether uid is already in the group
for user in self.group_members[gid]:
if user['UserName'] == uid:
# already in the group, no need to add again
return True
url = self.base_uri + '/webwxupdatechatroom?fun=addmember&pass_ticket=%s' % self.pass_ticket
params ={
"AddMemberList": uid,
"ChatRoomName": gid,
"BaseRequest": self.base_request
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def delete_user_from_group(self,uname,gid):
"""
Remove a member from the group; only the group admin has permission to do this.
"""
uid = ""
for user in self.group_members[gid]:
if user['NickName'] == uname:
uid = user['UserName']
if uid == "":
return False
url = self.base_uri + '/webwxupdatechatroom?fun=delmember&pass_ticket=%s' % self.pass_ticket
params ={
"DelMemberList": uid,
"ChatRoomName": gid,
"BaseRequest": self.base_request
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def set_group_name(self,gid,gname):
"""
Set the group chat's name.
"""
url = self.base_uri + '/webwxupdatechatroom?fun=modtopic&pass_ticket=%s' % self.pass_ticket
params ={
"NewTopic": gname,
"ChatRoomName": gid,
"BaseRequest": self.base_request
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def send_msg_by_uid(self, word, dst='filehelper'):
url = self.base_uri + '/webwxsendmsg?pass_ticket=%s' % self.pass_ticket
msg_id = str(int(time.time() * 1000)) + str(random.random())[:5].replace('.', '')
word = self.to_unicode(word)
params = {
'BaseRequest': self.base_request,
'Msg': {
"Type": 1,
"Content": word,
"FromUserName": self.my_account['UserName'],
"ToUserName": dst,
"LocalID": msg_id,
"ClientMsgId": msg_id
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
try:
r = self.session.post(url, data=data, headers=headers)
except (ConnectionError, ReadTimeout):
return False
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
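# Illustrative example (names assumed): 'bot' is an instance of the enclosing class.
#   bot.send_msg_by_uid(u'hello')                  # dst defaults to 'filehelper'
#   bot.send_msg_by_uid(u'hi there', dst=user_id)  # user_id e.g. from get_user_id()
# The call returns True when the server answers Ret == 0 and False on network errors.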
def upload_media(self, fpath, is_img=False):
if not os.path.exists(fpath):
print '[ERROR] File not exists.'
return None
url_1 = 'https://file.'+self.base_host+'/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
url_2 = 'https://file2.'+self.base_host+'/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
flen = str(os.path.getsize(fpath))
ftype = mimetypes.guess_type(fpath)[0] or 'application/octet-stream'
files = {
'id': (None, 'WU_FILE_%s' % str(self.file_index)),
'name': (None, os.path.basename(fpath)),
'type': (None, ftype),
'lastModifiedDate': (None, time.strftime('%m/%d/%Y, %H:%M:%S GMT+0800 (CST)')),
'size': (None, flen),
'mediatype': (None, 'pic' if is_img else 'doc'),
'uploadmediarequest': (None, json.dumps({
'BaseRequest': self.base_request,
'ClientMediaId': int(time.time()),
'TotalLen': flen,
'StartPos': 0,
'DataLen': flen,
'MediaType': 4,
})),
'webwx_data_ticket': (None, self.session.cookies['webwx_data_ticket']),
'pass_ticket': (None, self.pass_ticket),
'filename': (os.path.basename(fpath), open(fpath, 'rb'),ftype.split('/')[1]),
}
self.file_index += 1
try:
r = self.session.post(url_1, files=files)
if json.loads(r.text)['BaseResponse']['Ret'] != 0:
# a non-zero Ret from the first file server means the upload failed; retry on the second server
r = self.session.post(url_2, files=files)
if json.loads(r.text)['BaseResponse']['Ret'] != 0:
print '[ERROR] Upload media failure.'
return None
mid = json.loads(r.text)['MediaId']
return mid
except Exception:
return None
def send_file_msg_by_uid(self, fpath, uid):
mid = self.upload_media(fpath)
if mid is None or not mid:
return False
url = self.base_uri + '/webwxsendappmsg?fun=async&f=json&pass_ticket=' + self.pass_ticket
msg_id = str(int(time.time() * 1000)) + str(random.random())[:5].replace('.', '')
data = {
'BaseRequest': self.base_request,
'Msg': {
'Type': 6,
'Content': ("<appmsg appid='wxeb7ec651dd0aefa9' sdkver=''><title>%s</title><des></des><action></action><type>6</type><content></content><url></url><lowurl></lowurl><appattach><totallen>%s</totallen><attachid>%s</attachid><fileext>%s</fileext></appattach><extinfo></extinfo></appmsg>" % (os.path.basename(fpath).encode('utf-8'), str(os.path.getsize(fpath)), mid, fpath.split('.')[-1])).encode('utf8'),
'FromUserName': self.my_account['UserName'],
'ToUserName': uid,
'LocalID': msg_id,
'ClientMsgId': msg_id, }, }
try:
r = self.session.post(url, data=json.dumps(data))
res = json.loads(r.text)
if res['BaseResponse']['Ret'] == 0:
return True
else:
return False
except Exception:
return False
def send_img_msg_by_uid(self, fpath, uid):
mid = self.upload_media(fpath, is_img=True)
if mid is None:
return False
url = self.base_uri + '/webwxsendmsgimg?fun=async&f=json'
data = {
'BaseRequest': self.base_request,
'Msg': {
'Type': 3,
'MediaId': mid,
'FromUserName': self.my_account['UserName'],
'ToUserName': uid,
'LocalID': str(time.time() * 1e7),
'ClientMsgId': str(time.time() * 1e7), }, }
if fpath[-4:] == '.gif':
url = self.base_uri + '/webwxsendemoticon?fun=sys'
data['Msg']['Type'] = 47
data['Msg']['EmojiFlag'] = 2
try:
r = self.session.post(url, data=json.dumps(data))
res = json.loads(r.text)
if res['BaseResponse']['Ret'] == 0:
return True
else:
return False
except Exception:
return False
def get_user_id(self, name):
if name == '':
return None
name = self.to_unicode(name)
for contact in self.contact_list:
if 'RemarkName' in contact and contact['RemarkName'] == name:
return contact['UserName']
elif 'NickName' in contact and contact['NickName'] == name:
return contact['UserName']
elif 'DisplayName' in contact and contact['DisplayName'] == name:
return contact['UserName']
for group in self.group_list:
if 'RemarkName' in group and group['RemarkName'] == name:
return group['UserName']
if 'NickName' in group and group['NickName'] == name:
return group['UserName']
if 'DisplayName' in group and group['DisplayName'] == name:
return group['UserName']
return ''
def send_msg(self, name, word, isfile=False):
uid = self.get_user_id(name)
if uid is not None:
if isfile:
with open(word, 'r') as f:
result = True
for line in f.readlines():
line = line.replace('\n', '')
print '-> ' + name + ': ' + line
if self.send_msg_by_uid(line, uid):
pass
else:
result = False
time.sleep(1)
return result
else:
word = self.to_unicode(word)
if self.send_msg_by_uid(word, uid):
return True
else:
return False
else:
if self.DEBUG:
print '[ERROR] This user does not exist.'
return True
@staticmethod
def search_content(key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if pm:
return pm.group(1)
return 'unknown'
def run(self):
self.get_uuid()
self.gen_qr_code(os.path.join(self.temp_pwd,'wxqr.png'))
print '[INFO] Please use WeChat to scan the QR code.'
result = self.wait4login()
if result != SUCCESS:
print '[ERROR] Web WeChat login failed, code=%s' % (result,)
return
if self.login():
print '[INFO] Web WeChat login succeeded.'
else:
print '[ERROR] Web WeChat login failed.'
return
if self.init():
print '[INFO] Web WeChat init succeeded.'
else:
print '[ERROR] Web WeChat init failed.'
return
self.status_notify()
self.get_contact()
print '[INFO] Get %d contacts' % len(self.contact_list)
print '[INFO] Start processing messages.'
self.proc_msg()
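# Typical entry point (illustrative sketch; "WXBot" is an assumed name for the
# enclosing class):
#   if __name__ == '__main__':
#       bot = WXBot()
#       bot.DEBUG = True        # echo every incoming message to stdout
#       bot.conf['qr'] = 'png'  # or 'tty' to render the QR code in the terminal
#       bot.run()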
def get_uuid(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': 'wx782c26e4c19acffb',
'fun': 'new',
'lang': 'zh_CN',
'_': int(time.time()) * 1000 + random.randint(1, 999),
}
r = self.session.get(url, params=params)
r.encoding = 'utf-8'
data = r.text
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
def gen_qr_code(self, qr_file_path):
string = 'https://login.weixin.qq.com/l/' + self.uuid
qr = pyqrcode.create(string)
if self.conf['qr'] == 'png':
qr.png(qr_file_path, scale=8)
show_image(qr_file_path)
# img = Image.open(qr_file_path)
# img.show()
elif self.conf['qr'] == 'tty':
print(qr.terminal(quiet_zone=1))
def do_request(self, url):
r = self.session.get(url)
r.encoding = 'utf-8'
data = r.text
param = re.search(r'window.code=(\d+);', data)
code = param.group(1)
return code, data
def wait4login(self):
"""
HTTP comet (long-polling) login flow:
tip=1: waiting for the user to scan the QR code
    201: scanned
    408: timeout
tip=0: waiting for the user to confirm the login
    200: confirmed
"""
LOGIN_TEMPLATE = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s'
tip = 1
try_later_secs = 1
MAX_RETRY_TIMES = 10
code = UNKONWN
retry_time = MAX_RETRY_TIMES
while retry_time > 0:
url = LOGIN_TEMPLATE % (tip, self.uuid, int(time.time()))
code, data = self.do_request(url)
if code == SCANED:
print '[INFO] QR code scanned; please confirm the login on your phone.'
tip = 0
elif code == SUCCESS:  # login confirmed on the phone
param = re.search(r'window.redirect_uri="(\S+?)";', data)
redirect_uri = param.group(1) + '&fun=new'
self.redirect_uri = redirect_uri
self.base_uri = redirect_uri[:redirect_uri.rfind('/')]
temp_host = self.base_uri[8:]
self.base_host = temp_host[:temp_host.find("/")]
return code
elif code == TIMEOUT:
print '[ERROR] WeChat login timed out; retrying in %s secs...' % (try_later_secs,)
tip = 1  # reset and poll again
retry_time -= 1
time.sleep(try_later_secs)
else:
print ('[ERROR] WeChat login exception, return_code=%s; retrying in %s secs...' %
(code, try_later_secs))
tip = 1
retry_time -= 1
time.sleep(try_later_secs)
return code
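# Control-flow note (illustrative): wait4login() keeps polling the login URL and
# returns SUCCESS once the user confirms on the phone; otherwise it returns the
# last polled code after MAX_RETRY_TIMES attempts, which run() treats as a failure.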
def login(self):
if len(self.redirect_uri) < 4:
print '[ERROR] Login failed due to network problem, please try again.'
return False
r = self.session.get(self.redirect_uri)
r.encoding = 'utf-8'
data = r.text
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.base_request = {
'Uin': self.uin,
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.device_id,
}
return True
def init(self):
url = self.base_uri + '/webwxinit?r=%i&lang=en_US&pass_ticket=%s' % (int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.base_request
}
r = self.session.post(url, data=json.dumps(params))
r.encoding = 'utf-8'
dic = json.loads(r.text)
self.sync_key = dic['SyncKey']
self.my_account = dic['User']
self.sync_key_str = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val'])
for keyVal in self.sync_key['List']])
return dic['BaseResponse']['Ret'] == 0
def status_notify(self):
url = self.base_uri + '/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % self.pass_ticket
self.base_request['Uin'] = int(self.base_request['Uin'])
params = {
'BaseRequest': self.base_request,
"Code": 3,
"FromUserName": self.my_account['UserName'],
"ToUserName": self.my_account['UserName'],
"ClientMsgId": int(time.time())
}
r = self.session.post(url, data=json.dumps(params))
r.encoding = 'utf-8'
dic = json.loads(r.text)
return dic['BaseResponse']['Ret'] == 0
def test_sync_check(self):
for host1 in ['webpush.', 'webpush2.']:
self.sync_host = host1+self.base_host
try:
retcode = self.sync_check()[0]
except:
retcode = -1
if retcode == '0':
return True
return False
def sync_check(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.device_id,
'synckey': self.sync_key_str,
'_': int(time.time()),
}
url = 'https://' + self.sync_host + '/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
try:
r = self.session.get(url, timeout=60)
r.encoding = 'utf-8'
data = r.text
pm = re.search(r'window.synccheck=\{retcode:"(\d+)",selector:"(\d+)"\}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
except:
return [-1, -1]
def sync(self):
url = self.base_uri + '/webwxsync?sid=%s&skey=%s&lang=en_US&pass_ticket=%s' \
% (self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.base_request,
'SyncKey': self.sync_key,
'rr': ~int(time.time())
}
try:
r = self.session.post(url, data=json.dumps(params), timeout=60)
r.encoding = 'utf-8'
dic = json.loads(r.text)
if dic['BaseResponse']['Ret'] == 0:
self.sync_key = dic['SyncKey']
self.sync_key_str = '|'.join([str(keyVal['Key']) + '_' + str(keyVal['Val'])
for keyVal in self.sync_key['List']])
return dic
except:
return None
def get_icon(self, uid, gid=None):
"""
Fetch the avatar of a contact, or of a member inside a group.
:param uid: contact id
:param gid: group id; if not None, fetch uid's avatar as a member of that group, otherwise fetch the contact's own avatar
"""
if gid is None:
url = self.base_uri + '/webwxgeticon?username=%s&skey=%s' % (uid, self.skey)
else:
url = self.base_uri + '/webwxgeticon?username=%s&skey=%s&chatroomid=%s' % (
uid, self.skey, self.encry_chat_room_id_list[gid])
r = self.session.get(url)
data = r.content
fn = 'icon_' + uid + '.jpg'
with open(os.path.join(self.temp_pwd,fn), 'wb') as f:
f.write(data)
return fn
def get_head_img(self, uid):
"""
Fetch a group's avatar.
:param uid: group uid
"""
url = self.base_uri + '/webwxgetheadimg?username=%s&skey=%s' % (uid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'head_' + uid + '.jpg'
with open(os.path.join(self.temp_pwd,fn), 'wb') as f:
f.write(data)
return fn
def get_msg_img_url(self, msgid):
return self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
def get_msg_img(self, msgid):
"""
Fetch an image message, downloading the image locally.
:param msgid: message id
:return: path of the saved local image file
"""
url = self.base_uri + '/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'img_' + msgid + '.jpg'
with open(os.path.join(self.temp_pwd,fn), 'wb') as f:
f.write(data)
return fn
def get_voice_url(self, msgid):
return self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
def get_voice(self, msgid):
"""
Fetch a voice message, downloading the audio locally.
:param msgid: voice message id
:return: path of the saved local audio file
"""
url = self.base_uri + '/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
r = self.session.get(url)
data = r.content
fn = 'voice_' + msgid + '.mp3'
with open(os.path.join(self.temp_pwd,fn), 'wb') as f:
f.write(data)
return fn
def set_remarkname(self, uid, remarkname):  # set a contact's remark (alias) name
url = self.base_uri + '/webwxoplog?lang=zh_CN&pass_ticket=%s' \
% (self.pass_ticket)
remarkname = self.to_unicode(remarkname)
params = {
'BaseRequest': self.base_request,
'CmdId': 2,
'RemarkName': remarkname,
'UserName': uid
}
try:
r = self.session.post(url, data=json.dumps(params), timeout=60)
r.encoding = 'utf-8'
dic = json.loads(r.text)
return dic['BaseResponse']['ErrMsg']
except:
return None
| apache-2.0 | -1,308,928,200,595,814,400 | 37.699608 | 420 | 0.485834 | false | 3.582256 | false | false | false |
tdyas/pants | src/python/pants/testutil/test_base.py | 1 | 33698 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import logging
import os
import unittest
import warnings
from abc import ABC, ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import contextmanager
from tempfile import mkdtemp
from textwrap import dedent
from typing import Any, Dict, Iterable, List, Optional, Sequence, Type, TypeVar, Union, cast
from pants.base.build_root import BuildRoot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exceptions import TaskError
from pants.base.specs import AddressSpec, AddressSpecs, FilesystemSpecs, Specs
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target as TargetV1
from pants.engine.fs import GlobMatchErrorBehavior, PathGlobs, PathGlobsAndRoot, Snapshot
from pants.engine.internals.scheduler import SchedulerSession
from pants.engine.legacy.graph import HydratedField
from pants.engine.legacy.structs import SourceGlobs, SourcesField
from pants.engine.rules import RootRule
from pants.engine.selectors import Params
from pants.engine.target import Target
from pants.init.engine_initializer import EngineInitializer
from pants.init.util import clean_global_runtime_state
from pants.option.global_options import BuildFileImportsBehavior
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.source.source_root import SourceRootConfig
from pants.source.wrapped_globs import EagerFilesetWithSpec
from pants.subsystem.subsystem import Subsystem
from pants.task.goal_options_mixin import GoalOptionsMixin
from pants.testutil.base.context_utils import create_context_from_options
from pants.testutil.engine.util import init_native
from pants.testutil.option.fakes import create_options_for_optionables
from pants.testutil.subsystem import util as subsystem_util
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import (
recursive_dirname,
relative_symlink,
safe_file_dump,
safe_mkdir,
safe_mkdtemp,
safe_open,
safe_rmtree,
)
from pants.util.memo import memoized_method
from pants.util.meta import classproperty
class AbstractTestGenerator(ABC):
"""A mixin that facilitates test generation at runtime."""
@classmethod
@abstractmethod
def generate_tests(cls):
"""Generate tests for a given class.
This should be called against the composing class in its defining module, e.g.
class ThingTest(TestGenerator):
...
ThingTest.generate_tests()
"""
@classmethod
def add_test(cls, method_name, method):
"""A classmethod that adds dynamic test methods to a given class.
:param string method_name: The name of the test method (e.g. `test_thing_x`).
:param callable method: A callable representing the method. This should take a 'self' argument
as its first parameter for instance method binding.
"""
assert not hasattr(
cls, method_name
), f"a test with name `{method_name}` already exists on `{cls.__name__}`!"
assert method_name.startswith("test_"), f"{method_name} is not a valid test name!"
setattr(cls, method_name, method)
class TestBase(unittest.TestCase, metaclass=ABCMeta):
"""A baseclass useful for tests requiring a temporary buildroot.
:API: public
"""
_scheduler: Optional[SchedulerSession] = None
_build_graph = None
_address_mapper = None
def build_path(self, relpath):
"""Returns the canonical BUILD file path for the given relative build path.
:API: public
"""
if os.path.basename(relpath).startswith("BUILD"):
return relpath
else:
return os.path.join(relpath, "BUILD")
def create_dir(self, relpath):
"""Creates a directory under the buildroot.
:API: public
relpath: The relative path to the directory from the build root.
"""
path = os.path.join(self.build_root, relpath)
safe_mkdir(path)
self.invalidate_for(relpath)
return path
def create_workdir_dir(self, relpath):
"""Creates a directory under the work directory.
:API: public
relpath: The relative path to the directory from the work directory.
"""
path = os.path.join(self.pants_workdir, relpath)
safe_mkdir(path)
self.invalidate_for(relpath)
return path
def invalidate_for(self, *relpaths):
"""Invalidates all files from the relpath, recursively up to the root.
Many python operations implicitly create parent directories, so we assume that touching a
file located below directories that do not currently exist will result in their creation.
"""
if self._scheduler is None:
return
files = {f for relpath in relpaths for f in recursive_dirname(relpath)}
return self._scheduler.invalidate_files(files)
def create_link(self, relsrc, reldst):
"""Creates a symlink within the buildroot.
:API: public
relsrc: A relative path for the source of the link.
reldst: A relative path for the destination of the link.
"""
src = os.path.join(self.build_root, relsrc)
dst = os.path.join(self.build_root, reldst)
relative_symlink(src, dst)
self.invalidate_for(reldst)
def create_file(self, relpath, contents="", mode="w"):
"""Writes to a file under the buildroot.
:API: public
relpath: The relative path to the file from the build root.
contents: A string containing the contents of the file - '' by default.
mode: The mode to write to the file in - over-write by default.
"""
path = os.path.join(self.build_root, relpath)
with safe_open(path, mode=mode) as fp:
fp.write(contents)
self.invalidate_for(relpath)
return path
def create_files(self, path, files):
"""Writes to a file under the buildroot with contents same as file name.
:API: public
path: The relative path to the file from the build root.
files: List of file names.
"""
for f in files:
self.create_file(os.path.join(path, f), contents=f)
def create_workdir_file(self, relpath, contents="", mode="w"):
"""Writes to a file under the work directory.
:API: public
relpath: The relative path to the file from the work directory.
contents: A string containing the contents of the file - '' by default.
mode: The mode to write to the file in - over-write by default.
"""
path = os.path.join(self.pants_workdir, relpath)
with safe_open(path, mode=mode) as fp:
fp.write(contents)
return path
def add_to_build_file(self, relpath, target):
"""Adds the given target specification to the BUILD file at relpath.
:API: public
relpath: The relative path to the BUILD file from the build root.
target: A string containing the target definition as it would appear in a BUILD file.
"""
self.create_file(self.build_path(relpath), target, mode="a")
def make_target(
self,
spec="",
target_type=TargetV1,
dependencies=None,
derived_from=None,
synthetic=False,
make_missing_sources=True,
**kwargs,
):
"""Creates a target and injects it into the test's build graph.
:API: public
:param string spec: The target address spec that locates this target.
:param type target_type: The concrete target subclass to create this new target from.
:param list dependencies: A list of target instances this new target depends on.
:param derived_from: The target this new target was derived from.
:type derived_from: :class:`pants.build_graph.target.Target`
"""
self._init_target_subsystem()
address = Address.parse(spec)
if make_missing_sources and "sources" in kwargs:
for source in kwargs["sources"]:
if "*" not in source:
self.create_file(os.path.join(address.spec_path, source), mode="a", contents="")
kwargs["sources"] = self.sources_for(kwargs["sources"], address.spec_path)
target = target_type(
name=address.target_name, address=address, build_graph=self.build_graph, **kwargs
)
dependencies = dependencies or []
self.build_graph.apply_injectables([target])
self.build_graph.inject_target(
target,
dependencies=[dep.address for dep in dependencies],
derived_from=derived_from,
synthetic=synthetic,
)
# TODO(John Sirois): This re-creates a little bit too much work done by the BuildGraph.
# Fixup the BuildGraph to deal with non BuildFileAddresses better and just leverage it.
traversables = [target.compute_dependency_address_specs(payload=target.payload)]
for dependency_spec in itertools.chain(*traversables):
dependency_address = Address.parse(dependency_spec, relative_to=address.spec_path)
dependency_target = self.build_graph.get_target(dependency_address)
if not dependency_target:
raise ValueError(
"Tests must make targets for dependency specs ahead of them "
"being traversed, {} tried to traverse {} which does not exist.".format(
target, dependency_address
)
)
if dependency_target not in target.dependencies:
self.build_graph.inject_dependency(
dependent=target.address, dependency=dependency_address
)
target.mark_transitive_invalidation_hash_dirty()
return target
def sources_for(
self, package_relative_path_globs: List[str], package_dir: str = "",
) -> EagerFilesetWithSpec:
sources_field = SourcesField(
address=BuildFileAddress(
rel_path=os.path.join(package_dir, "BUILD"), target_name="_bogus_target_for_test",
),
arg="sources",
source_globs=SourceGlobs(*package_relative_path_globs),
)
field = self.scheduler.product_request(HydratedField, [sources_field])[0]
return cast(EagerFilesetWithSpec, field.value)
@classmethod
def alias_groups(cls):
"""
:API: public
"""
return BuildFileAliases(targets={"target": TargetV1})
@classmethod
def rules(cls):
# Required for sources_for:
return [RootRule(SourcesField)]
@classmethod
def target_types(cls) -> Sequence[Type[Target]]:
return ()
@classmethod
def build_config(cls):
build_config = BuildConfiguration()
build_config.register_aliases(cls.alias_groups())
build_config.register_rules(cls.rules())
build_config.register_target_types(cls.target_types())
return build_config
def setUp(self):
"""
:API: public
"""
super().setUp()
# Avoid resetting the Runtracker here, as that is specific to fork'd process cleanup.
clean_global_runtime_state(reset_subsystem=True)
self.addCleanup(self._reset_engine)
safe_mkdir(self.build_root, clean=True)
safe_mkdir(self.pants_workdir)
self.addCleanup(safe_rmtree, self.build_root)
BuildRoot().path = self.build_root
self.addCleanup(BuildRoot().reset)
self.subprocess_dir = os.path.join(self.build_root, ".pids")
self.options = defaultdict(dict) # scope -> key-value mapping.
self.options[""] = {
"pants_workdir": self.pants_workdir,
"pants_supportdir": os.path.join(self.build_root, "build-support"),
"pants_distdir": os.path.join(self.build_root, "dist"),
"pants_configdir": os.path.join(self.build_root, "config"),
"pants_subprocessdir": self.subprocess_dir,
"cache_key_gen_version": "0-test",
}
self.options["cache"] = {
"read_from": [],
"write_to": [],
}
self._build_configuration = self.build_config()
self._inited_target = False
subsystem_util.init_subsystem(TargetV1.TagAssignments)
def buildroot_files(self, relpath=None):
"""Returns the set of all files under the test build root.
:API: public
:param string relpath: If supplied, only collect files from this subtree.
:returns: All file paths found.
:rtype: set
"""
def scan():
for root, dirs, files in os.walk(os.path.join(self.build_root, relpath or "")):
for f in files:
yield os.path.relpath(os.path.join(root, f), self.build_root)
return set(scan())
def _reset_engine(self):
if self._scheduler is not None:
self._build_graph.reset()
self._scheduler.invalidate_all_files()
@contextmanager
def isolated_local_store(self):
"""Temporarily use an anonymous, empty Store for the Scheduler.
In most cases we re-use a Store across all tests, since `file` and `directory` entries are
content addressed, and `process` entries are intended to have strong cache keys. But when
dealing with non-referentially transparent `process` executions, it can sometimes be
necessary to avoid this cache.
"""
self._scheduler = None
local_store_dir = os.path.realpath(safe_mkdtemp())
self._init_engine(local_store_dir=local_store_dir)
try:
yield
finally:
self._scheduler = None
safe_rmtree(local_store_dir)
@property
def build_root(self):
return self._build_root()
@property
def pants_workdir(self):
return self._pants_workdir()
@memoized_method
def _build_root(self):
return os.path.realpath(mkdtemp(suffix="_BUILD_ROOT"))
@memoized_method
def _pants_workdir(self):
return os.path.join(self._build_root(), ".pants.d")
def _init_engine(self, local_store_dir: Optional[str] = None) -> None:
if self._scheduler is not None:
return
options_bootstrapper = OptionsBootstrapper.create(args=["--pants-config-files=[]"])
local_store_dir = (
local_store_dir
or options_bootstrapper.bootstrap_options.for_global_scope().local_store_dir
)
# NB: This uses the long form of initialization because it needs to directly specify
# `cls.alias_groups` rather than having them be provided by bootstrap options.
graph_session = EngineInitializer.setup_legacy_graph_extended(
pants_ignore_patterns=[],
use_gitignore=False,
local_store_dir=local_store_dir,
build_file_prelude_globs=(),
build_file_imports_behavior=BuildFileImportsBehavior.error,
glob_match_error_behavior=GlobMatchErrorBehavior.error,
native=init_native(),
options_bootstrapper=options_bootstrapper,
build_root=self.build_root,
build_configuration=self.build_config(),
build_ignore_patterns=None,
).new_session(zipkin_trace_v2=False, build_id="buildid_for_test")
self._scheduler = graph_session.scheduler_session
self._build_graph, self._address_mapper = graph_session.create_build_graph(
Specs(address_specs=AddressSpecs([]), filesystem_specs=FilesystemSpecs([])),
self._build_root(),
)
@property
def scheduler(self) -> SchedulerSession:
if self._scheduler is None:
self._init_engine()
self.post_scheduler_init()
return cast(SchedulerSession, self._scheduler)
def post_scheduler_init(self):
"""Run after initializing the Scheduler, it will have the same lifetime."""
pass
@property
def address_mapper(self):
if self._address_mapper is None:
self._init_engine()
return self._address_mapper
@property
def build_graph(self):
if self._build_graph is None:
self._init_engine()
return self._build_graph
def reset_build_graph(self, reset_build_files=False, delete_build_files=False):
"""Start over with a fresh build graph with no targets in it."""
if delete_build_files or reset_build_files:
files = [f for f in self.buildroot_files() if os.path.basename(f) == "BUILD"]
if delete_build_files:
for f in files:
os.remove(os.path.join(self.build_root, f))
self.invalidate_for(*files)
if self._build_graph is not None:
self._build_graph.reset()
_P = TypeVar("_P")
def request_single_product(
self, product_type: Type["TestBase._P"], subject: Union[Params, Any]
) -> "TestBase._P":
result = assert_single_element(self.scheduler.product_request(product_type, [subject]))
return cast(TestBase._P, result)
def set_options_for_scope(self, scope, **kwargs):
self.options[scope].update(kwargs)
def context(
self,
for_task_types=None,
for_subsystems=None,
options=None,
target_roots=None,
console_outstream=None,
workspace=None,
scheduler=None,
address_mapper=None,
**kwargs,
):
"""
:API: public
:param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
"""
# Many tests use source root functionality via the SourceRootConfig.global_instance().
# (typically accessed via Target.target_base), so we always set it up, for convenience.
for_subsystems = set(for_subsystems or ())
for subsystem in for_subsystems:
if subsystem.options_scope is None:
raise TaskError(
"You must set a scope on your subsystem type before using it in tests."
)
optionables = {SourceRootConfig} | self._build_configuration.optionables() | for_subsystems
for_task_types = for_task_types or ()
for task_type in for_task_types:
scope = task_type.options_scope
if scope is None:
raise TaskError("You must set a scope on your task type before using it in tests.")
optionables.add(task_type)
# If task is expected to inherit goal-level options, register those directly on the task,
# by subclassing the goal options registrar and settings its scope to the task scope.
if issubclass(task_type, GoalOptionsMixin):
subclass_name = "test_{}_{}_{}".format(
task_type.__name__,
task_type.goal_options_registrar_cls.options_scope,
task_type.options_scope,
)
optionables.add(
type(
subclass_name,
(task_type.goal_options_registrar_cls,),
{"options_scope": task_type.options_scope},
)
)
# Now expand to all deps.
all_optionables = set()
for optionable in optionables:
all_optionables.update(si.optionable_cls for si in optionable.known_scope_infos())
# Now default the option values and override with any caller-specified values.
# TODO(benjy): Get rid of the options arg, and require tests to call set_options.
options = options.copy() if options else {}
for s, opts in self.options.items():
scoped_opts = options.setdefault(s, {})
scoped_opts.update(opts)
fake_options = create_options_for_optionables(all_optionables, options=options, **kwargs)
Subsystem.reset(reset_options=True)
Subsystem.set_options(fake_options)
scheduler = scheduler or self.scheduler
address_mapper = address_mapper or self.address_mapper
context = create_context_from_options(
fake_options,
target_roots=target_roots,
build_graph=self.build_graph,
build_configuration=self._build_configuration,
address_mapper=address_mapper,
console_outstream=console_outstream,
workspace=workspace,
scheduler=scheduler,
)
return context
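# Illustrative sketch (MyTask and its scope are hypothetical): a task test will
# typically build a context and hand it to the task under test, e.g.
#   context = self.context(for_task_types=[MyTask],
#                          options={'my-task': {'some_flag': True}})
#   task = MyTask(context, self.pants_workdir)
# Option scopes follow the same dict-of-dicts layout used for self.options above.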
def tearDown(self):
"""
:API: public
"""
super().tearDown()
Subsystem.reset()
@classproperty
def subsystems(cls):
"""Initialize these subsystems when running your test.
If your test instantiates a target type that depends on any subsystems, those subsystems need to
be initialized in your test. You can override this property to return the necessary subsystem
classes.
:rtype: list of type objects, all subclasses of Subsystem
"""
return TargetV1.subsystems()
def _init_target_subsystem(self):
if not self._inited_target:
subsystem_util.init_subsystems(self.subsystems)
self._inited_target = True
def target(self, spec):
"""Resolves the given target address to a V1 Target object.
:API: public
spec: The BUILD target address to resolve.
Returns the corresponding V1 Target or else None if the address does not point to a defined Target.
"""
self._init_target_subsystem()
address = Address.parse(spec)
self.build_graph.inject_address_closure(address)
return self.build_graph.get_target(address)
def targets(self, address_spec):
"""Resolves a target spec to one or more V1 Target objects.
:API: public
address_spec: Either a BUILD target address or a target glob using the siblings ':' or
descendants '::' suffixes.
Returns the set of all Targets found.
"""
address_spec = CmdLineSpecParser(self.build_root).parse_spec(address_spec)
assert isinstance(address_spec, AddressSpec)
targets = []
for address in self.build_graph.inject_address_specs_closure([address_spec]):
targets.append(self.build_graph.get_target(address))
return targets
def create_library(
self,
*,
path: str,
target_type: str,
name: str,
sources: Optional[List[str]] = None,
java_sources: Optional[List[str]] = None,
provides: Optional[str] = None,
dependencies: Optional[List[str]] = None,
requirements: Optional[str] = None,
):
"""Creates a library target of given type at the BUILD file at path with sources.
:API: public
path: The relative path to the BUILD file from the build root.
target_type: valid pants target type.
name: Name of the library target.
sources: List of source files, given as paths relative to path.
java_sources: List of java sources.
provides: Provides with a format consistent with what should be rendered in the resulting BUILD
file, eg: "artifact(org='org.pantsbuild.example', name='hello-greet', repo=public)"
dependencies: List of dependencies: [':protobuf-2.4.1']
requirements: Python requirements with a format consistent with what should be in the resulting
build file, eg: "[python_requirement(foo==1.0.0)]"
"""
if sources:
self.create_files(path, sources)
sources_str = f"sources={repr(sources)}," if sources is not None else ""
if java_sources is not None:
formatted_java_sources = ",".join(f'"{str_target}"' for str_target in java_sources)
java_sources_str = f"java_sources=[{formatted_java_sources}],"
else:
java_sources_str = ""
provides_str = f"provides={provides}," if provides is not None else ""
dependencies_str = f"dependencies={dependencies}," if dependencies is not None else ""
requirements_str = f"requirements={requirements}," if requirements is not None else ""
self.add_to_build_file(
path,
dedent(
f"""
{target_type}(name='{name}',
{sources_str}
{java_sources_str}
{provides_str}
{dependencies_str}
{requirements_str}
)
"""
),
)
return self.target(f"{path}:{name}")
def create_resources(self, path, name, *sources):
"""
:API: public
"""
return self.create_library(path=path, target_type="resources", name=name, sources=sources,)
def assertUnorderedPrefixEqual(self, expected, actual_iter):
"""Consumes len(expected) items from the given iter, and asserts that they match, unordered.
:API: public
"""
actual = list(itertools.islice(actual_iter, len(expected)))
self.assertEqual(sorted(expected), sorted(actual))
def assertPrefixEqual(self, expected, actual_iter):
"""Consumes len(expected) items from the given iter, and asserts that they match, in order.
:API: public
"""
self.assertEqual(expected, list(itertools.islice(actual_iter, len(expected))))
def assertInFile(self, string, file_path):
"""Verifies that a string appears in a file.
:API: public
"""
with open(file_path, "r") as f:
content = f.read()
self.assertIn(string, content, f'"{string}" is not in the file {f.name}:\n{content}')
@contextmanager
def assertRaisesWithMessage(self, exception_type, error_text):
"""Verifies than an exception message is equal to `error_text`.
:param type exception_type: The exception type which is expected to be raised within the body.
:param str error_text: Text that the exception message should match exactly with
`self.assertEqual()`.
:API: public
"""
with self.assertRaises(exception_type) as cm:
yield cm
self.assertEqual(error_text, str(cm.exception))
@contextmanager
def assertRaisesWithMessageContaining(self, exception_type, error_text):
"""Verifies that the string `error_text` appears in an exception message.
:param type exception_type: The exception type which is expected to be raised within the body.
:param str error_text: Text that the exception message should contain with `self.assertIn()`.
:API: public
"""
with self.assertRaises(exception_type) as cm:
yield cm
self.assertIn(error_text, str(cm.exception))
@contextmanager
def assertDoesNotRaise(self, exc_class: Type[BaseException] = Exception):
"""Verifies that the block does not raise an exception of the specified type.
:API: public
"""
try:
yield
except exc_class as e:
raise AssertionError(f"section should not have raised, but did: {e}") from e
def get_bootstrap_options(self, cli_options=()):
"""Retrieves bootstrap options.
:param cli_options: An iterable of CLI flags to pass as arguments to `OptionsBootstrapper`.
"""
args = tuple(["--pants-config-files=[]"]) + tuple(cli_options)
return OptionsBootstrapper.create(args=args).bootstrap_options.for_global_scope()
def make_snapshot(self, files: Dict[str, Union[str, bytes]]) -> Snapshot:
"""Makes a snapshot from a map of file name to file content."""
with temporary_dir() as temp_dir:
for file_name, content in files.items():
mode = "wb" if isinstance(content, bytes) else "w"
safe_file_dump(os.path.join(temp_dir, file_name), content, mode=mode)
return cast(
Snapshot,
self.scheduler.capture_snapshots((PathGlobsAndRoot(PathGlobs(("**",)), temp_dir),))[
0
],
)
def make_snapshot_of_empty_files(self, files: Iterable[str]) -> Snapshot:
"""Makes a snapshot with empty content for each file.
This is a convenience around `TestBase.make_snapshot`, which allows specifying the content
for each file.
"""
return self.make_snapshot({fp: "" for fp in files})
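# Illustrative examples of the snapshot helpers above:
#   snapshot = self.make_snapshot({"src/app.py": "print('hi')", "data.bin": b"\x00"})
#   empty = self.make_snapshot_of_empty_files(["BUILD", "README.md"])
# Both capture files through the scheduler, so the returned Snapshot can be fed
# to rules under test.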
class LoggingRecorder:
"""Simple logging handler to record warnings."""
def __init__(self):
self._records = []
self.level = logging.DEBUG
def handle(self, record):
self._records.append(record)
def _messages_for_level(self, levelname):
return [
f"{record.name}: {record.getMessage()}"
for record in self._records
if record.levelname == levelname
]
def infos(self):
return self._messages_for_level("INFO")
def warnings(self):
return self._messages_for_level("WARNING")
def errors(self):
return self._messages_for_level("ERROR")
@contextmanager
def captured_logging(self, level=None):
root_logger = logging.getLogger()
old_level = root_logger.level
root_logger.setLevel(level or logging.NOTSET)
handler = self.LoggingRecorder()
root_logger.addHandler(handler)
try:
yield handler
finally:
root_logger.setLevel(old_level)
root_logger.removeHandler(handler)
@contextmanager
def warnings_catcher(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
def assertWarning(self, w, category, warning_text):
single_warning = assert_single_element(w)
self.assertEqual(single_warning.category, category)
warning_message = single_warning.message
self.assertEqual(warning_text, str(warning_message))
def retrieve_single_product_at_target_base(self, product_mapping, target):
mapping_for_target = product_mapping.get(target)
single_base_dir = assert_single_element(list(mapping_for_target.keys()))
single_product = assert_single_element(mapping_for_target[single_base_dir])
return single_product
def populate_target_dict(self, target_map):
"""Return a dict containing targets with files generated according to `target_map`.
The keys of `target_map` are target address strings, while the values of `target_map` should be
a dict which contains keyword arguments fed into `self.make_target()`, along with a few special
keys. Special keys are:
- 'key': used to access the target in the returned dict. Defaults to the target address spec.
- 'filemap': creates files at the specified relative paths to the target.
An `OrderedDict` of 2-tuples must be used with the targets topologically ordered, if
they have dependencies on each other. Note that dependency cycles are not currently supported
with this method.
:param target_map: Dict mapping each target address to generate -> kwargs for
`self.make_target()`, along with a 'key' and optionally a 'filemap' argument.
:return: Dict mapping the required 'key' argument -> target instance for each element of
`target_map`.
:rtype: dict
"""
target_dict = {}
# Create a target from each specification and insert it into `target_dict`.
for address_spec, target_kwargs in target_map.items():
unprocessed_kwargs = target_kwargs.copy()
target_base = Address.parse(address_spec).spec_path
# Populate the target's owned files from the specification.
filemap = unprocessed_kwargs.pop("filemap", {})
for rel_path, content in filemap.items():
buildroot_path = os.path.join(target_base, rel_path)
self.create_file(buildroot_path, content)
# Ensure any dependencies exist in the target dict (`target_map` must then be an
# OrderedDict).
# The 'key' is used to access the target in `target_dict`, and defaults to `target_spec`.
target_address = Address.parse(address_spec)
key = unprocessed_kwargs.pop("key", target_address.target_name)
dep_targets = []
for dep_spec in unprocessed_kwargs.pop("dependencies", []):
existing_tgt_key = target_map[dep_spec]["key"]
dep_targets.append(target_dict[existing_tgt_key])
# Register the generated target.
generated_target = self.make_target(
spec=address_spec, dependencies=dep_targets, **unprocessed_kwargs
)
target_dict[key] = generated_target
return target_dict
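# Illustrative target_map for populate_target_dict(); an OrderedDict is required
# when entries depend on each other, and dependency entries must carry a 'key':
#   target_map = OrderedDict([
#       ('src/a:a', {'key': 'a', 'filemap': {'a.txt': 'A'}}),
#       ('src/b:b', {'key': 'b', 'dependencies': ['src/a:a']}),
#   ])
#   targets = self.populate_target_dict(target_map)  # -> {'a': <target a>, 'b': <target b>}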
| apache-2.0 | 4,055,148,794,678,700,000 | 37.336746 | 107 | 0.625586 | false | 4.31583 | true | false | false |
srusskih/SublimeBicycleRepair | bike/query/findDefinition.py | 1 | 9774 | from __future__ import generators
from bike.query.common import Match, MatchFinder, \
getScopeForLine, indexToCoordinates, \
translateSourceCoordsIntoASTNode, scanScopeForMatches, \
isAMethod, convertNodeToMatchObject, walkLinesContainingStrings
from bike.parsing.parserutils import generateLogicalLines,\
generateLogicalLinesAndLineNumbers, \
splitLogicalLines, makeLineParseable
import compiler
from compiler.ast import Getattr, Name, AssName, AssAttr
from bike.parsing.fastparserast import getRoot, Package, Class, \
Module, Function, Instance
import re
from bike.query.getTypeOf import getTypeOfExpr, UnfoundType, \
isWordInLine, resolveImportedModuleOrPackage
from bike.parsing import visitor
from bike.parsing.visitor import walkAndGenerate
from bike.parsing.parserutils import makeLineParseable,splitLogicalLines
from bike.parsing.newstuff import getSourceNodesContainingRegex
from bike.parsing.load import getSourceNode
from bike import log
class CantFindDefinitionException:
pass
def findAllPossibleDefinitionsByCoords(filepath,lineno,col):
#try:
node = translateSourceCoordsIntoASTNode(filepath,lineno,col)
#except:
# import traceback
# traceback.print_exc()
if node is None:
raise "selected node type not supported"
scope = getScopeForLine(getSourceNode(filepath),lineno)
match = findDefinitionFromASTNode(scope,node)
if match is not None:
yield match
if isinstance(node,Getattr) and (match is None or match.confidence != 100):
root = getRoot()
name = node.attrname
for match in scanPythonPathForMatchingMethodNames(name,filepath):
yield match
print >>log.progress,"done"
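# Illustrative usage (Python 2, matching this module): iterate the generator and
# inspect the Match objects it yields.
#   for match in findAllPossibleDefinitionsByCoords('/path/to/file.py', 10, 4):
#       print match.filename, match.lineno, match.colno, match.confidence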
def findDefinitionFromASTNode(scope,node):
assert node is not None
if isinstance(node,Name) or isinstance(node,AssName):
while 1:
# try scope children
childscope = scope.getChild(node.name)
if childscope is not None:
return convertNodeToMatchObject(childscope,100)
if isinstance(scope,Package):
scope = scope.getChild("__init__")
# try arguments and assignments
match = scanScopeAST(scope,node.name,
AssignmentAndFnArgsSearcher(node.name))
if match is not None:
return match
# try imports
match = searchImportedModulesForDefinition(scope,node)
if match is not None:
return match
if not isinstance(scope,Module):
# try parent scope
scope = scope.getParent()
else:
break
assert isinstance(scope,Module)
elif isinstance(node,Getattr) or isinstance(node,AssAttr):
exprtype = getTypeOfExpr(scope,node.expr)
if not (exprtype is None or isinstance(exprtype,UnfoundType)):
if isinstance(exprtype,Instance):
exprtype = exprtype.getType()
match = findDefinitionOfAttributeFromASTNode(exprtype,
node.attrname)
else:
match = findDefinitionFromASTNode(exprtype,
Name(node.attrname))
if match is not None:
return match
elif isinstance(node,compiler.ast.Function) or \
isinstance(node,compiler.ast.Class):
if isAMethod(scope,node):
match = findDefinitionOfAttributeFromASTNode(scope,
node.name)
else:
match = findDefinitionFromASTNode(scope,Name(node.name))
if match is not None:
return match
type = getTypeOfExpr(scope,node)
if type is not None and (not isinstance(type,UnfoundType)) and \
(not isinstance(type,Instance)):
return convertNodeToMatchObject(type,100)
else:
return None
def findDefinitionOfAttributeFromASTNode(type,name):
assert isinstance(type,Class)
attrfinder = AttrbuteDefnFinder([type],name)
# first scan the method names:
for child in type.getChildNodes():
if child.name == name:
return convertNodeToMatchObject(child,100)
# then scan the method source for attribues
for child in type.getChildNodes():
if isinstance(child,Function):
try:
return scanScopeForMatches(child.module.getSourceNode(),
child, attrfinder,
name).next()
except StopIteration:
continue
class AttrbuteDefnFinder(MatchFinder):
def __init__(self,targetClasses,targetAttribute):
self.targetClasses = targetClasses
self.targetAttributeName = targetAttribute
def visitAssAttr(self, node):
for c in node.getChildNodes():
self.visit(c)
if node.attrname == self.targetAttributeName:
exprtype = getTypeOfExpr(self.scope,node.expr)
if isinstance(exprtype,Instance) and \
exprtype.getType() in self.targetClasses:
self.appendMatch(self.targetAttributeName)
#else:
# self.appendMatch(self.targetAttributeName,50)
self.popWordsUpTo(node.attrname)
def searchImportedModulesForDefinition(scope,node):
lines = scope.module.getSourceNode().getLines()
for lineno in scope.getImportLineNumbers():
logicalline = getLogicalLine(lines,lineno)
logicalline = makeLineParseable(logicalline)
ast = compiler.parse(logicalline)
class ImportVisitor:
def __init__(self,node):
self.target = node
self.match = None
assert isinstance(self.target,Name), \
"Getattr not supported"
def visitFrom(self, node):
module = resolveImportedModuleOrPackage(scope,node.modname)
if module is None: # couldn't find module
return
if node.names[0][0] == '*': # e.g. from foo import *
match = findDefinitionFromASTNode(module,self.target)
if match is not None:
self.match = match
return
for name, alias in node.names:
if alias is None and name == self.target.name:
match = findDefinitionFromASTNode(module,self.target)
if match is not None:
self.match = match
return
match = visitor.walk(ast, ImportVisitor(node)).match
if match:
return match
# loop
def getLogicalLine(lines,lineno):
return generateLogicalLines(lines[lineno-1:]).next()
class AssignmentAndFnArgsSearcher(MatchFinder):
def __init__(self,name):
self.targetname = name
self.match = None
def visitAssName(self, node):
if node.name == self.targetname:
idx = self.getNextIndexOfWord(self.targetname)
self.match = idx
return
def visitFunction(self, node):
self.popWordsUpTo(node.name)
for arg, default in self.zipArgs(node.argnames, node.defaults):
if arg == self.targetname:
idx = self.getNextIndexOfWord(self.targetname)
self.match = idx
return
self.popWordsUpTo(arg)
if default is not None:
self.visit(default)
self.visit(node.code)
def getMatch(self):
return self.match
# scans for lines containing keyword, and then runs the visitor over
# the parsed AST for that line
def scanScopeAST(scope,keyword,matchfinder):
lines = scope.generateLinesNotIncludingThoseBelongingToChildScopes()
match = None
for line,linenum in generateLogicalLinesAndLineNumbers(lines):
if isWordInLine(keyword, line):
doctoredline = makeLineParseable(line)
ast = compiler.parse(doctoredline)
matchfinder.reset(line)
match = visitor.walk(ast,matchfinder).getMatch()
if match is not None:
column,yoffset = indexToCoordinates(line,match)
m = createMatch(scope,linenum + yoffset,column)
return m
return None
def createMatch(scope,lineno,x):
m = Match()
m.sourcenode = scope.module.getSourceNode()
m.filename = m.sourcenode.filename
m.lineno = lineno
m.colno = x
m.confidence = 100
return m
# scan for methods globally (from perspective of 'perspectiveFilename')
def scanPythonPathForMatchingMethodNames(name, contextFilename):
class MethodFinder:
def __init__(self,srcnode):
self.matches = []
self.srcnode = srcnode
def visitFunction(self,node):
node = getScopeForLine(self.srcnode, self.lineno)
if isinstance(node.getParent(),Class):
if node.name == name:
self.matches.append(convertNodeToMatchObject(node,50))
for srcnode in getSourceNodesContainingRegex(name,contextFilename):
m = MethodFinder(srcnode)
walkLinesContainingStrings(srcnode.fastparseroot,m,[name])
for match in m.matches:
yield match
def getIndexOfWord(line,targetword):
words = re.split("(\w+)", line)
idx = 0
for word in words:
if word == targetword:
break
idx += len(word)
return idx
| mit | -1,420,466,848,213,629,000 | 34.285199 | 79 | 0.609576 | false | 4.454877 | false | false | false |
mdurrant-b3/acos-client | acos_client/tests/unit/v21/test_high_availability.py | 2 | 1714 | # Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import unittest2 as unittest
except ImportError:
import unittest
from acos_client import client
import responses
HOSTNAME = 'fake_a10'
BASE_URL = "https://{}:443/services/rest/v2.1/?format=json&method=".format(HOSTNAME)
AUTH_URL = "{}authenticate".format(BASE_URL)
HA_URL = '{}ha.sync_config&session_id={}'.format(BASE_URL, 'foobar')
class TestHighAvailability(unittest.TestCase):
def setUp(self):
self.client = client.Client(HOSTNAME, '21', 'fake_username', 'fake_password')
@responses.activate
def test_high_availability_sync(self):
responses.add(responses.POST, AUTH_URL, json={'session_id': 'foobar'})
responses.add(responses.POST, HA_URL)
resp = self.client.ha.sync('192.168.2.254', 'fake_username', 'fake_password')
self.assertIsNone(resp)
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[1].request.method, responses.POST)
self.assertEqual(responses.calls[1].request.url, HA_URL)
| apache-2.0 | 3,472,475,755,547,812,400 | 34.708333 | 85 | 0.705951 | false | 3.678112 | true | false | false |
JesusTorrado/cosmo_mini_toolbox | examples/plot_lik_example_summary.py | 1 | 1030 | import os
import sys
import matplotlib.pyplot as plt
sys.path.append("../src")
from Chain import Chain
from plot_lik import plot_lik_2D
base_folder = "./chains"
chain = "planck_WP"
chains = [Chain(os.path.join(base_folder, chain))]
params=["H0", "omega_b"]
labels=[r"$H_0$", r"$\omega_b$"]
fig, axarr = plt.subplots(2,2)
axes_locations = { "profile": axarr[0,1],
"mean": axarr[1,0], "marginal": axarr[1,1]}
for mode, axes in axes_locations.items():
ax, options = plot_lik_2D(mode, chains, params=params, labels=labels, format = "-loglik", dpi=200, fontsize_labels=14, fontsize_ticks=8,
save=0, axes=axes
)
axes.set_title(mode.title(), fontdict={'fontsize':16})
# Text:
text = ("Chain:\n %s\n\n"%[c.name() for c in chains] +
"Parameters:\n%s"%(params))
axarr[0,0].set_axis_off()
axarr[0,0].text(0, 1, text, weight="bold", verticalalignment="top")
# Plot
plt.tight_layout()
plt.savefig("summary.png", **options)
plt.show()
plt.close()
| gpl-3.0 | 2,794,667,350,369,179,000 | 27.611111 | 140 | 0.615534 | false | 2.82967 | false | false | false |
LudovicRousseau/pyscard | smartcard/sw/ErrorCheckingChain.py | 2 | 3520 | """The error checking chain is a list of status word
(sw1, sw2) error check strategies.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from sys import exc_info
class ErrorCheckingChain(object):
"""The error checking chain is a list of response apdu status word
(sw1, sw2) error check strategies. Each strategy in the chain is
called until an error is detected. A L{smartcard.sw.SWException}
exception is raised when an error is detected. No exception is
raised if no error is detected.
Implementation derived from Bruce Eckel, Thinking in Python. The
L{ErrorCheckingChain} implements the Chain Of Responsibility design
pattern.
"""
def __init__(self, chain, strategy):
"""constructor. Appends a strategy to the L{ErrorCheckingChain}
chain."""
self.strategy = strategy
self.chain = chain
self.chain.append(self)
self.excludes = []
def next(self):
"""Returns next error checking strategy."""
# Where this link is in the chain:
location = self.chain.index(self)
if not self.end():
return self.chain[location + 1]
def addFilterException(self, exClass):
"""Add an exception filter to the error checking chain.
@param exClass: the exception to exclude, e.g.
L{smartcard.sw.SWExceptions.WarningProcessingException} A filtered
exception will not be raised when the sw1,sw2 conditions that
would raise the exception are met.
"""
self.excludes.append(exClass)
if self.end():
return
self.next().addFilterException(exClass)
def end(self):
"""Returns True if this is the end of the error checking
strategy chain."""
return (self.chain.index(self) + 1 >= len(self.chain))
def __call__(self, data, sw1, sw2):
"""Called to test data, sw1 and sw2 for error on the chain."""
try:
self.strategy(data, sw1, sw2)
except tuple(self.excludes) as exc:
            # The following additional filter may look redundant, it isn't.
# It checks that type(exc) is *equal* to any of self.excludes,
# rather than equal-or-subclass to any of self.excludes.
# This maintains backward compatibility with the behaviour of
# pyscard <= 1.6.16.
# if exception is filtered, return
for exception in self.excludes:
if exception == exc_info()[0]:
return
# otherwise reraise exception
raise
# if not done, call next strategy
if self.end():
return
return self.next()(data, sw1, sw2)
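

# --- Illustrative usage sketch (added; not part of the original pyscard file) ---
# A "strategy" is any callable taking (data, sw1, sw2) that raises an error
# exception when the status word denotes a failure.
if __name__ == '__main__':
    class _DemoError(Exception):
        """Stand-in for one of the smartcard.sw.SWExceptions error classes."""

    def _demo_strategy(data, sw1, sw2):
        # Treat anything other than 0x90 0x00 (normal processing) as an error.
        if (sw1, sw2) != (0x90, 0x00):
            raise _DemoError("sw1=%02X sw2=%02X" % (sw1, sw2))

    errorchain = []
    errorchain = [ErrorCheckingChain(errorchain, _demo_strategy)]
    errorchain[0]([], 0x90, 0x00)       # success status word: passes silently
    try:
        errorchain[0]([], 0x6A, 0x82)   # "file not found" status word: raises
    except _DemoError as e:
        print('error detected: %s' % e)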
| lgpl-2.1 | -1,702,786,570,087,140,400 | 36.052632 | 75 | 0.659943 | false | 4.16568 | false | false | false |
algorhythms/LeetCode | 139 Word Break.py | 3 | 3284 | """
Given a string s and a dictionary of words dict, determine if s can be segmented into a space-separated sequence of one or more dictionary words.
For example, given
s = "leetcode",
dict = ["leet", "code"].
Return true because "leetcode" can be segmented as "leet code".
"""
__author__ = 'Danyang'
class Solution:
def wordBreak_TLE(self, s, dict):
"""
TLE
dfs
O(n^2)
        Algorithm: DFS. This gets TLE because DFS repeatedly calculates whether a certain part of the string can be segmented.
        Therefore we can use dynamic programming instead.
:param s: a string
:param dict: a set of string
:return: a boolean
"""
string_builder = ""
if s=="":
return True
# greedy
for i in range(len(s)):
string_builder += s[i]
if string_builder in dict:
try:
if self.wordBreak_TLE(s[i+1:], dict):
return True
else:
continue
except IndexError:
return True
return False
def wordBreak(self, s, dict):
"""
__ __________ ___ __ ______ ______ .__ __. _______.
| | | ____\ \ / / | | / | / __ \ | \ | | / |
| | | |__ \ V / | | | ,----'| | | | | \| | | (----`
| | | __| > < | | | | | | | | | . ` | \ \
| `----.| |____ / . \ | | | `----.| `--' | | |\ | .----) |
|_______||_______/__/ \__\ |__| \______| \______/ |__| \__| |_______/
Dynamic programming
        The dynamic solution can tell us whether the string can be broken into words, but cannot tell us which words it is broken into.
O(n*m)
Google On Campus Presentation, demonstration questions. 4 Sep 2014, Nanyang Technological University, Singapore
        dp[i]: rolling dp (rather than using a 2D dp[i][j])
        dp[i] means s[:i] can be made up of a sequence of lexicons
- l e e t c o d e
T F F F T F F F T
Lexicons = {the, theta, table, down, there, bled, own}
- t h e t a b l e d o w n t h e r e
T F F T F T F F T T F F T F F F F T
:param s: a string
:param dict: a set of string
:return: a boolean
"""
dp = [False] * (len(s)+1)
dp[0] = True # dummy
for i in range(len(dp)): # [0, len(s)+1)
# continue from matched condition
if dp[i]:
for word in dict:
try:
# trivial
if dp[i+len(word)]==True:
continue
# main
if s[i:i+len(word)]==word: # test whether [i, i+len) can construct a word. THE BEAUTY OF HALF OPEN
dp[i+len(word)] = True # record the checking
except IndexError:
continue
return dp[-1]
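

# Illustrative checks (added; not part of the original solution file): the
# docstring's "leetcode" example plus two made-up inputs, using Solution above.
def _demo_word_break():
    sol = Solution()
    assert sol.wordBreak("leetcode", ["leet", "code"]) is True
    assert sol.wordBreak("catsand", ["cats", "cat", "sand"]) is True
    assert sol.wordBreak("catsanddog", ["cats", "and", "sand"]) is False  # no "dog" in the dictionary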
if __name__=="__main__":
assert Solution().wordBreak("aaaaaaa", ["aaaa", "aaa"])==True | mit | 8,804,632,450,728,369,000 | 33.333333 | 145 | 0.417174 | false | 3.673378 | false | false | false |
hrayr-artunyan/shuup | shuup/front/template_helpers/product.py | 2 | 3358 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from jinja2.utils import contextfunction
from shuup.core.models import (
AttributeVisibility, Product, ProductAttribute, ProductCrossSell,
ProductCrossSellType, Supplier
)
from shuup.utils.text import force_ascii
def get_visible_attributes(product):
return ProductAttribute.objects.filter(
product=product,
attribute__visibility_mode=AttributeVisibility.SHOW_ON_PRODUCT_PAGE
)
# Deprecated, see `get_product_cross_sells()`
@contextfunction
def get_products_bought_with(context, product, count=5):
related_product_cross_sells = (
ProductCrossSell.objects
.filter(product1=product, type=ProductCrossSellType.COMPUTED)
.order_by("-weight")[:(count * 4)])
products = []
for cross_sell in related_product_cross_sells:
product2 = cross_sell.product2
if product2.is_visible_to_user(context["request"].user) and product2.is_list_visible():
products.append(product2)
if len(products) >= count:
break
return products
@contextfunction
def is_visible(context, product):
request = context["request"]
shop_product = product.get_shop_instance(shop=request.shop)
for error in shop_product.get_visibility_errors(customer=request.customer): # pragma: no branch
return False
return True
@contextfunction
def get_product_cross_sells(
context, product, relation_type=ProductCrossSellType.RELATED,
count=4, orderable_only=True):
request = context["request"]
rtype = map_relation_type(relation_type)
related_product_ids = list((
ProductCrossSell.objects
.filter(product1=product, type=rtype)
.order_by("weight")[:(count * 4)]).values_list("product2_id", flat=True)
)
related_products = []
for product in Product.objects.filter(id__in=related_product_ids):
shop_product = product.get_shop_instance(request.shop)
if orderable_only:
for supplier in Supplier.objects.all():
if shop_product.is_orderable(supplier, request.customer, shop_product.minimum_purchase_quantity):
related_products.append(product)
break
elif shop_product.is_visible(request.customer):
related_products.append(product)
# Order related products by weight. Related product ids is in weight order.
# If same related product is linked twice to product then lowest weight stands.
related_products.sort(key=lambda prod: list(related_product_ids).index(prod.id))
return related_products[:count]
def map_relation_type(relation_type):
"""
Map relation type to enum value.
:type relation_type: ProductCrossSellType|str
:rtype: ProductCrossSellType
:raises: `LookupError` if unknown string is given
"""
if isinstance(relation_type, ProductCrossSellType):
return relation_type
attr_name = force_ascii(relation_type).upper()
try:
return getattr(ProductCrossSellType, attr_name)
except AttributeError:
raise LookupError('Unknown ProductCrossSellType %r' % (relation_type,))
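

# Examples (added for illustration; not part of the original module):
#   map_relation_type("related")  -> ProductCrossSellType.RELATED
#   map_relation_type(ProductCrossSellType.COMPUTED) is returned unchanged
#   map_relation_type("no-such-type") raises LookupError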
| agpl-3.0 | -4,474,399,706,157,997,600 | 34.723404 | 113 | 0.692674 | false | 3.855339 | false | false | false |
Anmol-Singh-Jaggi/gDrive-auto-sync | gDrive-auto-sync/api_boilerplate.py | 1 | 1850 | """
This module is responsible for doing all the authentication.
Adapted from the Google API Documentation.
"""
from __future__ import print_function
import os
import httplib2
import apiclient
import oauth2client
try:
import argparse
flags = argparse.ArgumentParser(
parents=[oauth2client.tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
# Enter your project name here!!
APPLICATION_NAME = 'API Project'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.gdrive-credentials-cache')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gdrive-credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = oauth2client.client.flow_from_clientsecrets(
CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = oauth2client.tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = oauth2client.tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
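

# Illustrative follow-up (added): the module-level objects built below are what
# the sync scripts are expected to use, e.g. (hypothetical call):
#
#   results = file_service.list(pageSize=10, fields="files(id, name)").execute()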
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
file_service = apiclient.discovery.build('drive', 'v3', http=http).files()
| mit | 296,546,577,918,135,200 | 30.355932 | 74 | 0.692432 | false | 4.013015 | false | false | false |
stratus-ss/python_scripts | openshift_scripts/pipeline_related/log.py | 1 | 2198 | import sys
import os.path
import logging
import colorlog
import inspect
from logging.handlers import RotatingFileHandler
# make external modules only log above warning and upper
logging.getLogger("paramiko").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
# define root logging strategy
root = logging.getLogger()
root.setLevel(logging.DEBUG)
####################
# define new log level for SUCCESS
SUCCESS = logging.INFO + 1
logging.addLevelName( SUCCESS, 'SUCCESS')
####################
# log on stdout
stdout_formatter = colorlog.ColoredFormatter(
"%(asctime)s - %(log_color)s%(levelname)-7s%(reset)s %(message)s",
datefmt="%H:%M:%S",
reset=True,
log_colors={
'DEBUG': 'white',
'INFO': 'white',
'SUCCESS': 'green',
'WARNING': 'yellow',
'ERROR': 'white,bg_red',
'CRITICAL': 'white,bg_red',
},
secondary_log_colors={},
style='%'
)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(stdout_formatter)
root.addHandler(ch)
####################
# also log in a dedicated log file (full date, no color)
file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = RotatingFileHandler('figaro_deploy.log', maxBytes=10000000, backupCount=5)
fh.setLevel(logging.DEBUG)
fh.setFormatter(file_formatter)
root.addHandler(fh)
def __get_log_msg(txt):
'''Get filename and line number where the log occurs'''
frame = inspect.currentframe().f_back.f_back
if frame and frame.f_back:
frame = frame.f_back
func = frame.f_code
return "[%s:%s] %s" % (os.path.basename(func.co_filename), frame.f_lineno, txt)
def debug(msg):
logging.debug(__get_log_msg(msg))
def info(msg):
logging.info(__get_log_msg(msg))
def success(msg):
logging.log(SUCCESS, __get_log_msg(msg))
def warning(msg):
logging.warning(__get_log_msg(msg))
def error(msg, exit_on_error = True):
logging.error(__get_log_msg(msg))
if exit_on_error:
exit(1)
def critical(msg):
logging.critical(__get_log_msg(msg))
exit(1)
| lgpl-3.0 | -2,924,091,142,885,948,400 | 26.822785 | 83 | 0.631483 | false | 3.461417 | false | false | false |
mF2C/COMPSs | compss/programming_model/bindings/python/src/pycompss/matlib/algebra/mean.py | 1 | 1823 | #!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Mathematical Library: Algebra: Mean
============================================
This file contains the arithmetic mean algorithm.
"""
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
def _list_lenght(l):
"""
    Recursive function to count the leaf elements of a (possibly nested) list.
    :return: Total number of non-list elements
"""
if l:
if not isinstance(l[0], list):
return 1 + _list_lenght(l[1:])
else:
return _list_lenght(l[0]) + _list_lenght(l[1:])
return 0
@task(returns=float)
def _mean(data, n):
"""
Calculate the mean of a list,
:param data: List of elements
:param n: Number of elements
:return: Mean
"""
return sum(data) / float(n)
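

# NOTE (added): `reduce_add` is used by mean() below via merge_reduce(), but it
# is neither defined nor imported in this file.  A minimal sketch of the
# presumably intended pairwise-addition task:
@task(returns=float)
def reduce_add(x, y):
    """Add two partial results (assumed implementation)."""
    return x + y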
def mean(data, wait=False):
"""
Arithmetic mean.
:param data: chunked data
:param wait: if we want to wait for result. Default False
:return: mean of data.
"""
n = _list_lenght(data)
result = merge_reduce(reduce_add, [_mean(x, n) for x in data])
if wait:
from pycompss.api.api import compss_wait_on
result = compss_wait_on(result)
return result
| apache-2.0 | -762,812,293,143,996,200 | 24.319444 | 75 | 0.642348 | false | 3.588583 | false | false | false |
ssafar/effulgence2epub | src/gen_html.py | 2 | 1058 | #!/usr/bin/python
"""As usual, we eat chapters from stdin and output xhtml files, this time with
nice CSS and no tables. We don't copy the images to the relevant places since
that's not really amenable to parallelization (which is too much fun)."""
import os
import pkg_resources
import pyratemp
import common
chapter_template = pyratemp.Template(
string=pkg_resources.resource_string(__name__, "html_chapter_template.xhtml"))
if __name__ == "__main__":
chapters = common.get_chapters_from_stdin()
if not os.path.isdir("html_mirror"):
os.mkdir("html_mirror")
for introonly_chapter in chapters.chapter:
chapter = common.full_chapter_from_introonly(introonly_chapter)
chapter_html = chapter_template(chapter=chapter)
output_file_name = os.path.join("html_mirror",
common.chapter_to_internal_name(chapter))
with open(output_file_name, mode="w") as xhtml_file:
xhtml_file.write(chapter_html.encode('utf-8'))
| agpl-3.0 | -2,889,829,906,240,603,000 | 29.228571 | 82 | 0.651229 | false | 3.861314 | false | false | false |
j3rgus/assignments | math toys/hw2.py | 1 | 2240 | import numpy as np
from PIL import Image
from random import randint, random
from math import sqrt, factorial
################### A ####################
def perm(l):
if not l:
return [l]
else:
res = []
for i in range(len(l)):
r = l[:i] + l[i+1:]
for x in perm(r):
res.append(l[i:i+1] + x)
return res
################### B ####################
def drawPascalMod(n, d):
pallete = [(randint(0, 256),)*3 for i in range(d)]
img = Image.new('RGB', (n, n))
px = img.load()
A = np.full((n,n), 1, int);
for j in range(2,n):
for i in range(1,j):
A[i,j] = (A[i-1,j-1] + A[i,j-1]) % d
px[i,j] = pallete[A[i,j]]
img.save('pascal.png')
################### C ####################
def fact(n):
res = 1;
for i in range(2,n+1):
res = res * i
return res
def powerInt(x, y):
res = 1
for i in bin(y)[2:]:
res *= res
if i == '1':
res *= x
return res
def nroot(x, n, eps):
g = x/n
dx = g
while abs(dx) > eps or not dx:
dx = (1/n) * (x/powerInt(g, n-1) - g)
g = g + dx
return g
def exp(x, k):
res = 0
for i in range(k+1):
res += powerInt(x,i)/fact(i)
return res
def loge(x, k):
n = len(str(int(x)))
a = x / powerInt(10, n-1)
y = (a - 1) / (a + 1)
res = 0
for i in range(k+1):
res += powerInt(y, 2*i + 1) / (2 * i + 1)
return res * 2
def powerFractionApprox(x, y, prec):
n = int(y*prec)
d = prec
return nroot(powerInt(x, n), d, 0.00001)
def powerExpApprox(x, y, prec):
return exp(y * loge(x, 100), prec)
################### D ####################
def piApproxGL(k):
pi = 0
s = 1
for i in range(k+1):
pi += s / (2*i + 1)
s *= -1
return 4 * pi
def piApproxArch(k):
a = 2 * sqrt(3)
b = 3
for i in range(k+1):
a = 2*a*b / (a+b)
b = sqrt(a*b)
return a
def piApproxMonteCarlo(k):
count = 0
for i in range(k):
x = random()
y = random()
if x**2 + y**2 < 1:
count += 1
return 4*count/k
##########################################
if __name__=='__main__':
# print(piApproxMonteCarlo(10000))
# print(piApproxArch(10000))
# print(piApproxGL(10000))
# print(powerExpApprox(2, 2.2, 33))
# print(powerFractionApprox(2, 2.2, 100))
# print(nroot(676786786, 7878, 0.00000001))
# print(powerInt(12,2))
# print(perm([1,2,3,4]), len(perm([1,2,3,4])))
drawPascalMod(30, 5)
| gpl-3.0 | 7,143,100,578,823,331,000 | 18.823009 | 51 | 0.511607 | false | 2.22002 | false | false | false |
rbuffat/pyidf | tests/test_coolingtowervariablespeed.py | 1 | 8903 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.condenser_equipment_and_heat_exchangers import CoolingTowerVariableSpeed
log = logging.getLogger(__name__)
class TestCoolingTowerVariableSpeed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_coolingtowervariablespeed(self):
pyidf.validation_level = ValidationLevel.error
obj = CoolingTowerVariableSpeed()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_water_inlet_node_name = "node|Water Inlet Node Name"
obj.water_inlet_node_name = var_water_inlet_node_name
# node
var_water_outlet_node_name = "node|Water Outlet Node Name"
obj.water_outlet_node_name = var_water_outlet_node_name
# alpha
var_model_type = "CoolToolsCrossFlow"
obj.model_type = var_model_type
# object-list
var_model_coefficient_name = "object-list|Model Coefficient Name"
obj.model_coefficient_name = var_model_coefficient_name
# real
var_design_inlet_air_wetbulb_temperature = 20.0
obj.design_inlet_air_wetbulb_temperature = var_design_inlet_air_wetbulb_temperature
# real
var_design_approach_temperature = 0.0001
obj.design_approach_temperature = var_design_approach_temperature
# real
var_design_range_temperature = 0.0001
obj.design_range_temperature = var_design_range_temperature
# real
var_design_water_flow_rate = 0.0001
obj.design_water_flow_rate = var_design_water_flow_rate
# real
var_design_air_flow_rate = 0.0001
obj.design_air_flow_rate = var_design_air_flow_rate
# real
var_design_fan_power = 0.0001
obj.design_fan_power = var_design_fan_power
# object-list
var_fan_power_ratio_function_of_air_flow_rate_ratio_curve_name = "object-list|Fan Power Ratio Function of Air Flow Rate Ratio Curve Name"
obj.fan_power_ratio_function_of_air_flow_rate_ratio_curve_name = var_fan_power_ratio_function_of_air_flow_rate_ratio_curve_name
# real
var_minimum_air_flow_rate_ratio = 0.35
obj.minimum_air_flow_rate_ratio = var_minimum_air_flow_rate_ratio
# real
var_fraction_of_tower_capacity_in_free_convection_regime = 0.1
obj.fraction_of_tower_capacity_in_free_convection_regime = var_fraction_of_tower_capacity_in_free_convection_regime
# real
var_basin_heater_capacity = 0.0
obj.basin_heater_capacity = var_basin_heater_capacity
# real
var_basin_heater_setpoint_temperature = 2.0
obj.basin_heater_setpoint_temperature = var_basin_heater_setpoint_temperature
# object-list
var_basin_heater_operating_schedule_name = "object-list|Basin Heater Operating Schedule Name"
obj.basin_heater_operating_schedule_name = var_basin_heater_operating_schedule_name
# alpha
var_evaporation_loss_mode = "LossFactor"
obj.evaporation_loss_mode = var_evaporation_loss_mode
# real
var_evaporation_loss_factor = 19.19
obj.evaporation_loss_factor = var_evaporation_loss_factor
# real
var_drift_loss_percent = 20.2
obj.drift_loss_percent = var_drift_loss_percent
# alpha
var_blowdown_calculation_mode = "ConcentrationRatio"
obj.blowdown_calculation_mode = var_blowdown_calculation_mode
# real
var_blowdown_concentration_ratio = 2.0
obj.blowdown_concentration_ratio = var_blowdown_concentration_ratio
# object-list
var_blowdown_makeup_water_usage_schedule_name = "object-list|Blowdown Makeup Water Usage Schedule Name"
obj.blowdown_makeup_water_usage_schedule_name = var_blowdown_makeup_water_usage_schedule_name
# object-list
var_supply_water_storage_tank_name = "object-list|Supply Water Storage Tank Name"
obj.supply_water_storage_tank_name = var_supply_water_storage_tank_name
# node
var_outdoor_air_inlet_node_name = "node|Outdoor Air Inlet Node Name"
obj.outdoor_air_inlet_node_name = var_outdoor_air_inlet_node_name
# integer
var_number_of_cells = 1
obj.number_of_cells = var_number_of_cells
# alpha
var_cell_control = "MinimalCell"
obj.cell_control = var_cell_control
# real
var_cell_minimum_water_flow_rate_fraction = 0.50005
obj.cell_minimum_water_flow_rate_fraction = var_cell_minimum_water_flow_rate_fraction
# real
var_cell_maximum_water_flow_rate_fraction = 1.0
obj.cell_maximum_water_flow_rate_fraction = var_cell_maximum_water_flow_rate_fraction
# real
var_sizing_factor = 0.0001
obj.sizing_factor = var_sizing_factor
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.coolingtowervariablespeeds[0].name, var_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].water_inlet_node_name, var_water_inlet_node_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].water_outlet_node_name, var_water_outlet_node_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].model_type, var_model_type)
self.assertEqual(idf2.coolingtowervariablespeeds[0].model_coefficient_name, var_model_coefficient_name)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_inlet_air_wetbulb_temperature, var_design_inlet_air_wetbulb_temperature)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_approach_temperature, var_design_approach_temperature)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_range_temperature, var_design_range_temperature)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_water_flow_rate, var_design_water_flow_rate)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_air_flow_rate, var_design_air_flow_rate)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].design_fan_power, var_design_fan_power)
self.assertEqual(idf2.coolingtowervariablespeeds[0].fan_power_ratio_function_of_air_flow_rate_ratio_curve_name, var_fan_power_ratio_function_of_air_flow_rate_ratio_curve_name)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].minimum_air_flow_rate_ratio, var_minimum_air_flow_rate_ratio)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].fraction_of_tower_capacity_in_free_convection_regime, var_fraction_of_tower_capacity_in_free_convection_regime)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].basin_heater_capacity, var_basin_heater_capacity)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].basin_heater_setpoint_temperature, var_basin_heater_setpoint_temperature)
self.assertEqual(idf2.coolingtowervariablespeeds[0].basin_heater_operating_schedule_name, var_basin_heater_operating_schedule_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].evaporation_loss_mode, var_evaporation_loss_mode)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].evaporation_loss_factor, var_evaporation_loss_factor)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].drift_loss_percent, var_drift_loss_percent)
self.assertEqual(idf2.coolingtowervariablespeeds[0].blowdown_calculation_mode, var_blowdown_calculation_mode)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].blowdown_concentration_ratio, var_blowdown_concentration_ratio)
self.assertEqual(idf2.coolingtowervariablespeeds[0].blowdown_makeup_water_usage_schedule_name, var_blowdown_makeup_water_usage_schedule_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].supply_water_storage_tank_name, var_supply_water_storage_tank_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].outdoor_air_inlet_node_name, var_outdoor_air_inlet_node_name)
self.assertEqual(idf2.coolingtowervariablespeeds[0].number_of_cells, var_number_of_cells)
self.assertEqual(idf2.coolingtowervariablespeeds[0].cell_control, var_cell_control)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].cell_minimum_water_flow_rate_fraction, var_cell_minimum_water_flow_rate_fraction)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].cell_maximum_water_flow_rate_fraction, var_cell_maximum_water_flow_rate_fraction)
self.assertAlmostEqual(idf2.coolingtowervariablespeeds[0].sizing_factor, var_sizing_factor) | apache-2.0 | 2,277,212,770,271,596,500 | 56.818182 | 183 | 0.716163 | false | 3.329469 | false | false | false |
kingrichard2005/qsarweb-public | qsar/helper.py | 1 | 4992 | import csv
import time
import math
import sys
import os
def placeDataIntoArray(fileName):
'''Read uploaded data into memory'''
try:
from numpy import * # provides complex math and array functions
with open(fileName, mode='rbU') as csvfile:
datareader = csv.reader(csvfile, delimiter=',', quotechar=' ')
dataArray = array([row for row in datareader], dtype=float64, order='C')
if (min(dataArray.shape) == 1): # flatten arrays of one row or column
return dataArray.flatten(order='C')
else:
return dataArray
except:
print 'error in placeDataIntoArray(...)'
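

# Illustrative call sequence (added; file names are made up), using the two
# helpers in this module -- see splitRawInput below for the return contract:
#
#   data = placeDataIntoArray("descriptors.csv")
#   paths = splitRawInput("descriptors.csv", sessionid="abc123")
#   # paths == [trainX, trainY, testX, testY, cvX, cvY] file locations on disk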
def splitRawInput(sourceFileName, sessionid = "fakesession", cvSplit = .15, testSplit = .15):
'''
Creates separate training, cross-validation and test set
files from raw input set uploaded by the user. This
    uses the 70/15/15 rule, i.e. 70% of the raw data is used for
    training, while 15% each is used for the cross-validation and
    test sets. Note, this function assumes a well-formatted input file.
TODO: document valid input file format
Returns a list of the locations of these three files on disk
relative to the qsar root application folder. The list of elements
returned is in the following order [trainX, trainY, testX, testY, cvX, cvY]
'''
try:
from numpy import * # provides complex math and array functions
# read raw data set
dataArray = placeDataIntoArray(sourceFileName);
static_location = os.path.join( os.getcwd(), 'qsar', 'static', 'qsar', 'uploads');
rawData = array( [e for e in dataArray], dtype=float64, order='C' );
# The last column of the data set is assumed to be the target (y) pIC50 values,
# we separate this data from the rest of the observation sets using Numpy's array
# slicing syntax
cvData = rawData[ 0 : int(len(rawData) * cvSplit), : ]
cv_pIC50 = cvData[ :, -1 ]
cvData = cvData[ :, :-1 ]
# update raw data set, i.e. filter cv elements that were extracted
rawData = rawData[ int(len(rawData) * cvSplit) : , : ]
testData = rawData[ 0 : int(len(rawData) * testSplit), : ]
test_pIC50 = testData[ :, -1 ]
testData = testData[ :, :-1 ]
# use remaining elements for training data set
trainData = rawData[ int(len(rawData) * testSplit) : , : ]
train_pIC50 = trainData[ :, -1 ]
trainData = trainData[ :, :-1 ]
# write all files, any existing file of the same name is overwritten
trainX = '\\'.join([static_location, '{0}_{1}_train.csv'.format(sessionid,time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) ]);
with file(trainX, 'wb+') as fileOut:
fileW = csv.writer(fileOut)
for i in range( 0, trainData.shape[0] ):
row = trainData[i]
fileW.writerow(row);
trainY = '\\'.join([static_location, '{0}_{1}_train_pIC50.csv'.format(sessionid,time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) ]);
with file(trainY, 'wb+') as fileOut:
fileW = csv.writer(fileOut)
for i in range( 0, train_pIC50.shape[0] ):
row = train_pIC50[i]
fileW.writerow([row]);
testX = '\\'.join([static_location, '{0}_{1}_test.csv'.format(sessionid,time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) ]);
with file(testX, 'wb+') as fileOut:
fileW = csv.writer(fileOut)
for i in range( 0, testData.shape[0] ):
row = testData[i]
fileW.writerow(row);
testY = '\\'.join([static_location, '{0}_{1}_test_pIC50.csv'.format(sessionid,time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) ]);
with file(testY, 'wb+') as fileOut:
fileW = csv.writer(fileOut)
for i in range( 0, test_pIC50.shape[0] ):
row = test_pIC50[i]
fileW.writerow([row]);
cvX = '\\'.join([static_location, '{0}_{1}_cv.csv'.format(sessionid,time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) ]);
with file(cvX, 'wb+') as fileOut:
fileW = csv.writer(fileOut)
for i in range( 0, cvData.shape[0] ):
row = cvData[i]
fileW.writerow(row);
cvY = '\\'.join([static_location, '{0}_{1}_cv_pIC50.csv'.format(sessionid,time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) ]);
with file(cvY, 'wb+') as fileOut:
fileW = csv.writer(fileOut)
for i in range( 0, cv_pIC50.shape[0] ):
row = cv_pIC50[i]
fileW.writerow([row]);
return [trainX, trainY, testX, testY, cvX, cvY]
except:
print 'error in splitRawInput( ... )'
return [] | mit | -4,856,937,899,934,640,000 | 46.103774 | 145 | 0.557893 | false | 3.617391 | true | false | false |
wbthomason/minigrade | minigrade.py | 1 | 17857 | from flask import Flask, render_template, request, jsonify, Response, abort, session, stream_with_context, redirect, g
from ast import literal_eval
import subprocess
import re
import requests
import json
import shutil
import time
import os
import sqlite3
import logging
import sys
import commands
import threading
minigrade = Flask(__name__)
PORT_NUMBER = 8000
# Put your own secret key here. You can't have mine!
minigrade.secret_key = <KEY>
urlmatch = re.compile('(?:git@|git://|https://)(?P<url>[\w@-]+\.[a-zA-Z]+[:/](?P<user>[a-zA-Z][a-zA-Z0-9-]+)/(?P<repo>.+))')
SERVER_IP = 'localhost'#'128.143.136.170'
logging.basicConfig(filename='grader.log',level=logging.DEBUG)
benchmark_mutex = threading.Lock()
def process_repo(repo):
logging.debug('Processing repo: ' + repo)
result = urlmatch.match(repo)
if not result:
return None
giturl = "https://" + result.group('url')
repository = result.group('repo')
if repository[-4:] == ".git":
repository = repository[:-4]
logging.debug('Returning: ' + str(repository))
return (giturl, repository, result.group('user'))
def sort_files_by_age(files):
filedata = [(filename, os.lstat(filename).st_ctime) for filename in files]
filedata = sorted(filedata, key = lambda x: x[1])
filedata = [filetuple[0] for filetuple in filedata]
filedata = filter(lambda x: not os.path.isdir(x), filedata)
return filedata
def cap_logs():
result_files = os.listdir('.')
if len(result_files) > 10:
filedata = sort_files_by_age(result_files)[:len(result_files) - 10]
for f in filedata:
os.remove(f)
def parse_httperf_output(output_str):
dur = -1
avg_resp = -1
io = -1
err = -1
for line in output_str.split('\n'):
# need test-duration(s), reply time(ms), Net I/O, errors
output_line = line.rstrip()
testduration = re.search(r'test-duration (\d+\.\d+) s', output_line)
replytime = re.search(r'Reply time \[ms\]: response (\d+\.\d+) .*', output_line)
netio = re.search(r'Net I/O: (\d+\.\d+) KB/s', output_line)
errorcount = re.search(r'Errors: total (\d+)', output_line)
if testduration:
#print "Test duration: %f s\n" % float(testduration.group(1))
dur = float(testduration.group(1))
elif replytime:
#print "Reply time: %f ms\n" % float(replytime.group(1))
avg_resp = float(replytime.group(1))
elif netio:
#print "Net I/O: %f MB\n" % float(netio.group(1)) * dur / 1024
io = float(netio.group(1)) * dur / 1024
elif errorcount:
#print "Error count: %d\n" % int(errorcount.group(1))
err = int(errorcount.group(1))
'''
print "Test duration: %f s" % dur
print "Reply time: %f ms" % avg_response
print "Net I/O: %f MB" % io
print "Error count: %d" % err
print "END HTTPERF\n"
'''
return dur, avg_resp, io, err
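

# Illustration (added): the httperf summary lines the regexes above look for.
# The numbers here are made-up examples, not real measurements:
#
#   ... test-duration 6.32 s            -> dur
#   Reply time [ms]: response 12.4 ...  -> avg_resp
#   Net I/O: 312.5 KB/s ...             -> io (converted to MB over the run)
#   Errors: total 0 ...                 -> err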
def grade_stream(assignment, repo):
yield "retry: 300000\n"
if 'email' not in session:
yield "data: inv: Please log in before running the autograder.\n\n"
raise StopIteration
#session['email'] = "[email protected]"
build = None
tests = []
repo_name = "NotADirectory"
cwd = os.getcwd()
try:
with open("tests/{}.test".format(assignment)) as testfile:
for idnum, testcase in enumerate(testfile):
test = literal_eval(' '.join(testcase.split(' ')[1:]))
if testcase.split(' ')[0] == "build":
build = test
else:
tests.append(test)
yield "data: tn: {} {}\n\n".format(test['name'], idnum)
except:
print "No test file for '{}'".format(assignment)
yield "data: inv: Error: No valid test file for {}\n\n".format(assignment)
raise StopIteration
try:
yield "data inv: Grading {} from {}...\n\n".format(assignment, repo)
logging.debug("Grading " + assignment + " from: " + repo);
os.chdir("results/{}".format(assignment))
if not os.path.isdir(session['email']):
os.mkdir(session['email'])
os.chdir(session['email'])
cap_logs()
result_files = sort_files_by_age(os.listdir('.'))
result_files.reverse()
# review the past results
for f in result_files:
yield "data: nextpast\n\n"
with open(f) as result:
for line in result:
yield "data: past: {}\n\n".format(line)
# start cloning the repository
# just skip it in ps3
if assignment == "PS3":
# ps3 remote benchmark
httperf_req_list_file_path = os.path.join(cwd, "tests/zhtta-test-NUL.txt")
cmd = "httperf --server %s --port 4414 --rate 10 --num-conns 60 --wlog=y,%s" % (repo, httperf_req_list_file_path) # actually IP address
#cmd = "ping -c 2 %s" % repo
yield "data: raw: Queuing for benchmark, please wait...\n\n"
benchmark_mutex.acquire()
logging.debug("Benchmark starts, please wait...");
yield "data: raw: Benchmark starts, please wait...\n\n"
import commands
yield "data: raw: {}\n\n".format(cmd)
ret_text = commands.getoutput(cmd)
benchmark_mutex.release()
for line in ret_text.split('\n'):
yield "data: raw: {}\n\n".format(line)
(dur, avg_resp, io, err) = parse_httperf_output(ret_text)
with open(str(time.time())+".result", 'w') as results:
results.write("Duration: %d s\n\n" % (dur))
results.write("Average Response Time: %d ms\n\n" % avg_resp)
results.write("IO: %dMB\n\n" % (io))
results.write("Errors: {}\n".format(err))
if dur != 1 and io > 280 and err == 0:
yield "data: tr: Pass %d %ds\n\n" % (0, dur)
yield "data: tr: Pass %d %dms\n\n" % (1, avg_resp)
yield "data: tr: Pass %d %dMB\n\n" % (2, io)
yield "data: tr: Pass %d %d errors\n\n" % (3, err)
update_top_runs(session['email'], str(dur), str(avg_resp))
else:
yield "data: tr: Fail %d %ds\n\n" % (0, dur)
yield "data: tr: Fail %d %dms\n\n" % (1, avg_resp)
yield "data: tr: Fail %d %dMB\n\n" % (2, io)
yield "data: tr: Fail %d %d errors\n\n" % (3, err)
#os.chdir(cwd)
#yield "data: done\n\n"
else:
with open(str(time.time())+".result", 'w') as results:
result = process_repo(repo)
if not result:
results.write("{} is not a valid git repository.\n".format(repo))
yield "data: inv: {} is not a valid git repository.\n\n".format(repo)
raise StopIteration
logging.debug("Processed repo...");
repo_url, repo_name, repo_user = result
if os.path.isdir(repo_name):
shutil.rmtree(repo_name)
try:
logging.debug("Cloning...")
yield "data inv: Cloning github repository...\n\n"
git = subprocess.check_output("git clone {}".format(repo_url).split(" "), stderr = subprocess.STDOUT)
logging.debug("Finished cloning...")
yield "data: raw: {}\n\n".format(git)
except Exception as e:
logging.debug("{} is not a valid repository, because we got {}\n".format(repo,e))
results.write("{} is not a valid repository, because we got {}\n".format(repo,e))
yield "data: inv: Error: {} is not a valid repository, because we got {}\n\n".format(repo,e)
raise StopIteration
logging.debug("Using repo {}.\n".format(repo))
results.write("Using repository {}.\n".format(repo))
os.chdir(repo_name)
# copying files to testing dir...
#yield "setting up files..."
#shutil.copy("/home/grader/minigrade/tests/testfiles/abc.txt", "abc.txt")
if build:
logging.debug("Building...")
success = re.compile(build['results'])
commands = build['cmd'].split(";")
for command in commands:
yield "data: raw: {}\n\n".format(command)
result = None
try:
result = subprocess.check_output(command, shell = True, stderr = subprocess.STDOUT)
except:
print "Error building"
if result:
for line in result.split('\n'):
yield "data: raw: {}\n\n".format(line)
else:
yield "data: raw: Error running {}\n\n".format(command)
if result and re.search(success, result):
results.write("Build success\n")
yield "data: tr: Pass 0\n\n"
else:
results.write("Build failed\n")
yield "data: tr: Fail 0\n\n"
yield "data: inv: Build failed!\n\n"
raise StopIteration
passed = 0
failed = 0
counter = 0
for idnum, test in enumerate(tests):
counter += 1
yield "data: raw: {}\n\n".format(test["cmd"])
success = re.compile(test['results'])
f = open("test_file{}".format(counter), 'w')
temp=""
for token in test['cmd'].split(';'):
temp = temp + './gash -c "{}"\n'.format(token)
print "{}: temp={}".format(counter, temp.rstrip())
f.write(temp.rstrip())
f.close()
cwd = os.getcwd()
print "cwd={}".format(cwd)
for dep in test['dep']:
print "dep={}".format(dep)
print "typeof(dep)={}".format(type(dep))
shutil.copy("/home/grader/minigrade/tests/testfiles/{}".format(dep), dep)
command = "/home/grader/minigrade/dockerscript.sh {} {} test_file{} output_file{}".format(cwd, cwd, counter, counter)
print "{}: command={}".format(counter, command)
returncode = subprocess.call(command, shell = True, stderr = subprocess.STDOUT)
os.chdir(cwd)
result =""
try:
r = open('{}/output_file{}'.format(cwd,counter), 'r')
result = ''.join(r.readlines()).rstrip()
r.close()
except:
print "{}: couldn't open output_file{}".format(counter, counter)
result="null"
print "{}: test {}".format(session['email'], counter)
print "returncode={}".format(returncode)
# only print the first 10 lines to prevent spamming
m = 0
for line in result.split('\n'):
if m < 10:
print "result from output_file{}={}".format(counter, line)
yield "data: raw: {}\n\n".format(line)
else:
break
m += 1
print "{}: done printing result".format(counter)
if m >= 10:
yield "data: raw: ...\n\n"
if (returncode == 0) and re.match(success, result):
results.write("Passed {}\n".format(test['name']))
passed += 1
yield "data: tr: Pass {}\n\n".format(idnum + 1)
else:
results.write("Failed {}\n".format(test['name']))
failed += 1
yield "data: tr: Fail {}\n\n".format(idnum + 1)
results.write("Total pass: {}\n".format(passed))
results.write("Total fail: {}\n".format(failed))
finally:
if os.path.isdir(repo_name):
shutil.rmtree(repo_name)
os.chdir(cwd)
yield "data: done\n\n"
@minigrade.route('/')
def index():
with open("grade.html") as sub_page:
return '\n'.join(sub_page.readlines())
@minigrade.route('/grade/')
def grade():
assignment = request.args.get("assign", "NoneSuch")
repo = request.args.get("repo", "NoneSuch")
logging.debug("Grading " + assignment + ": " + repo)
response = Response(stream_with_context(grade_stream(assignment, repo)), mimetype="text/event-stream")
logging.debug("Finished grading " + repo + ": " + str(response))
return response
@minigrade.route('/auth/login', methods=['POST', 'GET'])
def login():
if request.method == "GET":
return session['email'] if 'email' in session else "null"
# The request has to have an assertion for us to verify
if 'assertion' not in request.form:
abort(400)
# Send the assertion to Mozilla's verifier service.
data = {'assertion': request.form['assertion'], 'audience': 'http://' + SERVER_IP + ':'+ str(PORT_NUMBER)}
resp = requests.post('https://verifier.login.persona.org/verify', data=data, verify=True)
# Did the verifier respond?
if resp.ok:
# Parse the response
verification_data = json.loads(resp.content)
# Check if the assertion was valid
if verification_data['status'] == 'okay':
# Log the user in by setting a secure session cookie
session.update({'email': verification_data['email']})
logging.debug('Login as: ' + verification_data['email'])
return "Logged in as %s" % verification_data['email']
logging.debug('Login failure: ' + str(resp))
# Oops, something failed. Abort.
abort(500)
@minigrade.route('/auth/logout', methods=['POST'])
def logout():
session.pop('email', None)
return redirect('/')
# Server-side database methods
##########
database_path = <PATH>
@minigrade.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
if error:
print("There was an error closing the database: {}".format(error))
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(database_path)
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
"""Creates the database tables."""
with minigrade.app_context():
db = get_db()
with minigrade.open_resource('schema.sql') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
"""Returns a query to the database as a list"""
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
get_db().commit()
return (rv[0] if rv else None) if one else rv
############
# Leaderboard functions
#################
leaderboard_path = <PATH>
import random
@minigrade.route('/leaderboard.html')
def leaderboard():
with open("leaderboard.html") as sub_page:
return '\n'.join(sub_page.readlines())
@minigrade.route('/leaders.data')
def leaders():
with open("leaders.data") as sub_page:
return '\n'.join(sub_page.readlines())
def update_top_runs(user, duration, response):
    '''Run this to update the top runs with a user's duration / response-time entry.'''
q = query_db("SELECT * FROM topruns WHERE username=?", [user], one=True)
if q is None:
query_db("INSERT INTO topruns VALUES (?, ?, ?)", [user, str(duration), str(response)])
else:
query_db("UPDATE topruns SET duration=?, response=? WHERE username=?", [str(duration), str(response), user])
# THIS LINE determines how many users are shown on the leaderboard.
update_leaderboard(5)
def get_top_runs(num):
    '''Returns the top `num` runs as three lists:
    the first sorted by the combined duration/response-time heuristic,
    the second by best duration, the third by best response time.'''
runs = query_db("SELECT * FROM topruns")
data = [[],[],[]]
runs.sort(key=heuristic)
data[0] = runs[:num]
runs.sort(key=lambda x: float(x[1]))
data[1] = runs[:num]
runs.sort(key=lambda x: float(x[2]))
data[2] = runs[:num]
return data
def heuristic(run):
'''returns a function of a weighing bewteen duration and response time'''
tot_duration = float(run[1])
avg_response = float(run[2])
return tot_duration * avg_response
def update_leaderboard(num):
'''Updates the leaderboard with 'num' entries for webpages to see'''
head = "<h2>Leaderboard</h2>"
tbl_template=lambda x: '''
<h3>%s</h3>
<table id="leaderboard-dr" style='width:100%%%%;border-spacing:10px'>
<tr><th style="text-align:left">ID</th>
<th style="text-align:left">Duration Time</th>
<th style="text-align:left">Response Time</th>
</tr>
%%s
</table>
'''%x
titles = ["Best duration/response time", "Best duration", "Best Response Time"]
data = get_top_runs(num)
fin = ""
for i, title in enumerate(titles):
tmp = tbl_template(title)
row = ""
for tup in data[i]:
# should be (username, duration, response time)
row += "<tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(*tup)
fin += tmp % row
open(leaderboard_path, 'w').write(fin)
#Only run in chroot jail.
if __name__ == '__main__':
print "running..."
minigrade.run(host='0.0.0.0', debug=False, threaded=True, port=PORT_NUMBER)
#minigrade.run(debug=True, threaded=True, port=9080)
| mit | -4,476,878,641,102,194,700 | 38.332599 | 147 | 0.552668 | false | 3.685655 | true | false | false |
uaprom-summer-2015/Meowth | project/bl/__init__.py | 2 | 1048 | from .auth import UserBL, TokenBL
from .feed import CategoryBL, CityBL, VacancyBL
from .pages import PageBL, PageBlockBL, PageChunkBL
from .mail import MailTemplateBL
from .utils import registry
from .uploads import UploadedImageBL
__all__ = ["UserBL", "CategoryBL", "CityBL", "VacancyBL", "PageBL",
"PageBlockBL", "PageChunkBL", 'TokenBL', "UploadedImageBL", ]
def init_resource_registry():
registry['bl.category'] = lambda category: CategoryBL(category)
registry['bl.vacancy'] = lambda vacancy: VacancyBL(vacancy)
registry['bl.city'] = lambda city: CityBL(city)
registry['bl.user'] = lambda user: UserBL(user)
registry['bl.pagechunk'] = lambda pagechunk: PageChunkBL(pagechunk)
registry['bl.pageblock'] = lambda pageblock: PageBlockBL(pageblock)
registry['bl.page'] = lambda page: PageBL(page)
registry['bl.token'] = lambda token: TokenBL(token)
registry['bl.mailtemplate'] = lambda template: MailTemplateBL(template)
registry['bl.uploadedimage'] = lambda template: UploadedImageBL(template)
| bsd-3-clause | -6,181,116,566,507,573,000 | 46.636364 | 77 | 0.726145 | false | 3.481728 | false | false | false |
rodgzilla/buddhabrot | buddha_parallel_bulb.py | 1 | 7625 | from PIL import Image
import multiprocessing
import math
sequence_function = lambda z_n, c : z_n ** 2 + c
def is_in_cardoid_or_bulb(z):
    """Returns True if z lies in the main cardioid or the period-2 bulb of the
    Mandelbrot set (so the escape-time iteration can be skipped). Algorithm:
    https://en.wikipedia.org/wiki/Mandelbrot_set#Optimizations
    """
    p = math.sqrt((z.real - 1. / 4) ** 2 + z.imag ** 2)
    # The two regions are disjoint, so the tests must be combined with `or`.
    return z.real < p - 2 * (p ** 2) + 1. / 4 or \
           ((z.real + 1) ** 2) + (z.imag ** 2) < 1. / 16
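

# Quick sanity checks (added for illustration): c = 0 lies inside the main
# cardioid and c = -1 is the centre of the period-2 bulb, while c = 1 escapes:
#   is_in_cardoid_or_bulb(complex(0, 0))   -> True
#   is_in_cardoid_or_bulb(complex(-1, 0))  -> True
#   is_in_cardoid_or_bulb(complex(1, 0))   -> False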
# def iterate_over_region(width, height, min_x, max_x, min_y, max_y):
def iterate_over_region(args):
"""Compute the sequences on a given region. args is a 6-tuple
composed as follows (width, height, min_x, max_x, min_y, max_y).
It returns a 2 dimensionnal array of size width * height
containing the number of occurences of a given pixel in the
complex sequences.
"""
width, height, min_iter, max_iter, min_x, max_x, min_y, max_y = args
complex_plane = [[0] * height for _ in range(width)]
# For each pixel of the screen:
for x in xrange(min_x, max_x):
for y in xrange(min_y, max_y):
# Compute the corresponding complex number.
c = complex(((x * 3.) / width) - 2, ((y * 2.0) / height) - 1)
# We check if p is in the cardoid or the bulb (which means
# that it automatically belongs to the mandelbrot set.
if is_in_cardoid_or_bulb(c):
continue
z = c
# Creation of the set of complex number that we will use
# to remember de complex number sequence.
complex_sequence = set([])
# Compute at most max_iter terms of the complex number
# sequence
for i in xrange(max_iter):
complex_sequence.add(z)
z = sequence_function(z, c)
# If |z| > 2, we are sure that the sequence diverges.
if (z.real * z.real + z.imag * z.imag) > 4:
if len(complex_sequence) <= min_iter:
break
complex_sequence.add(z)
# For each diverging sequence, we increment the
# counter corresponding to the pixel of the screen
# through which it passed.
for term in complex_sequence:
pixel_x = math.floor(((term.real + 2) * width) / 3.)
pixel_y = math.floor(((term.imag + 1) * height) / 2.)
if 0 <= pixel_x < width and 0 <= pixel_y < height:
complex_plane[int(pixel_x)][int(pixel_y)] += 1
break
print "Computation for x in [", min_x, ",", max_x, "] DONE"
return complex_plane
def slice_screen(width, height, min_iter, max_iter, cpu_number,
slice_per_cpu):
"""We cut the screen in cpu_number slices of width (width /
cpu_number). If the number of cpu does not divide the width, the
last slices will contain the remaining pixels
"""
screen_sections = []
slice_size = width / (cpu_number * slice_per_cpu)
for i in range((cpu_number * slice_per_cpu) - 1):
screen_sections.append((width, height, min_iter, max_iter, i *
slice_size, (i + 1) * slice_size, 0, height))
    # The last slice starts where the previous one ended and runs to the edge,
    # picking up any pixels left over by the integer division above.
    screen_sections.append((width, height, min_iter, max_iter, (i + 1) *
                            slice_size, width, 0, height))
return screen_sections
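

# Example (added): with width=300, cpu_number=4 and slice_per_cpu=5 the screen
# is cut into 20 vertical strips of 15 pixels; the first 19 cover x in
# [0, 285) and the last one covers x in [285, 300), picking up any remainder.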
def fusion_results(width, height, results):
"""After the computation, we have to add the results of every
different slice to get the final array.
"""
final_result = [[0] * height for _ in range(width)]
for x in xrange(width):
for y in xrange(height):
final_result[x][y] = sum((slice[x][y] for slice in results))
return final_result
def iterate_over_screen(width, height, min_iter, max_iter,
slice_per_cpu):
"""This function uses the other functions to : create the process
pool, compute the size of the different slices of the screen, use
Pool.map to compute the orbits of the different complexe sequences
and then fusion all the results together.
"""
cpu_number = multiprocessing.cpu_count()
print "Launching computation on", cpu_number, "cores"
sliced_screen = slice_screen(width, height, min_iter, max_iter,
cpu_number, slice_per_cpu)
print "The screen is decomposed in", len(sliced_screen), "sections"
process_pool = multiprocessing.Pool(cpu_number)
res = process_pool.map(iterate_over_region, sliced_screen)
process_pool.close()
process_pool.join()
final_result = fusion_results(width, height, res)
return final_result
def render_picture(width, height, result):
"""This function renders the final picture and save it to
'test.bmp'. To render the picture, the function computes the
minimum and maximum values of the cells, the scale the range of
values to the interval [0, 255]. The final picture is rendered
using this value as a red component.
"""
minimum = result[0][0]
maximum = result[0][0]
print "Starting rendering"
print "The image size is", width, "x", height
for x in range(width):
for y in range(height):
if result[x][y] < minimum:
minimum = result[x][y]
if result[x][y] > maximum:
maximum = result[x][y]
img = Image.new('RGB', (width, height))
img.putdata([(((result[x][y] - minimum) * 255) / (maximum-minimum), 0, 0) \
for y in range(height) for x in range(width)])
img.save('test_bulb.bmp')
print "Rendering done"
def render_picture_bis(width, height, result):
"""This function renders the final picture and save it to
'test.bmp'. To render the picture, the function computes the
minimum and maximum values of the cells, the scale the range of
values to the interval [0, 255]. The final picture is rendered
using this value as a red component.
"""
minimum = result[0][0]
maximum = result[0][0]
print "Starting rendering"
print "The image size is", width, "x", height
for x in range(width):
for y in range(height):
if result[x][y] < minimum:
minimum = result[x][y]
if result[x][y] > maximum:
maximum = result[x][y]
middle = (minimum + maximum) / 2.
datas = []
for y in range(height):
for x in range(width):
if result[x][y] < middle:
red_component = ((result[x][y] - minimum) * 255) / (middle-minimum)
datas.append((int(red_component), 0, 0))
else:
green_component = ((result[x][y] - middle) * 127) / (maximum-middle)
datas.append((0, int(green_component), 0))
img = Image.new('RGB', (width, height))
img.putdata(datas)
img.save('test_bulb.bmp')
print "Rendering done"
if __name__ == '__main__':
# Height should be (2/3) * width.
width = 300
height = 200
# The minimal number of iterations is used to remove the noise in
# the picture.
min_iter = 300
max_iter = 3000
# In order to speed up the computation, we use more slices than
# the number of cpu. This allows the program to begin new
# calculation if a slice takes a long time. The memory used by the
# program is linear in this variable, be careful.
slice_per_cpu = 5
print "start"
res = iterate_over_screen(width, height, min_iter, max_iter, slice_per_cpu)
print "All computation done"
render_picture_bis(width, height, res)
| gpl-2.0 | -862,092,054,715,396,600 | 38.102564 | 84 | 0.585443 | false | 3.765432 | false | false | false |
stvstnfrd/edx-platform | common/djangoapps/track/tests/test_util.py | 5 | 1275 | # lint-amnesty, pylint: disable=missing-module-docstring
import json
from datetime import datetime
from django.test import TestCase
from pytz import UTC
from common.djangoapps.track.utils import DateTimeJSONEncoder
class TestDateTimeJSONEncoder(TestCase): # lint-amnesty, pylint: disable=missing-class-docstring
def test_datetime_encoding(self):
a_naive_datetime = datetime(2012, 5, 1, 7, 27, 10, 20000)
a_tz_datetime = datetime(2012, 5, 1, 7, 27, 10, 20000, tzinfo=UTC)
a_date = a_naive_datetime.date()
an_iso_datetime = '2012-05-01T07:27:10.020000+00:00'
an_iso_date = '2012-05-01'
obj = {
'number': 100,
'string': 'hello',
'object': {'a': 1},
'a_datetime': a_naive_datetime,
'a_tz_datetime': a_tz_datetime,
'a_date': a_date,
}
to_json = json.dumps(obj, cls=DateTimeJSONEncoder)
from_json = json.loads(to_json)
assert from_json['number'] == 100
assert from_json['string'] == 'hello'
assert from_json['object'] == {'a': 1}
assert from_json['a_datetime'] == an_iso_datetime
assert from_json['a_tz_datetime'] == an_iso_datetime
assert from_json['a_date'] == an_iso_date
| agpl-3.0 | -1,201,009,096,320,396,500 | 32.552632 | 97 | 0.604706 | false | 3.418231 | false | false | false |
matthijsvk/multimodalSR | code/Experiments/neon-master/tests/test_beamsearch.py | 1 | 8214 | # ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon.backends import gen_backend
from neon.initializers.initializer import Array
from neon.layers.recurrent import LSTM
from neon.layers.container import Seq2Seq
from neon.transforms import Tanh, Logistic
import numpy as np
from neon import NervanaObject
from neon.util.beamsearch import BeamSearch
def reformat_samples(seq2seq_obj, num_beams, batch_size):
samples = [[seq2seq_obj.candidates[bb][:, ex]
for bb in range(num_beams)]
for ex in range(batch_size)]
examples = []
for ex in range(batch_size):
examples.append(np.vstack([samples[ex][ii] for ii in range(num_beams)]))
return examples
def test_beamsearch(backend_default):
"""
    Simulated beam search on a minibatch of 2, for 4 time steps. The
LSTM states are real but the "softmax outputs" z are hardcoded and
not taken from the network.
There are 6 tokens the network outputs, and they have probabilities
like exp(1), exp(5), exp(7)
The test asserts that the score_lists assigned by _beamsearch_step(z_list)
are equal to the probabilities computed manually adding probabilities
to z_list.
"""
be = backend_default
batch_size = 2
be.bsz = batch_size
time_steps = 4
nout = 6
num_beams = 3
# create unused layers
activation = Tanh()
gate_activation = Logistic()
init_ary = np.eye(nout)
init = Array(init_ary)
encoder = LSTM(nout, init,
activation=activation, gate_activation=gate_activation,
name="Enc")
decoder = LSTM(nout, init,
activation=activation, gate_activation=gate_activation,
name="Dec")
class DummyFProp():
"""
Constructs an artificial beam search example with known correct outputs.
This is called inside a nested loop over steps, num_life. In the first
time step there is one life beam, after that, 3 life beams per step.
There are 4 time steps total. Each beamsearch_step builds one list over
num_life beams.
At t=0, the winners for ex0 are 1, 4, 5 (indexed by their position) and
winners for ex1 are 2,4,5. From there we continue the beam for ex0:
12, 13, 14 6+2=8 6+3=9 6+2=8
40, 43, 45 with scores 5+4=9 5+3=8 5+7=12 three new winners 45, 52, 55
50, 52, 55 5+4=9 5+6=11 5+5=10
for ex2
1 4 5 with scores 5 4 7
we get the three winners 1, 4, 5 and continue (just taking the
3 in order, no sorting)
10 12 13 14 (not unique!) 5+2=7 5+2=7 5+3=8
41 42 43 with scores 4+6=10 4+5=9 4+7=11 winners 43 51 52
51 52 53 7+4=11 7+6=13 7+3=10 scores 11 11 13
continue from the three winners 43 51 52
431 433 434 11+10=21 11+3=14 11+9=20
511 512 513 with scores 11+6=17 11+5=16 11+7=18 winners 431 434 520
520 521 522 13+8=21 13+4=17 13+6=19 scores 21 20 21
continue from three winners 431 511 513 (going along beams, the matches
in a beam)
4310 4312 4313 4314 21+2=23 21+2=23 21+3=24 21+10=31 (not unique!)
4341 4342 4343 with scores 20+10=30 20+5=25 20+7=27 winners 4314 4341 5204
5200 5202 5204 21+8=29 21+6=27 21+10=31 scores 31 30 31
overall winners are 4314 4341 5204
"""
def __init__(self):
self.i = -1
# t=0
# X x x <-- winners: 1, 4, 5 (for example 0)
z = be.array(np.exp(np.array([[1, 6, 2, 1, 5, 5],
[1, 5, 2, 2, 4, 7]]))).T
# t=1
# x x x <-- give we picked 4: new winners 2,3,4
z1 = be.array(np.exp(np.array([[1, 1, 2, 3, 2, 1],
[2, 1, 2, 3, 2, 1]]))).T
# x x x <-- give we picked 5:
# new winners 0,3,[5]
# score 12
z2 = be.array(np.exp(np.array([[4, 1, 2, 3, 1, 7],
[2, 6, 5, 7, 2, 4]]))).T
# x X X <-- give we picked 1:
# new winners 0,[2],[5]
# scores 12, 11
z3 = be.array(np.exp(np.array([[4, 1, 6, 3, 1, 5],
[1, 4, 6, 3, 2, 1]]))).T
# t=2
# example 0: given constructed (1, 5), score 11: 3, 4; scores 21, 20
z4 = be.array(np.exp(np.array([[1, 1, 2, 10, 9, 1],
[2, 10, 2, 3, 9, 1]]))).T
# example 0: given constructed (5, 5), score 12: none selected from this beam
z5 = be.array(np.exp(np.array([[4, 1, 2, 3, 1, 7],
[2, 6, 5, 7, 2, 4]]))).T
# example 0: given constructed (1, 2), score 12: 1; score 20
z6 = be.array(np.exp(np.array([[4, 8, 6, 3, 1, 5],
[8, 4, 6, 3, 1, 1]]))).T
# t=3
# example 0: given constructed (1, 5, 4), score 20: 1, score 30
z7 = be.array(np.exp(np.array([[1, 10, 2, 1, 1, 1],
[2, 1, 2, 3, 10, 1]]))).T
# example 0: given constructed (1, 2, 1), score 20: 5, score 30
z8 = be.array(np.exp(np.array([[4, 1, 2, 3, 1, 10],
[2, 10, 5, 7, 2, 4]]))).T
# example 0: given constructed (1, 5, 3), score 21: 4, score 31
z9 = be.array(np.exp(np.array([[4, 8, 6, 3, 10, 5],
[8, 4, 6, 3, 10, 1]]))).T
self.z_list = [z, z1, z2, z3, z4, z5, z6, z7, z8, z9]
def fprop(self, z, inference=True, init_state=None):
self.i += 1
return self.z_list[self.i]
def final_state():
return be.zeros_like(decoder.h[-1])
class InObj(NervanaObject):
def __init__(self):
self.shape = (nout, time_steps)
self.decoder_shape = (nout, time_steps)
decoder.fprop = DummyFProp().fprop
layers = Seq2Seq([encoder, decoder], decoder_connections=[0])
layers.decoder._recurrent[0].final_state = final_state
in_obj = InObj()
layers.configure(in_obj) # made zeros because zeros have shape
layers.allocate()
layers.allocate_deltas(None)
beamsearch = BeamSearch(layers)
inputs = be.iobuf(in_obj.shape)
beamsearch.beamsearch(inputs, num_beams=num_beams)
ex0 = np.array([[1, 5, 4, 1],
[1, 2, 1, 5],
[1, 5, 3, 4]])
ex1 = np.array([[5, 1, 4, 4],
[5, 1, 1, 1],
[5, 2, 0, 4]])
# extract all candidates
examples = reformat_samples(beamsearch, num_beams, batch_size)
assert np.allclose(examples[0], ex0)
assert np.allclose(examples[1], ex1)
if __name__ == '__main__':
be = gen_backend(backend='gpu', batch_size=2)
test_beamsearch(be)
| mit | 4,307,300,257,928,656,000 | 43.16129 | 98 | 0.498417 | false | 3.548164 | false | false | false |
ramiroluz/cookiecutter | cookiecutter/config.py | 1 | 1724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.config
-------------------
Global configuration handling
"""
from __future__ import unicode_literals
import copy
import logging
import os
import io
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from .exceptions import ConfigDoesNotExistException
from .exceptions import InvalidConfiguration
logger = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
'default_context': {}
}
def get_config(config_path):
"""
Retrieve the config from the specified path, returning it as a config dict.
"""
if not os.path.exists(config_path):
raise ConfigDoesNotExistException
logger.debug('config_path is {0}'.format(config_path))
with io.open(config_path, encoding='utf-8') as file_handle:
try:
yaml_dict = yaml.safe_load(file_handle)
except yaml.scanner.ScannerError as e:
raise InvalidConfiguration(
'{0} is not a valid YAML file: line {1}: {2}'.format(
config_path,
e.problem_mark.line,
e.problem))
config_dict = copy.copy(DEFAULT_CONFIG)
config_dict.update(yaml_dict)
return config_dict
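# A minimal config file that get_config() accepts might look like this
# (a sketch only; any of the DEFAULT_CONFIG keys can be overridden):
#
#     default_context:
#         full_name: "Jane Doe"
#     cookiecutters_dir: "~/my-cookiecutters/"
#     replay_dir: "~/my-replays/"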
def get_user_config():
"""
Retrieve config from the user's ~/.cookiecutterrc, if it exists.
Otherwise, return None.
"""
# TODO: test on windows...
USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
if os.path.exists(USER_CONFIG_PATH):
return get_config(USER_CONFIG_PATH)
return copy.copy(DEFAULT_CONFIG)
| bsd-3-clause | -6,222,203,247,279,411,000 | 23.28169 | 79 | 0.639791 | false | 3.739696 | true | false | false |
Kinggerm/GetOrganelle | Utilities/join_spades_fastg_by_blast.py | 1 | 24526 | #!/usr/bin/env python
# coding:utf8
import time
import os
import sys
import platform
import subprocess
try:
# python2
import commands
except ImportError:
pass
from argparse import ArgumentParser
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(0, os.path.join(PATH_OF_THIS_SCRIPT, ".."))
import GetOrganelleLib
from GetOrganelleLib.seq_parser import *
from GetOrganelleLib.pipe_control_func import executable, make_blast_db, execute_blast
from GetOrganelleLib.versions import get_versions
PATH_OF_THIS_SCRIPT = os.path.split(os.path.realpath(__file__))[0]
import platform
SYSTEM_NAME = ""
if platform.system() == "Linux":
SYSTEM_NAME = "linux"
elif platform.system() == "Darwin":
SYSTEM_NAME = "macOS"
else:
sys.stdout.write("Error: currently GetOrganelle is not supported for " + platform.system() + "! ")
exit()
GO_LIB_PATH = os.path.split(GetOrganelleLib.__file__)[0]
GO_DEP_PATH = os.path.realpath(os.path.join(GO_LIB_PATH, "..", "GetOrganelleDep", SYSTEM_NAME))
# V1_4
this_dir_split = '/'
if 'Win' in platform.architecture()[1]:
this_dir_split = '\\'
options = ''
short_candidates = {}
def require_commands():
global options
usage = 'python '+str(os.path.basename(__file__))+' -g input.fastg -f refernce.fasta'
parser = ArgumentParser(usage=usage)
parser.add_argument('-g', dest='in_fastg_file', type=str, help='followed by your input fastg file')
parser.add_argument('-f', dest='reference_fa_base', type=str, help='followed by Fasta index format')
parser.add_argument('--keep-temp', dest='keep_temp', default=False, action='store_true', help='Choose to disable deleting temp files produced by blast and this script')
parser.add_argument('--bt', dest='blast_hits_threshold', default=0.60, help='Default: 0.60', type=float)
parser.add_argument('--max-gap', dest='max_gap_to_add', default=1500, help='Default: 1500', type=int)
parser.add_argument('--con-all', dest='connect_inner_contig', default=False, action='store_true', help='Choose to activate connecting all possible contigs. Default: False')
parser.add_argument('--depth', dest='depth_to_connect', default=1.0, help='Default: 1.0', type=float)
parser.add_argument("--which-blast", dest="which_blast", default="",
help="Assign the path to BLAST binary files if not added to the path. "
"Default: try GetOrganelleDep/" + SYSTEM_NAME + "/ncbi-blast first, then $PATH")
# parser.add_argument('--merge-overlaps', default=False, action='store_true', help='Choose to activate automatically merging overlapping contigs')
# parser.add_argument('--min-os', dest='min_overlap_similarity', default=0.9, help='The similarity threshold to merge overlapping contigs. Default: 0.9', type=float)
# parser.add_argument('--min-ol', dest='min_overlap_length', default=15, help='The length threshold to merge overlapping contigs. Default: 15', type=int)
parser.add_argument("-v", "--version", action="version",
version="GetOrganelle v{version}".format(version=get_versions()))
try:
options = parser.parse_args()
except Exception as e:
sys.stdout.write('\n######################################'+str(e))
sys.stdout.write('\n"-h" for more usage')
exit()
else:
if not (options.in_fastg_file and options.reference_fa_base):
sys.stdout.write("\n######################################\nInsufficient arguments!")
sys.stdout.write("\n\"-h\" for more usage")
exit()
def check_db(which_blast=""):
global options
in_index = options.reference_fa_base + '.index'
if options.reference_fa_base:
time0 = time.time()
ref_fasta = read_fasta(options.reference_fa_base)
if len(ref_fasta[0]) > 1:
options.reference_fa_base += '.1st.fasta'
write_fasta(out_file=options.reference_fa_base, matrix=[[ref_fasta[0][0]], [ref_fasta[1][0]], ref_fasta[2]], overwrite=True)
sys.stdout.write('\nWarning: multi-seqs in reference file, only use the 1st sequence.')
elif len(ref_fasta[0]) == 0:
sys.stdout.write('\nError: illegal reference file!')
exit()
make_blast_db(input_file=options.reference_fa_base, output_base=in_index, which_blast=which_blast)
sys.stdout.write('\nMaking BLAST db cost '+str(time.time()-time0))
else:
sys.stdout.write('\nError: No reference input!')
exit()
return in_index
def blast_and_call_new_matrix(fasta_file, index_files, out_file, len_db, which_blast=""):
global options
time0 = time.time()
sys.stdout.write('\nMaking BLAST ...')
fasta_file += '.Temp'
execute_blast(query=fasta_file, blast_db=index_files, output=out_file, outfmt=6, threads=4, e_value="1e-20",
which_blast=which_blast)
time1 = time.time()
sys.stdout.write('\nBLAST to '+index_files.split(this_dir_split)[-1]+' cost '+str(time1-time0))
# ----------------------------------------
# find start and end points of query
# initialize candidates: fastq topologies and sequences
query_matrix = read_fasta(options.in_fastg_file)
len_fastg = len(query_matrix[0])
hits_candidates = {}
short_names = []
for i in range(len_fastg):
full_name = query_matrix[0][i]
short_name = '_'.join(full_name.split()[0].split('_')[1:]).split('_length')[0]
coverage = float(full_name.split('cov_')[1].split(';')[0].split('\'')[0].split(':')[0])
hits_candidates[short_name] = {False: set(), True: set(), 'coverage': coverage}
short_names.append(short_name)
for i in range(len_fastg):
full_name = query_matrix[0][i]
short_name = short_names[i]
connected_edges = set()
if ':' in full_name:
for edge in full_name.rstrip(';').split(':')[1].split(','):
edge_short_name = '_'.join(edge.split('_')[1:]).split('_length')[0]
if edge_short_name in hits_candidates:
if edge.endswith('\''):
connected_edges.add((edge_short_name, False))
else:
connected_edges.add((edge_short_name, True))
if full_name.split(';')[0].split(':')[0].endswith('\''):
sequence = query_matrix[1][i]
len_seq = len(sequence)
new_items = {'identity': [0 for j in range(len_seq)],
('index', False): i,
('seq', False): sequence,
('seq', True): complementary_seq(sequence),
'len_seq': len_seq,
False: connected_edges}
hits_candidates[short_name].update(new_items)
else:
sequence = query_matrix[1][i]
len_seq = len(sequence)
new_items = {'identity': [0 for j in range(len_seq)],
'start_block': {'q': (len_seq, len_seq), 'r':[]},
'end_block': {'q': (0, 0), 'r': []},
('index', True): i,
('seq', True): sequence,
('seq', False): complementary_seq(sequence),
'len_seq': len_seq,
True: connected_edges}
hits_candidates[short_name].update(new_items)
# -----------------------------------
# detect k-mer
k_mer = 0
try:
for short_name in hits_candidates:
for direction in [True, False]:
for next_edge_info in hits_candidates[short_name][direction]:
if k_mer:
if hits_candidates[short_name][('seq', direction)][-k_mer:] != hits_candidates[next_edge_info[0]][('seq', next_edge_info[1])][:k_mer]:
raise ValueError
else:
for k_mer in range(127, 19, -2):
if hits_candidates[short_name][('seq', direction)][-k_mer:] == hits_candidates[next_edge_info[0]][('seq', next_edge_info[1])][:k_mer]:
break
else:
raise ValueError
except ValueError:
k_mer = 0
pass
sys.stdout.write('\nDetected k-mer:'+str(k_mer))
# calculate edge connections according to hits_candidates and max_gap
#
    # TODO: needs improvement:
    # the directions of jointed edges are not tracked!
def get_jointed_edges_within_distance(all_infos, this_edge, this_direction, length_left, jointed_edges, k_mer, recurse_depth=0):
for this_next_edge in all_infos[this_edge][this_direction]:
this_length_left = length_left - all_infos[this_next_edge[0]]['len_seq'] + k_mer
if this_length_left >= 0 and this_next_edge not in jointed_edges:
# try:
# arbitrarily set recurse_depth to 20
if recurse_depth < 20:
jointed_edges = get_jointed_edges_within_distance(all_infos, this_next_edge[0], this_direction==this_next_edge[1], this_length_left, jointed_edges, k_mer, recurse_depth+1)
# except RuntimeError:
# sys.stdout.write('\nWarning: RuntimeError!')
# pass
jointed_edges.add(this_next_edge)
return jointed_edges
edge_connections = {}
for edge in hits_candidates:
for direction in [False, True]:
edge_connections[(edge, direction)] = get_jointed_edges_within_distance(hits_candidates, edge, direction, options.max_gap_to_add+k_mer, set(), k_mer)
# compare candidates with blast results
blast_out_lines = open(out_file)
for line in blast_out_lines:
line_split = line.strip().split('\t')
query = '_'.join(line_split[0].split('_')[1:]).split('_length')[0]
q_start, q_end = int(line_split[6]), int(line_split[7])
r_start, r_end = int(line_split[8]), int(line_split[9])
identity = float(line_split[2])
for i in range(q_start-1, q_end):
hits_candidates[query]['identity'][i] = max(identity, hits_candidates[query]['identity'][i])
if q_start < hits_candidates[query]['start_block']['q'][0]:
hits_candidates[query]['start_block']['q'] = (q_start, q_end)
hits_candidates[query]['start_block']['r'] = [(r_start, r_end)]
elif q_start == hits_candidates[query]['start_block']['q'][0]:
if q_end > hits_candidates[query]['start_block']['q'][1]:
hits_candidates[query]['start_block']['q'] = (q_start, q_end)
hits_candidates[query]['start_block']['r'] = [(r_start, r_end)]
elif q_end == hits_candidates[query]['start_block']['q'][1]:
hits_candidates[query]['start_block']['r'].append((r_start, r_end))
if q_end > hits_candidates[query]['end_block']['q'][1]:
hits_candidates[query]['end_block']['q'] = (q_start, q_end)
hits_candidates[query]['end_block']['r'] = [(r_start, r_end)]
elif q_end == hits_candidates[query]['end_block']['q'][1]:
if q_start < hits_candidates[query]['end_block']['q'][0]:
hits_candidates[query]['end_block']['q'] = (q_start, q_end)
hits_candidates[query]['end_block']['r'] = [(r_start, r_end)]
elif q_start == hits_candidates[query]['end_block']['q'][0]:
hits_candidates[query]['end_block']['r'].append((r_start, r_end))
blast_out_lines.close()
time2 = time.time()
sys.stdout.write('\nParsing BLAST result cost '+str(time2-time1))
# ------------------------------------
# map terminal blocks of candidates to reference bases
# workout points to connect
# {base: [(query name, query identity, is_start_of_query, direction_in_reference)]}
ref_bases_dict = {}
for hit in hits_candidates.keys():
average_identity = sum(hits_candidates[hit]['identity'])/float(len(hits_candidates[hit]['identity']))
hits_candidates[hit]['identity'] = average_identity
if average_identity >= options.blast_hits_threshold:
for block in ['start_block', 'end_block']:
is_start_of_query = bool(block == 'start_block')
if options.connect_inner_contig or not bool(hits_candidates[hit][not is_start_of_query]):
if hits_candidates[hit]['coverage'] >= options.depth_to_connect:
query_loci = hits_candidates[hit][block]['q']
if is_start_of_query:
length_to_terminal = query_loci[0] - 1
else:
length_to_terminal = hits_candidates[hit]['len_seq'] - query_loci[1]
for reference_block in hits_candidates[hit][block]['r']:
direction_in_ref = bool(bool(reference_block[0] <= reference_block[1]) == is_start_of_query)
ref_block_to_mark = int(not is_start_of_query)
if reference_block[ref_block_to_mark] in ref_bases_dict:
ref_bases_dict[reference_block[ref_block_to_mark]].append((hit, length_to_terminal, is_start_of_query, direction_in_ref))
else:
ref_bases_dict[reference_block[ref_block_to_mark]] = [(hit, length_to_terminal, is_start_of_query, direction_in_ref)]
# ------------------------------------
# search for new connections
used_edge_numbers = []
for crazy_string in list(hits_candidates):
for numbers in ''.join(filter(lambda ch: ch in '0123456789-_', crazy_string)).split('_'):
for num in numbers.split('-'):
used_edge_numbers.append(int(num))
used_edge_numbers.sort()
variances_to_pass = {'edge': used_edge_numbers[-1]+1, 'index': len_fastg}
def make_connections(edge1, base1, edge2, base2, k_mer):
# if end to end and disable self-connection
if edge1[3] != edge2[3] and edge1[0] != edge2[0]:
# if not connected
if (edge2[0], edge2[2]) not in edge_connections[(edge1[0], not edge1[2])]:
# if Overlaps
if edge1[3] or base1 == base2:
overlap_or_gap_length = (base2-base1)%len_db+1 + edge1[1] + edge2[1]
edge_name = str(variances_to_pass['edge'])+'overlap'+str(overlap_or_gap_length)
new_full_name = 'EDGE_'+edge_name+'_length_'+str(overlap_or_gap_length+2*k_mer)+'_cov_80'
forward_edge_sequence = hits_candidates[edge1[0]][('seq', not edge1[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge2[0]][('seq', edge2[2])][:k_mer]
reverse_edge_sequence = hits_candidates[edge2[0]][('seq', not edge2[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge1[0]][('seq', edge1[2])][:k_mer]
else:
overlap_or_gap_length = (base2-base1)%len_db-1 - edge1[1] - edge2[1]
# if still overlaps
if overlap_or_gap_length < 0:
overlap_or_gap_length = -overlap_or_gap_length
edge_name = str(variances_to_pass['edge'])+'overlap'+str(overlap_or_gap_length)
new_full_name = 'EDGE_'+edge_name+'_length_'+str(overlap_or_gap_length+2*k_mer)+'_cov_20'
forward_edge_sequence = hits_candidates[edge1[0]][('seq', not edge1[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge2[0]][('seq', edge2[2])][:k_mer]
reverse_edge_sequence = hits_candidates[edge2[0]][('seq', not edge2[2])][-k_mer:] + '?'*overlap_or_gap_length + hits_candidates[edge1[0]][('seq', edge1[2])][:k_mer]
# if Gaps
else:
edge_name = str(variances_to_pass['edge'])+'gap'+str(overlap_or_gap_length)
new_full_name = 'EDGE_'+edge_name+'_length_'+str(overlap_or_gap_length+2*k_mer)+'_cov_5'
forward_edge_sequence = hits_candidates[edge1[0]][('seq', not edge1[2])][-k_mer:] + 'N'*overlap_or_gap_length + hits_candidates[edge2[0]][('seq', edge2[2])][:k_mer]
reverse_edge_sequence = hits_candidates[edge2[0]][('seq', not edge2[2])][-k_mer:] + 'N'*overlap_or_gap_length + hits_candidates[edge1[0]][('seq', edge1[2])][:k_mer]
variances_to_pass['edge'] += 1
# these_directions = {'to_edge1':False, 'edge1':not edge1[2],'to_edge2':True, 'edge2':edge2[2]}
# add new edge to matrix
query_matrix[0].append(new_full_name+':'+query_matrix[0][hits_candidates[edge2[0]][('index', edge2[2])]].split(';')[0].split(':')[0]+';')
edge2_full_name = query_matrix[0][hits_candidates[edge2[0]][('index', not edge2[2])]]
if ':' in edge2_full_name:
query_matrix[0][hits_candidates[edge2[0]][('index', not edge2[2])]] = edge2_full_name.rstrip(';')+','+new_full_name+'\';'
else:
query_matrix[0][hits_candidates[edge2[0]][('index', not edge2[2])]] = edge2_full_name.rstrip(';')+':'+new_full_name+'\';'
query_matrix[0].append(new_full_name+'\':'+query_matrix[0][hits_candidates[edge1[0]][('index', edge1[2])]].split(';')[0].split(':')[0]+';')
edge1_full_name = query_matrix[0][hits_candidates[edge1[0]][('index', not edge1[2])]]
if ':' in edge1_full_name:
query_matrix[0][hits_candidates[edge1[0]][('index', not edge1[2])]] = edge1_full_name.rstrip(';')+','+new_full_name+';'
else:
query_matrix[0][hits_candidates[edge1[0]][('index', not edge1[2])]] = edge1_full_name.rstrip(';')+':'+new_full_name+';'
query_matrix[1].append(forward_edge_sequence)
query_matrix[1].append(reverse_edge_sequence)
# add new edge to hits_candidates
hits_candidates[edge_name] = {('index', True): variances_to_pass['index'],
('index', False): variances_to_pass['index']+1,
('seq', True): forward_edge_sequence,
('seq', False): forward_edge_sequence,
'len_seq': overlap_or_gap_length+2*k_mer,
True: [(edge2[0], edge2[2])],
False: [(edge1[0], edge1[2])]}
variances_to_pass['index'] += 2
hits_candidates[edge1[0]][not edge1[2]].add((edge_name, True))
hits_candidates[edge2[0]][not edge2[2]].add((edge_name, False))
# add new edge to edge_connections (update)
edge_connections[(edge1[0], not edge1[2])] = get_jointed_edges_within_distance(hits_candidates, edge1[0], not edge1[2], options.max_gap_to_add+k_mer, set(), k_mer)
edge_connections[(edge2[0], not edge2[2])] = get_jointed_edges_within_distance(hits_candidates, edge2[0], not edge2[2], options.max_gap_to_add+k_mer, set(), k_mer)
edge_connections[(edge_name, True)] = get_jointed_edges_within_distance(hits_candidates, edge_name, True, options.max_gap_to_add+k_mer, set(), k_mer)
edge_connections[(edge_name, False)] = get_jointed_edges_within_distance(hits_candidates, edge_name, False, options.max_gap_to_add+k_mer, set(), k_mer)
ref_bases_list = sorted(list(ref_bases_dict))
len_ref_base = len(ref_bases_list)
for i in range(len_ref_base):
candidates = ref_bases_dict[ref_bases_list[i]]
# the same base
len_candidates = len(candidates)
if len_candidates >= 2:
for k in range(len_candidates):
for l in range(1, len_candidates):
make_connections(candidates[k], ref_bases_list[i], candidates[l], ref_bases_list[i], k_mer)
# next bases
for candidate_infos in candidates:
i_plus = i + 1
base = ref_bases_list[i_plus % len_ref_base]
while i_plus-i < len_ref_base and (base - ref_bases_list[i]) % len_db <= options.max_gap_to_add:
for hit_infos in ref_bases_dict[base]:
make_connections(candidate_infos, ref_bases_list[i], hit_infos, base, k_mer)
i_plus += 1
base = ref_bases_list[i_plus%len_ref_base]
sys.stdout.write('\nRedirecting contig path cost '+str(time.time()-time2))
return query_matrix
def del_complementary(fastg_file):
global options
time0 = time.time()
temp_matrix = read_fasta(fasta_dir=fastg_file)
i = 0
while i < len(temp_matrix[0]):
if temp_matrix[0][i].rstrip(';').split(':')[0].endswith('\''):
del temp_matrix[0][i]
del temp_matrix[1][i]
else:
i += 1
write_fasta(out_file=fastg_file + '.Temp', matrix=temp_matrix, overwrite=True)
sys.stdout.write('\nDel complementary cost'+str(time.time()-time0))
def remove_temp_files(fastg_file):
global options
if not options.keep_temp:
if options.in_fastg_file:
os.remove(fastg_file+'.Temp')
try:
os.remove(fastg_file+'.blast_in')
except OSError:
pass
try:
os.remove(options.reference_fa_base+'.index.nhr')
os.remove(options.reference_fa_base+'.index.nin')
os.remove(options.reference_fa_base+'.index.nsq')
except OSError:
pass
def main():
time0 = time.time()
    sys.stdout.write(
        "\nThis script joins the SPAdes fastg contigs according to the reference."
        "\nIt adds extra gap nodes (N) and/or overlap nodes (?) between the connectible nodes and generates "
        "a new fastg file."
"\n"
"\nThis is a BETA version:"
"\nAlthough it will not produce wrong connections, it usually replicates the same right connection."
"\nDon't be surprised if you find any other bugs.\n")
require_commands()
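    # Typical invocation (a sketch; the file names here are hypothetical):
    #   python join_spades_fastg_by_blast.py -g assembly_graph.fastg -f reference.fasta
    # Optional flags such as --max-gap, --depth and --bt map to the arguments
    # declared in require_commands() above.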
global options
if not options.which_blast:
try_this_bin = os.path.join(GO_DEP_PATH, "ncbi-blast", "blastn")
if os.path.isfile(try_this_bin) and executable(try_this_bin):
output, err = subprocess.Popen(
try_this_bin + " -version", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True).communicate()
if "not found" in output.decode("utf8"):
sys.stdout.write(output.decode("utf8") + "\n")
else:
options.which_blast = os.path.split(try_this_bin)[0]
if not executable(os.path.join(options.which_blast, "blastn")):
sys.stdout.write(os.path.join(options.which_blast, "blastn") + " not accessible!")
exit()
if not executable(os.path.join(options.which_blast, "makeblastdb")):
sys.stdout.write(os.path.join(options.which_blast, "makeblastdb") + " not accessible!")
exit()
# fastg to fasta
fasta_file = options.in_fastg_file
del_complementary(fasta_file)
# make blast database if not made
include_index = check_db(which_blast=options.which_blast)
len_db = len(read_fasta(options.reference_fa_base)[1][0])
# make blast
new_fasta_matrix = blast_and_call_new_matrix(fasta_file=fasta_file, index_files=include_index, out_file=fasta_file + '.blast_in', len_db=len_db, which_blast=options.which_blast)
# write out fastg
write_fasta(out_file=fasta_file + '.Ncontigs_added.' + fasta_file.split('.')[-1], matrix=new_fasta_matrix, overwrite=False)
remove_temp_files(fasta_file)
sys.stdout.write('\n\nTotal cost: '+str(time.time()-time0)+'\n\n')
if __name__ == '__main__':
main()
"""Copyright 2016 Jianjun Jin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.""" | gpl-3.0 | 2,539,933,049,302,687,000 | 55.125858 | 191 | 0.571312 | false | 3.548835 | false | false | false |
tuskar/tuskar | tuskar/api/controllers/v1/types/rack.py | 1 | 2776 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
import wsme
from wsme import types as wtypes
from tuskar.api.controllers.v1.types.base import Base
from tuskar.api.controllers.v1.types.link import Link
from tuskar.api.controllers.v1.types.relation import Relation
from tuskar.api.controllers.v1.types.chassis import Chassis
from tuskar.api.controllers.v1.types.node import Node
from tuskar.api.controllers.v1.types.capacity import Capacity
class Rack(Base):
"""A representation of Rack in HTTP body."""
id = int
name = wtypes.text
slots = int
subnet = wtypes.text
location = wtypes.text
state = wtypes.text
chassis = Chassis
capacities = [Capacity]
nodes = [Node]
links = [Link]
resource_class = Relation
@classmethod
def convert_with_links(self, rack, links):
kwargs = rack.as_dict() # returns a new dict, overwriting keys is safe
if rack.chassis_id:
kwargs['chassis'] = Chassis(id=rack.chassis_id,
links=[Link.build_ironic_link('chassis',
rack.chassis_id)])
else:
kwargs['chassis'] = Chassis()
if rack.resource_class_id:
l = [Link.build('self', pecan.request.host_url, 'resource_classes',
rack.resource_class_id)]
kwargs['resource_class'] = Relation(id=rack.resource_class_id,
links=l)
kwargs['capacities'] = [Capacity(name=c.name, value=c.value,
unit=c.unit)
for c in rack.capacities]
kwargs['nodes'] = [Node(id=n.node_id,
links=[Link.build_ironic_link('node', n.node_id)])
for n in rack.nodes]
return Rack(links=links, **kwargs)
@classmethod
def convert(self, rack, base_url, minimal=False):
links = [Link.build('self', pecan.request.host_url, 'rack',
rack.id)]
if minimal:
return Rack(links=links, id=str(rack.id)) | apache-2.0 | -7,753,077,215,130,675,000 | 37.569444 | 88 | 0.592219 | false | 3.92645 | false | false | false |
SchrodingersGat/kicad-library-utils | schlib/rules/S7_1.py | 2 | 3476 | # -*- coding: utf-8 -*-
from rules.rule import *
import re
class Rule(KLCRule):
"""
Create the methods check and fix to use with the kicad lib files.
"""
def __init__(self, component):
super(Rule, self).__init__(component, 'Power-flag symbols follow some special rules/KLC-exceptions')
self.makePinINVISIBLE = False
self.makePinPowerInput = False
self.fixTooManyPins = False
self.fixPinSignalName = False
self.fixNoFootprint = False
def check(self):
"""
        Performs the check for this rule.
"""
fail = False
if self.component.isPossiblyPowerSymbol():
if (len(self.component.pins) != 1):
self.error("Power-flag symbols have exactly one pin")
fail = True
self.fixTooManyPins = True
else:
if (self.component.pins[0]['electrical_type'] != 'W'):
self.error("The pin in power-flag symbols has to be of a POWER-INPUT")
fail = True
self.makePinPowerInput = True
if (not self.component.pins[0]['pin_type'].startswith('N')):
self.error("The pin in power-flag symbols has to be INVISIBLE")
fail = True
self.makePinINVISIBLE = True
if ((self.component.pins[0]['name'] != self.component.name) and ('~'+self.component.pins[0]['name'] != self.component.name)):
self.error("The pin name ("+self.component.pins[0]['name']+") in power-flag symbols has to be the same as the component name ("+self.component.name+")")
fail = True
self.fixPinSignalName = True
# footprint field must be empty
if self.component.fields[2]['name'] != '' and self.component.fields[2]['name'] != '""':
self.error("Graphical symbols have no footprint association (footprint was set to '"+self.component.fields[2]['name']+"')")
fail = True
self.fixNoFootprint = True
# FPFilters must be empty
if len(self.component.fplist) > 0:
self.error("Graphical symbols have no footprint filters")
fail = True
self.fixNoFootprint = True
return fail
def fix(self):
"""
        Performs the fix for this rule, if possible.
"""
if self.fixTooManyPins:
self.info("FIX for too many pins in power-symbol not supported")
if self.makePinPowerInput:
self.info("FIX: switching pin-type to power-input")
self.component.pins[0]['electrical_type'] = 'W'
if self.makePinINVISIBLE:
self.info("FIX: making pin invisible")
self.component.pins[0]['pin_type'] = 'N'+self.component.pins[0]['pin_type']
if self.fixPinSignalName:
newname = self.component.name
if self.component.name.startswith('~'):
newname = self.component.name[1:len(self.component.name)]
self.info("FIX: change pin name to '"+newname+"'")
            self.component.pins[0]['name'] = newname
if self.fixNoFootprint:
self.info("FIX empty footprint association and FPFilters")
self.component.fplist.clear()
self.component.fields[2] = ''
| gpl-3.0 | -7,710,074,112,509,439,000 | 44.142857 | 172 | 0.556674 | false | 4.138095 | false | false | false |
jrising/computer | computer/paramiko_server.py | 1 | 1649 | import sys, time
import paramiko
from linux_server import SizelessLinuxServer
class ParamikoServer(SizelessLinuxServer):
def receive(self):
stdout = ""
while self.session.recv_ready():
stdout += self.session.recv(sys.maxint)
stderr = ""
while self.session.recv_stderr_ready():
            stderr += self.session.recv_stderr(sys.maxint)
return stdout, stderr
def receive_all(self):
stdout = ""
stderr = ""
while stdout[-2:] != '$ ':
time.sleep(0.1)
stdout2, stderr2 = self.receive()
stdout += stdout2
stderr += stderr2
return stdout, stderr
def receive_each(self):
stdout = ""
while stdout[-2:] != '$ ':
time.sleep(0.1)
stdout, stderr = self.receive()
yield stdout, stderr
def disconnect(self):
self.client.close()
self.connected = False
def run_command(self, command, root=None, path=None):
"Returns (output, error) as strings."
stdout = ""
stderr = ""
for stdout2, stderr2 in self.run_command_each(command, root, path):
stdout += stdout2
stderr += stderr2
stdout = "\n".join(stdout.split('\r\n')[1:-1]) # drop command and prompt
return stdout, stderr
def run_command_each(self, command, root=None, path=None):
if root is not None:
self.cwd(self.fullpath(root, path))
print command
self.session.sendall(command + '\n')
for stdout, stderr in self.receive_each():
yield stdout, stderr
| mit | 7,623,619,950,642,216,000 | 26.032787 | 80 | 0.559127 | false | 4.185279 | false | false | false |
gronostajo/droogle | base.py | 1 | 5821 | from collections import Counter
import gzip
from operator import itemgetter
from os import listdir, path
import re
import cPickle as pickle
import json
from math import log, sqrt
from scipy.sparse import csr_matrix, lil_matrix, coo_matrix
import numpy as np
from sklearn.preprocessing import normalize
import unicodedata
__author__ = 'gronostaj'
def list_dirs(dirpath):
return [f for f in listdir(dirpath) if path.isdir(path.join(dirpath, f))]
def list_files(dirpath):
return [f for f in listdir(dirpath) if path.isfile(path.join(dirpath, f))]
class Serializer:
@staticmethod
def serialize(obj, serializer, filename, gz=False, **kwargs):
if gz:
with gzip.open('%s.gz' % filename, 'wb', 5) as f:
f.write(serializer.dumps(obj, **kwargs))
else:
with open(filename, 'wb') as f:
f.write(serializer.dumps(obj, **kwargs))
@staticmethod
def deserialize(serializer, filename):
gz = filename.endswith('.gz')
if gz:
with gzip.open(filename, 'rb') as f:
obj = serializer.load(f)
else:
with open(filename, 'rb') as f:
obj = serializer.load(f)
return obj
@staticmethod
def pickle(obj, filename, gz=True):
Serializer.serialize(obj, pickle, filename, gz)
@staticmethod
def unpickle(filename):
return Serializer.deserialize(pickle, filename)
@staticmethod
def to_json(obj, filename, gz=True):
Serializer.serialize(obj, json, filename, gz, sort_keys=True, indent=4, separators=(',', ': '))
@staticmethod
def from_json(filename):
return Serializer.deserialize(json, filename)
class Droogle:
SUFFIXES = ('%s.pickle', '%s.pickle.gz', '%s.json', '%s.json.gz')
_WORDMAP = 'wordmap'
_MATRIX = 'matrix'
_CHUNKS = 'chunks'
def __init__(self, indexdir):
dbs = {}
for req in (Droogle._WORDMAP, Droogle._MATRIX, Droogle._CHUNKS):
satisfying = [
path.join(indexdir, suffix % req)
for suffix in Droogle.SUFFIXES
if path.isfile(path.join(indexdir, suffix % req))
]
if not satisfying:
raise FileMissingError(req)
else:
dbs[req] = satisfying[0]
self.dbs = {
k: Serializer.unpickle(f)
if f.endswith('.pickle') or f.endswith('.pickle.gz')
else Serializer.from_json(f)
for k, f in dbs.iteritems()
}
@staticmethod
def _sanitize(str):
return re.sub(r'[^\x00-\x7F]+', ' ', str.lower())
@staticmethod
def _bagofwords(str):
return Counter(re.findall(r'\w+', str))
@staticmethod
def _indexstring(filename, str, separator):
bags = {}
chunks = {}
wordset = set()
for i, chunk in enumerate(re.split(separator, str)):
bag = Droogle._bagofwords(Droogle._sanitize(chunk))
bags['%s_%d' % (filename, i)] = dict(bag)
chunks['%s_%d' % (filename, i)] = chunk
wordset = wordset | set(bag.keys())
return bags, chunks, wordset
@staticmethod
def index(dirpath, inputfiles, separator):
bags = {}
chunks = {}
wordset = set()
for inputfile in inputfiles:
print("- Parsing file %s" % inputfile)
with open(path.join(dirpath, inputfile), 'r') as f:
thisbag, thischunks, thisset = Droogle._indexstring(inputfile, f.read(), separator)
bags.update(thisbag)
chunks.update(thischunks)
wordset = wordset | thisset
print("- Building matrix")
wordmap = {w: i for i, w in enumerate(wordset)}
chunkmap = {c: i for i, c in enumerate(bags.keys())}
matrix = lil_matrix((len(wordset), len(bags)))
chunks = {chunkmap[n]: c for n, c in chunks.items()}
for chunkname, chunkid in chunkmap.iteritems():
bag = dict(bags[chunkname])
for word, quantity in bag.iteritems():
wordid = wordmap[word]
matrix[wordid, chunkid] = quantity
matrix = csr_matrix(matrix)
print("- Optimizing matrix")
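        # np.diff(matrix.indptr) on the CSR matrix gives, per word (row), the
        # number of chunks containing it; every count is then scaled by an
        # IDF-style factor log(len(wordset) / count) and each chunk vector is
        # L2-normalized, yielding a TF-IDF-like term/chunk matrix.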
nonzero = np.diff(matrix.indptr)
idf = lil_matrix(np.array(map(lambda c: log(len(wordset) / c), nonzero)))
matrix = matrix.transpose().multiply(idf)
normalize(matrix, copy=False)
matrix = matrix.transpose()
print("- Saving files")
Serializer.to_json(wordmap, path.join(dirpath, "%s.json" % Droogle._WORDMAP))
Serializer.pickle(matrix, path.join(dirpath, "%s.pickle" % Droogle._MATRIX))
Serializer.pickle(chunks, path.join(dirpath, "%s.pickle" % Droogle._CHUNKS))
return len(bags), len(wordset)
def query(self, string):
bag = Droogle._bagofwords(Droogle._sanitize(string))
norm = sqrt(reduce(lambda v, x: v + x ** 2, bag.values()))
bag = {k: v / norm for k, v in dict(bag).iteritems()}
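        # The chunk columns of the stored matrix are unit-normalized and the
        # query bag was just L2-normalized, so the sparse dot product below
        # scores every chunk by cosine similarity with the query.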
bagmap = {
self.dbs[Droogle._WORDMAP][word]: count
for word, count in bag.iteritems()
if word in self.dbs[Droogle._WORDMAP]
}
bagmap = zip(*bagmap.items())
lookup = coo_matrix(
(bagmap[1], ([0] * len(bagmap[0]), bagmap[0])),
dtype='double',
shape=(1, self.dbs[Droogle._MATRIX].shape[0])
).dot(self.dbs[Droogle._MATRIX])
results = [(self.dbs[Droogle._CHUNKS][i], lookup[0, i]) for i in xrange(self.dbs[Droogle._MATRIX].shape[1])]
return map(itemgetter(0), sorted(results, key=itemgetter(1), reverse=True))
class FileMissingError(Exception):
def __init__(self, filename):
self.filename = filename | gpl-2.0 | 904,110,846,443,576,600 | 30.989011 | 116 | 0.579282 | false | 3.674874 | false | false | false |
Damnever/Chat-Room | taskq/utils.py | 1 | 4730 | # -*- coding: utf-8 -*-
"""
Logging support with colors, available only on *nix.
Borrowed from `tornado.log`:
https://github.com/tornadoweb/tornado/blob/master/tornado/log.py
"""
from __future__ import print_function, division
import sys
import functools
import logging
import logging.handlers
try:
import curses
except ImportError:
curses = None
# try:
# import cPickle as pickle
# except ImportError:
import pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
logger = logging.getLogger("TASKQ")
dumps = functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
loads = pickle.loads
class NoSuchIdExists(Exception):
pass
class ClassUnpickler(object):
    """This class resolves pickle.Unpickler.find_class to a fixed class.
    It implements the context manager protocol; the `as` clause
    returns an Unpickler object initialized from a StringIO buffer.
    ```
    with ClassUnpickler(s, SomeClass) as unpickler:
obj = unpickler.load()
```
"""
def __init__(self, buffer, cls):
self._f = StringIO(buffer)
self._cls = cls
def __enter__(self):
def _resolve_class(module, name):
return self._cls
unpickler = pickle.Unpickler(self._f)
unpickler.find_class = _resolve_class
return unpickler
def __exit__(self, exc_type, exc_value, exc_traceback):
if hasattr(self, '_f'):
self._f.close()
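# Usage sketch (not executed here): force every pickled class reference to
# resolve to a local class, e.g. after a module was moved or renamed.
# ``Task`` is a hypothetical stand-in for whatever class the bytes encode.
#
#     class Task(object):
#         def __init__(self, name):
#             self.name = name
#
#     payload = dumps(Task("demo"))            # dumps() defined above
#     with ClassUnpickler(payload, Task) as unpickler:
#         restored = unpickler.load()          # an instance of the local Task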
def singleton(cls):
instance = cls()
instance.__call__ = lambda: instance
return instance
def enable_pretty_logging(log_level="info", logger=logger):
logger.setLevel(getattr(logging, log_level.upper()))
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(LogFormatter())
logger.addHandler(stream_handler)
class LogFormatter(logging.Formatter):
DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s"
DEFAULT_DATE_FORMAT = "%y-%m-%d %H:%M:%S"
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
}
def __init__(self, color=True, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
colors=DEFAULT_COLORS):
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
self._colors = {}
if color and _stderr_supports_color():
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
for levelno, code in colors.items():
self._colors[levelno] = unicode(curses.tparm(fg_color, code),
"ascii")
self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
else:
self._normal = ""
def format(self, record):
try:
message = record.getMessage()
assert isinstance(message, basestring)
record.message = _safe_unicode(message)
except Exception as e:
record.message = "Bad message (%s): %r" % (e, record.__dict__)
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ""
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
lines = [formatted.rstrip()]
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
formatted = '\n'.join(lines)
return formatted.replace('\n', '\n ')
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
def _safe_unicode(s):
if isinstance(s, (unicode, type(None))):
return s
if not isinstance(s, bytes):
        raise TypeError("Expected bytes, unicode, None; got %r" % type(s))
try:
return s.decode("utf-8")
except UnicodeDecodeError:
return repr(s)
if __name__ == "__main__":
enable_pretty_logging()
print(_stderr_supports_color())
print(hasattr(sys.stderr, 'isatty'))
print(curses)
try:
1 / 0
except ZeroDivisionError:
# logger.error("error", exc_info=sys.exc_info())
logger.error("error", exc_info=True)
| bsd-3-clause | 59,938,001,376,948,150 | 27.154762 | 87 | 0.598097 | false | 3.899423 | false | false | false |
luisza/vcl_django | reservations/models.py | 1 | 5527 | from __future__ import unicode_literals
from django.db import models
from authentication.models import User, Usergroup
from compute.models import Computer
from image.models import Image, Imagerevision
from managementnode.models import Managementnode
from core.models import State
class Request(models.Model):
stateid = models.ForeignKey(State, db_column='stateid', related_name="rel_si")
userid = models.ForeignKey(User, db_column='userid')
laststateid = models.ForeignKey(State, db_column='laststateid', related_name="rel_laststateid" )
logid = models.IntegerField()
forimaging = models.IntegerField()
test = models.IntegerField()
preload = models.IntegerField()
start = models.DateTimeField()
end = models.DateTimeField()
daterequested = models.DateTimeField()
datemodified = models.DateTimeField(blank=True, null=True)
checkuser = models.IntegerField()
class Meta:
db_table = 'request'
class Serverrequest(models.Model):
name = models.CharField(max_length=255)
serverprofileid = models.SmallIntegerField()
requestid = models.OneToOneField(Request, db_column='requestid')
fixedip = models.CharField(db_column='fixedIP', max_length=15, blank=True, null=True) # Field name made lowercase.
fixedmac = models.CharField(db_column='fixedMAC', max_length=17, blank=True, null=True) # Field name made lowercase.
admingroupid = models.ForeignKey(Usergroup, db_column='admingroupid', blank=True, null=True, related_name="rel_agi")
logingroupid = models.ForeignKey(Usergroup, db_column='logingroupid', blank=True, null=True, related_name="rel_login")
monitored = models.IntegerField()
class Meta:
db_table = 'serverrequest'
# Create your models here.
class Reservation(models.Model):
requestid = models.ForeignKey(Request, db_column='requestid')
computerid = models.ForeignKey(Computer, db_column='computerid')
imageid = models.ForeignKey(Image, db_column='imageid')
imagerevisionid = models.ForeignKey(Imagerevision, db_column='imagerevisionid')
managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid')
remoteip = models.CharField(db_column='remoteIP', max_length=15, blank=True, null=True) # Field name made lowercase.
lastcheck = models.DateTimeField(blank=True, null=True)
pw = models.CharField(max_length=40, blank=True, null=True)
connectip = models.CharField(db_column='connectIP', max_length=15, blank=True, null=True) # Field name made lowercase.
connectport = models.SmallIntegerField(blank=True, null=True)
class Meta:
db_table = 'reservation'
class Reservationaccounts(models.Model):
reservationid = models.ForeignKey(Reservation, db_column='reservationid')
userid = models.ForeignKey(User, db_column='userid')
password = models.CharField(max_length=50, blank=True, null=True)
class Meta:
db_table = 'reservationaccounts'
unique_together = (('reservationid', 'userid'),)
class Blockrequest(models.Model):
name = models.CharField(max_length=80)
imageid = models.ForeignKey(Image, db_column='imageid')
nummachines = models.IntegerField(db_column='numMachines') # Field name made lowercase.
groupid = models.ForeignKey(Usergroup, db_column='groupid', blank=True, null=True)
repeating = models.CharField(max_length=7)
ownerid = models.ForeignKey(User, db_column='ownerid')
managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid', blank=True, null=True)
expiretime = models.DateTimeField(db_column='expireTime') # Field name made lowercase.
processing = models.IntegerField()
status = models.CharField(max_length=9)
comments = models.TextField(blank=True, null=True)
class Meta:
db_table = 'blockRequest'
class Blocktimes(models.Model):
blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid') # Field name made lowercase.
start = models.DateTimeField()
end = models.DateTimeField()
processed = models.IntegerField()
skip = models.IntegerField()
class Meta:
db_table = 'blockTimes'
class Blockcomputers(models.Model):
blocktimeid = models.ForeignKey(Blocktimes, db_column='blockTimeid') # Field name made lowercase.
computerid = models.ForeignKey(Computer, db_column='computerid')
imageid = models.ForeignKey(Image, db_column='imageid')
reloadrequestid = models.IntegerField()
class Meta:
db_table = 'blockComputers'
unique_together = (('blocktimeid', 'computerid'),)
class Blockwebdate(models.Model):
blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid') # Field name made lowercase.
start = models.DateField()
end = models.DateField()
days = models.IntegerField(blank=True, null=True)
weeknum = models.IntegerField(blank=True, null=True)
class Meta:
db_table = 'blockWebDate'
class Blockwebtime(models.Model):
blockrequestid = models.ForeignKey(Blockrequest, db_column='blockRequestid') # Field name made lowercase.
starthour = models.IntegerField()
startminute = models.IntegerField()
startmeridian = models.CharField(max_length=2)
endhour = models.IntegerField()
endminute = models.IntegerField()
endmeridian = models.CharField(max_length=2)
order = models.IntegerField()
class Meta:
db_table = 'blockWebTime' | apache-2.0 | -5,905,803,189,161,662,000 | 39.057971 | 123 | 0.713226 | false | 3.919858 | false | false | false |
vnaboychenko/stackalytics | tests/api/test_releases.py | 7 | 2555 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tests.api import test_api
class TestAPIReleases(test_api.TestAPI):
def test_releases_empty(self):
with test_api.make_runtime_storage({}):
response = self.app.get('/api/1.0/releases')
self.assertEqual(200, response.status_code)
def test_releases(self):
with test_api.make_runtime_storage(
{'releases': [
{'release_name': 'prehistory', 'end_date': 1365033600},
{'release_name': 'havana', 'end_date': 1381968000},
{'release_name': 'icehouse', 'end_date': 1397692800}]}):
response = self.app.get('/api/1.0/releases')
releases = json.loads(response.data)['releases']
self.assertEqual(3, len(releases))
self.assertIn({'id': 'all', 'text': 'All'}, releases)
self.assertIn({'id': 'icehouse', 'text': 'Icehouse'}, releases)
def test_releases_search(self):
with test_api.make_runtime_storage(
{'releases': [
{'release_name': 'prehistory', 'end_date': 1365033600},
{'release_name': 'havana', 'end_date': 1381968000},
{'release_name': 'icehouse', 'end_date': 1397692800}]}):
response = self.app.get('/api/1.0/releases?query=hav')
releases = json.loads(response.data)['releases']
self.assertEqual(1, len(releases))
self.assertIn({'id': 'havana', 'text': 'Havana'}, releases)
def test_release_details(self):
with test_api.make_runtime_storage(
{'releases': [
{'release_name': 'prehistory', 'end_date': 1365033600},
{'release_name': 'icehouse', 'end_date': 1397692800}]}):
response = self.app.get('/api/1.0/releases/icehouse')
release = json.loads(response.data)['release']
self.assertEqual({'id': 'icehouse', 'text': 'Icehouse'}, release)
| apache-2.0 | -5,059,603,577,272,623,000 | 43.051724 | 77 | 0.598043 | false | 3.757353 | true | false | false |
ossCare/svnPlus | tagProtect/Python/setup.py | 1 | 2721 | #
# Copyright 2015,2016,2017 Joseph C. Pietras
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
import re
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def theVersion():
    vfile = "svnplus/tagprotect.py"  # relative path, file with the version string
verStr = ""
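    # The version is taken from a line such as:  VERSION = "1.2.3"
    # (single or double quotes are both stripped below).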
f = open(os.path.join(os.path.dirname(__file__), vfile))
if f:
regex = re.compile(r'^VERSION\s*=\s*')
for line in f:
if regex.match(line) is not None:
#print('line: {0}'.format(line), file=sys.stderr)
verStr = re.sub(r'\s*$', r'', line)
#print('verStr: {0}'.format(verStr), file=sys.stderr)
verStr = re.sub(r'^VERSION\s*=\s*', r'', verStr)
#print('verStr: {0}'.format(verStr), file=sys.stderr)
verStr = re.sub(r'^"(.+)"$', r'\1', verStr)
#print('verStr: {0}'.format(verStr), file=sys.stderr)
verStr = re.sub(r"^'(.+)'$", r'\1', verStr)
#print('verStr: {0}'.format(verStr), file=sys.stderr)
break
else:
        print('failed to open, will NOT read', file=sys.stderr)
    if verStr != "":
        print('version is: {0} from file "{1}"'.format(verStr, vfile), file=sys.stderr)
        return verStr
    exit(1)
setup(name='svnplus',
author_email='[email protected]',
author='Joseph C. Pietras',
classifiers=['Intended Audience :: Information Technology', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English'],
data_files=[ ("/root/svnplus", ['LICENSE', 'pre-commit', 'pre-commit.conf']) ],
description='''This is a subversion hook. It provides a protection mechanism for subversion repositories so that previously committed "tags" are immutable.''',
include_package_data=True,
keywords='subversion hook tagprotect immutable',
license='Apache Software License 2.0',
long_description=read('README'),
packages=['svnplus'],
url='https://github.com/ossCare/svnPlus',
version=theVersion(),
zip_safe=False)
| gpl-2.0 | 7,970,045,648,241,159,000 | 41.515625 | 166 | 0.626975 | false | 3.637701 | false | false | false |
Mehnen/BOINSO | core/migrations/0003_satellite.py | 1 | 1044 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20150428_1430'),
]
operations = [
migrations.CreateModel(
name='Satellite',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('version', models.IntegerField(default=1)),
('name', models.CharField(max_length=140)),
('nickname', models.CharField(max_length=140)),
('tle', models.FileField(null=True, upload_to='')),
('status', models.IntegerField(default=0, max_length=2, choices=[(0, 'operation status unknown'), (1, 'operational'), (2, 'non operational'), (3, 'partially operational'), (4, 'on standby'), (5, 'spare'), (6, 'extended mission')])),
],
options={
},
bases=(models.Model,),
),
]
| apache-2.0 | -602,068,294,722,521,700 | 36.285714 | 248 | 0.550766 | false | 4.062257 | false | false | false |
aajanki/youtube-dl | youtube_dl/extractor/viki.py | 5 | 10269 | from __future__ import unicode_literals
import time
import hmac
import hashlib
import itertools
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
)
from .common import InfoExtractor
class VikiBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
_API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com'
_API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s'
_APP = '65535a'
_APP_VERSION = '2.2.5.1428709186'
_APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)'
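    # Every API call is signed: the query string (including the app id and a
    # timestamp) is HMAC-SHA1'd with _APP_SECRET and appended as the `sig`
    # parameter; see _prepare_call() below.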
def _prepare_call(self, path, timestamp=None):
path += '?' if '?' not in path else '&'
if not timestamp:
timestamp = int(time.time())
query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp)
sig = hmac.new(
self._APP_SECRET.encode('ascii'),
query.encode('ascii'),
hashlib.sha1
).hexdigest()
return self._API_URL_TEMPLATE % (query, sig)
def _call_api(self, path, video_id, note, timestamp=None):
resp = self._download_json(
self._prepare_call(path, timestamp), video_id, note)
error = resp.get('error')
if error:
if error == 'invalid timestamp':
resp = self._download_json(
self._prepare_call(path, int(resp['current_timestamp'])),
video_id, '%s (retry)' % note)
error = resp.get('error')
if error:
self._raise_error(resp['error'])
return resp
def _raise_error(self, error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error),
expected=True)
class VikiIE(VikiBaseIE):
IE_NAME = 'viki'
_VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
'info_dict': {
'id': '1023585v',
'ext': 'mp4',
'title': 'Heirs Episode 14',
'uploader': 'SBS',
'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
'upload_date': '20131121',
'age_limit': 13,
},
'skip': 'Blocked in the US',
}, {
# clip
'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
'info_dict': {
'id': '1067139v',
'ext': 'mp4',
'title': "'The Avengers: Age of Ultron' Press Conference",
'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
'duration': 352,
'timestamp': 1430380829,
'upload_date': '20150430',
'uploader': 'Arirang TV',
'like_count': int,
'age_limit': 0,
}
}, {
'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
'info_dict': {
'id': '1048879v',
'ext': 'mp4',
'title': 'Ankhon Dekhi',
'duration': 6512,
'timestamp': 1408532356,
'upload_date': '20140820',
'uploader': 'Spuul',
'like_count': int,
'age_limit': 13,
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# episode
'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
'md5': '190f3ef426005ba3a080a63325955bc3',
'info_dict': {
'id': '44699v',
'ext': 'mp4',
'title': 'Boys Over Flowers - Episode 1',
'description': 'md5:52617e4f729c7d03bfd4bcbbb6e946f2',
'duration': 4155,
'timestamp': 1270496524,
'upload_date': '20100405',
'uploader': 'group8',
'like_count': int,
'age_limit': 13,
}
}, {
# youtube external
'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
'md5': '216d1afdc0c64d1febc1e9f2bd4b864b',
'info_dict': {
'id': '50562v',
'ext': 'mp4',
'title': 'Poor Nastya [COMPLETE] - Episode 1',
'description': '',
'duration': 607,
'timestamp': 1274949505,
'upload_date': '20101213',
'uploader': 'ad14065n',
'uploader_id': 'ad14065n',
'like_count': int,
'age_limit': 13,
}
}, {
'url': 'http://www.viki.com/player/44699v',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
title = None
titles = video.get('titles')
if titles:
            title = titles.get('en') or titles[list(titles.keys())[0]]
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
container_titles = video.get('container', {}).get('titles')
if container_titles:
                container_title = container_titles.get('en') or container_titles[list(container_titles.keys())[0]]
title = '%s - %s' % (container_title, title)
descriptions = video.get('descriptions')
        description = descriptions.get('en') or descriptions[list(descriptions.keys())[0]] if descriptions else None
duration = int_or_none(video.get('duration'))
timestamp = parse_iso8601(video.get('created_at'))
uploader = video.get('author')
like_count = int_or_none(video.get('likes', {}).get('count'))
age_limit = parse_age_limit(video.get('rating'))
thumbnails = []
for thumbnail_id, thumbnail in video.get('images', {}).items():
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail.get('url'),
})
subtitles = {}
for subtitle_lang, _ in video.get('subtitle_completions', {}).items():
subtitles[subtitle_lang] = [{
'ext': subtitles_format,
'url': self._prepare_call(
'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)),
} for subtitles_format in ('srt', 'vtt')]
result = {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'like_count': like_count,
'age_limit': age_limit,
'thumbnails': thumbnails,
'subtitles': subtitles,
}
streams = self._call_api(
'videos/%s/streams.json' % video_id, video_id,
'Downloading video streams JSON')
if 'external' in streams:
result.update({
'_type': 'url_transparent',
'url': streams['external']['url'],
})
return result
formats = []
for format_id, stream_dict in streams.items():
height = self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None)
for protocol, format_dict in stream_dict.items():
if format_id == 'm3u8':
formats = self._extract_m3u8_formats(
format_dict['url'], video_id, 'mp4', m3u8_id='m3u8-%s' % protocol)
else:
formats.append({
'url': format_dict['url'],
'format_id': '%s-%s' % (format_id, protocol),
'height': height,
})
self._sort_formats(formats)
result['formats'] = formats
return result
class VikiChannelIE(VikiBaseIE):
IE_NAME = 'viki:channel'
_VALID_URL = r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
'info_dict': {
'id': '50c',
'title': 'Boys Over Flowers',
'description': 'md5:ecd3cff47967fe193cff37c0bec52790',
},
'playlist_count': 70,
}, {
'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
'info_dict': {
'id': '1354c',
'title': 'Poor Nastya [COMPLETE]',
'description': 'md5:05bf5471385aa8b21c18ad450e350525',
},
'playlist_count': 127,
}, {
'url': 'http://www.viki.com/news/24569c-showbiz-korea',
'only_matching': True,
}, {
'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005',
'only_matching': True,
}, {
'url': 'http://www.viki.com/artists/2141c-shinee',
'only_matching': True,
}]
_PER_PAGE = 25
def _real_extract(self, url):
channel_id = self._match_id(url)
channel = self._call_api(
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
titles = channel['titles']
        title = titles.get('en') or titles[list(titles.keys())[0]]
        descriptions = channel['descriptions']
        description = descriptions.get('en') or descriptions[list(descriptions.keys())[0]]
entries = []
for video_type in ('episodes', 'clips', 'movies'):
for page_num in itertools.count(1):
page = self._call_api(
'containers/%s/%s.json?per_page=%d&sort=number&direction=asc&with_paging=true&page=%d'
% (channel_id, video_type, self._PER_PAGE, page_num), channel_id,
'Downloading %s JSON page #%d' % (video_type, page_num))
for video in page['response']:
video_id = video['id']
entries.append(self.url_result(
'http://www.viki.com/videos/%s' % video_id, 'Viki'))
if not page['pagination']['next']:
break
return self.playlist_result(entries, channel_id, title, description)
| unlicense | -8,650,367,813,478,250,000 | 34.410345 | 121 | 0.510761 | false | 3.510769 | false | false | false |
scream7/leetcode | algorithms/python/232.py | 1 | 1090 | class Queue(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack1 = []
self.stack2 = []
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.stack1.append(x)
def pop(self):
"""
:rtype: nothing
"""
if not len(self.stack2):
while len(self.stack1):
self.stack2.append(self.stack1.pop())
self.stack2.pop()
def peek(self):
"""
:rtype: int
"""
if not len(self.stack2):
while len(self.stack1):
self.stack2.append(self.stack1.pop())
return self.stack2[-1]
def empty(self):
"""
:rtype: bool
"""
        return len(self.stack1) == 0 and len(self.stack2) == 0
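# Explanatory note (not part of the original solution): push() always appends to
# stack1, while pop()/peek() move everything from stack1 onto stack2 only when
# stack2 is empty, which reverses the order so the top of stack2 is the front of
# the queue.  Each element is moved between the stacks at most once, so all
# operations are amortized O(1).  For example:
#   push(1); push(2)   -> stack1 = [1, 2], stack2 = []
#   peek()             -> stack2 becomes [2, 1] and 1 is returned
#   pop()              -> removes 1; a following peek() returns 2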
if __name__ == '__main__':
q = Queue()
# q.push(1)
# q.push(2)
# print q.peek()
# q.push(3)
# print q.peek()
# q.push(1)
# q.push(2)
# print q.peek()
q.push(1)
print q.peek()
| apache-2.0 | -8,064,676,924,825,861,000 | 19.566038 | 61 | 0.438532 | false | 3.416928 | false | false | false |
gregpuzzles1/Sandbox | WordGolf-0.2.4/game/scorespage.py | 1 | 6378 | """This is page for showing the high scores"""
import pygame
import sys
import os
import serge.visual
import serge.actor
import serge.common
import serge.sound
import serge.engine
import serge.blocks.visualblocks
import serge.blocks.scores
import game
import common
from theme import get as G
### The high score table ###
class ScoresPage(serge.actor.Actor, common.MoveableBackground):
"""Show the scores page"""
def __init__(self, game, world):
"""Initialise the page"""
super(ScoresPage, self).__init__('scores')
#
self.addLogo(world)
#
self.actions = serge.blocks.layout.HorizontalBar('actions', height=100)
self.actions.moveTo(320, 400)
self.actions.setLayerName('ui')
world.addActor(self.actions)
#
#b = serge.actor.Actor('button', 'return')
#b.visual = serge.blocks.visualblocks.SpriteText('Return', BUTTON_TEXT, 'button_back', font_size=BIG_TEXT)
#b.linkEvent('left-click', self.handleReturn)
#self.actions.addActor(b)
#
b = serge.actor.Actor('button', 'play')
b.visual = serge.blocks.visualblocks.SpriteText('Play', G('text-button-colour'), 'button_back', font_size=G('large-text-size'))
b.linkEvent('left-click', self.handlePlay)
self.actions.addActor(b)
#
b = serge.actor.Actor('button', 'reset')
b.visual = serge.blocks.visualblocks.SpriteText('Reset', G('text-button-colour'), 'button_back', font_size=G('large-text-size'))
b.linkEvent('left-click', self.handleReset)
self.actions.addActor(b)
#
b = serge.actor.Actor('button', 'quit')
b.visual = serge.blocks.visualblocks.SpriteText('Quit', G('text-button-colour'), 'button_back', font_size=G('large-text-size'))
b.linkEvent('left-click', self.handleQuit)
self.actions.addActor(b)
#
self.background = serge.actor.Actor('scores-page')
self.background.setSpriteName('scores-page')
self.background.moveTo(320, 240)
self.background.setLayerName('course')
world.addActor(self.background)
#
# The scores
self.shots = serge.blocks.layout.VerticalBar('ui-grid', width=G('score-grid-width'), height=G('score-grid-height'))
self.shots.setOrigin(G('score-grid-offset-x'), G('score-grid-offset-y'))
self.shots.setLayerName('ui')
t = serge.actor.Actor('text', 'header')
t.visual = serge.visual.Text('', G('text-button-colour'), font_size=G('normal-text-size'), justify='left')
self.shots.addActor(t)
#
self.shots_row = []
for row in range(5):
t = serge.actor.Actor('text', row)
t.visual = serge.visual.Text('', G('text-button-colour'), font_size=G('large-text-size'), justify='left')
self.shots.addActor(t)
self.shots_row.append(t)
world.addActor(self.shots)
#
self.game = game
self.world = world
#
self.setUpTable()
        self.gamestart = None
#
self.addEffects()
def setUpTable(self):
"""Set up the high score table"""
var = 'HOME' if not sys.platform.startswith('win') else 'HOMEPATH'
self.score_filename = os.path.join(os.getenv(var), '.bogolf.scores')
if os.path.isfile(self.score_filename):
self.log.info('Loading scores from %s' % self.score_filename)
self.table = serge.serialize.Serializable.fromFile(self.score_filename)
else:
self.log.info('New scores file at %s' % self.score_filename)
self.resetTable()
def saveTable(self):
"""Save the high score table"""
self.table.toFile(self.score_filename)
def handleReturn(self, obj, arg):
"""Handle that we requested to return"""
serge.sound.Register.playSound('letter')
self.world.getEngine().setCurrentWorldByName('end')
def handlePlay(self, obj, arg):
"""Handle that we requested to play"""
serge.sound.Register.playSound('letter')
self.world.getEngine().setCurrentWorldByName('start')
def handleQuit(self, obj, arg):
"""Handle clicking on quit"""
self.log.info('Quiting now')
serge.sound.Register.playSound('end-game')
serge.engine.CurrentEngine().stop()
def handleReset(self, obj, arg):
"""Handle clicking on reset"""
self.log.info('Resetting high scores')
serge.sound.Register.playSound('letter')
self.table.resetCategory('%s - %d holes - shots' % (self.gamestart.selected_game_name, self.gamestart.selected_holes))
self.updateTable()
def resetTable(self):
"""Reset the scores table"""
self.table = serge.blocks.scores.HighScoreTable()
for game in (('easy', 'medium', 'hard')):
for holes in (1, 3, 6, 9, 12, 15, 18):
self.table.addCategory('%s - %d holes - shots' % (game, holes),
number=5, sort_columns=[1,2], directions=['descending', 'descending'])
self.table.addCategory('%s - %d holes - time' % (game, holes),
number=5, sort_columns=[2,1], directions=['descending', 'descending'])
self.saveTable()
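    # Note: category keys follow the '<game> - <holes> holes - <metric>' pattern used
    # above, e.g. 'medium - 9 holes - shots' or 'easy - 18 holes - time'; the same
    # strings are rebuilt in handleReset() and updateTable() when looking up entries.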
def activateWorld(self):
"""When we are activated"""
if self.gamestart:
self.updateTable()
def updateTable(self):
"""Update the current scores table"""
for row in range(5):
self.shots_row[row].setText('')
results = self.table.getCategory('%s - %d holes - shots' % (self.gamestart.selected_game_name, self.gamestart.selected_holes))
for row, (name, shots, time, date) in enumerate(results):
if shots == 0:
result = 'Even par'
else:
result = '%d %s par' % (abs(shots), 'over' if shots > 0 else 'under')
if self.pad.this_score == row+1:
self.shots_row[row].visual.setColour((255,255,255))
else:
self.shots_row[row].visual.setColour(G('text-button-colour'))
self.shots_row[row].setText('%d - %s in %s seconds' % (row+1, result, self.niceTime(time)))
| gpl-3.0 | -8,383,739,616,639,641,000 | 39.624204 | 136 | 0.588429 | false | 3.61975 | false | false | false |
jcpowermac/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vms.py | 2 | 73025 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vms
short_description: Module to manage Virtual Machines in oVirt/RHV
version_added: "2.2"
author:
- Ondra Machacek (@machacekondra)
description:
- This module manages whole lifecycle of the Virtual Machine(VM) in oVirt/RHV.
- Since VM can hold many states in oVirt/RHV, this see notes to see how the states of the VM are handled.
options:
name:
description:
- Name of the Virtual Machine to manage.
            - If the VM doesn't exist, C(name) is required. Otherwise C(id) or C(name) can be used.
id:
description:
- ID of the Virtual Machine to manage.
state:
description:
- Should the Virtual Machine be running/stopped/present/absent/suspended/next_run/registered.
              When C(state) is I(registered) and the unregistered VM's name
              belongs to a VM that is already registered in the engine in the same DC,
              then registering the unregistered VM fails.
- I(present) state will create/update VM and don't change its state if it already exists.
- I(running) state will create/update VM and start it.
- I(next_run) state updates the VM and if the VM has next run configuration it will be rebooted.
            - Please check I(notes) for a more detailed description of states.
- I(registered) is supported since 2.4.
choices: [ absent, next_run, present, registered, running, stopped, suspended ]
default: present
cluster:
description:
- Name of the cluster, where Virtual Machine should be created.
- Required if creating VM.
allow_partial_import:
description:
- Boolean indication whether to allow partial registration of Virtual Machine when C(state) is registered.
version_added: "2.4"
vnic_profile_mappings:
description:
- "Mapper which maps an external virtual NIC profile to one that exists in the engine when C(state) is registered.
vnic_profile is described by the following dictionary:"
- "C(source_network_name): The network name of the source network."
- "C(source_profile_name): The prfile name related to the source network."
- "C(target_profile_id): The id of the target profile id to be mapped to in the engine."
version_added: "2.5"
cluster_mappings:
description:
- "Mapper which maps cluster name between VM's OVF and the destination cluster this VM should be registered to,
relevant when C(state) is registered.
Cluster mapping is described by the following dictionary:"
- "C(source_name): The name of the source cluster."
- "C(dest_name): The name of the destination cluster."
version_added: "2.5"
role_mappings:
description:
- "Mapper which maps role name between VM's OVF and the destination role this VM should be registered to,
relevant when C(state) is registered.
Role mapping is described by the following dictionary:"
- "C(source_name): The name of the source role."
- "C(dest_name): The name of the destination role."
version_added: "2.5"
domain_mappings:
description:
- "Mapper which maps aaa domain name between VM's OVF and the destination aaa domain this VM should be registered to,
relevant when C(state) is registered.
The aaa domain mapping is described by the following dictionary:"
- "C(source_name): The name of the source aaa domain."
- "C(dest_name): The name of the destination aaa domain."
version_added: "2.5"
affinity_group_mappings:
description:
- "Mapper which maps affinty name between VM's OVF and the destination affinity this VM should be registered to,
relevant when C(state) is registered."
version_added: "2.5"
affinity_label_mappings:
description:
- "Mappper which maps affinity label name between VM's OVF and the destination label this VM should be registered to,
relevant when C(state) is registered."
version_added: "2.5"
lun_mappings:
description:
- "Mapper which maps lun between VM's OVF and the destination lun this VM should contain, relevant when C(state) is registered.
lun_mappings is described by the following dictionary:
- C(logical_unit_id): The logical unit number to identify a logical unit,
- C(logical_unit_port): The port being used to connect with the LUN disk.
- C(logical_unit_portal): The portal being used to connect with the LUN disk.
- C(logical_unit_address): The address of the block storage host.
- C(logical_unit_target): The iSCSI specification located on an iSCSI server
- C(logical_unit_username): Username to be used to connect to the block storage host.
- C(logical_unit_password): Password to be used to connect to the block storage host.
- C(storage_type): The storage type which the LUN reside on (iscsi or fcp)"
version_added: "2.5"
reassign_bad_macs:
description:
- "Boolean indication whether to reassign bad macs when C(state) is registered."
version_added: "2.5"
template:
description:
- Name of the template, which should be used to create Virtual Machine.
- Required if creating VM.
- If template is not specified and VM doesn't exist, VM will be created from I(Blank) template.
template_version:
description:
- Version number of the template to be used for VM.
- By default the latest available version of the template is used.
version_added: "2.3"
use_latest_template_version:
description:
- Specify if latest template version should be used, when running a stateless VM.
- If this parameter is set to I(yes) stateless VM is created.
type: bool
version_added: "2.3"
storage_domain:
description:
- Name of the storage domain where all template disks should be created.
- This parameter is considered only when C(template) is provided.
            - IMPORTANT - This parameter is not idempotent, if the VM exists and you specify a different storage domain,
              the disk won't move.
version_added: "2.4"
disk_format:
description:
- Specify format of the disk.
- If C(cow) format is used, disk will by created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
- If C(raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
- Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
- This parameter is considered only when C(template) and C(storage domain) is provided.
choices: [ cow, raw ]
default: cow
version_added: "2.4"
memory:
description:
- Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- Default value is set by engine.
memory_guaranteed:
description:
- Amount of minimal guaranteed memory of the Virtual Machine.
Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB).
- C(memory_guaranteed) parameter can't be lower than C(memory) parameter.
- Default value is set by engine.
cpu_shares:
description:
- Set a CPU shares for this Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_cores:
description:
            - Number of virtual CPU cores of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_sockets:
description:
            - Number of virtual CPU sockets of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
cpu_threads:
description:
            - Number of virtual CPU threads of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
version_added: "2.5"
type:
description:
- Type of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
choices: [ desktop, server ]
operating_system:
description:
- Operating system of the Virtual Machine.
- Default value is set by oVirt/RHV engine.
choices:
- debian_7
- freebsd
- freebsdx64
- other
- other_linux
- other_linux_ppc64
- other_ppc64
- rhel_3
- rhel_4
- rhel_4x64
- rhel_5
- rhel_5x64
- rhel_6
- rhel_6x64
- rhel_6_ppc64
- rhel_7x64
- rhel_7_ppc64
- sles_11
- sles_11_ppc64
- ubuntu_12_04
- ubuntu_12_10
- ubuntu_13_04
- ubuntu_13_10
- ubuntu_14_04
- ubuntu_14_04_ppc64
- windows_10
- windows_10x64
- windows_2003
- windows_2003x64
- windows_2008
- windows_2008x64
- windows_2008r2x64
- windows_2008R2x64
- windows_2012x64
- windows_2012R2x64
- windows_7
- windows_7x64
- windows_8
- windows_8x64
- windows_xp
boot_devices:
description:
- List of boot devices which should be used to boot. For example C([ cdrom, hd ]).
- Default value is set by oVirt/RHV engine.
choices: [ cdrom, hd, network ]
host:
description:
- Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler.
- This parameter is used only when C(state) is I(running) or I(present).
high_availability:
description:
- If I(yes) Virtual Machine will be set as highly available.
- If I(no) Virtual Machine won't be set as highly available.
- If no value is passed, default value is set by oVirt/RHV engine.
type: bool
lease:
description:
- Name of the storage domain this virtual machine lease reside on.
- NOTE - Supported since oVirt 4.1.
version_added: "2.4"
delete_protected:
description:
- If I(yes) Virtual Machine will be set as delete protected.
- If I(no) Virtual Machine won't be set as delete protected.
- If no value is passed, default value is set by oVirt/RHV engine.
stateless:
description:
- If I(yes) Virtual Machine will be set as stateless.
- If I(no) Virtual Machine will be unset as stateless.
- If no value is passed, default value is set by oVirt/RHV engine.
clone:
description:
- If I(yes) then the disks of the created virtual machine will be cloned and independent of the template.
- This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
type: bool
default: 'no'
clone_permissions:
description:
- If I(yes) then the permissions of the template (only the direct ones, not the inherited ones)
will be copied to the created virtual machine.
- This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before.
type: bool
default: 'no'
cd_iso:
description:
- ISO file from ISO storage domain which should be attached to Virtual Machine.
- If you pass empty string the CD will be ejected from VM.
- If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM.
- If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently.
force:
description:
            - Please check the I(Synopsis) for a more detailed description of the force parameter, as it can behave differently
in different situations.
type: bool
default: 'no'
nics:
description:
- List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary.
- C(name) - Name of the NIC.
- C(profile_name) - Profile name where NIC should be attached.
- C(interface) - Type of the network interface. One of following I(virtio), I(e1000), I(rtl8139), default is I(virtio).
- C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool.
- NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
To manage NICs of the VM in more depth please use M(ovirt_nics) module instead.
disks:
description:
- List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary.
            - C(name) - Name of the disk. Either C(name) or C(id) is required.
            - C(id) - ID of the disk. Either C(name) or C(id) is required.
- C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio).
- C(bootable) - I(True) if the disk should be bootable, default is non bootable.
- C(activate) - I(True) if the disk should be activated, default is activated.
- NOTE - This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
To manage disks of the VM in more depth please use M(ovirt_disks) module instead.
sysprep:
description:
- Dictionary with values for Windows Virtual Machine initialization using sysprep.
- C(host_name) - Hostname to be set to Virtual Machine when deployed.
- C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user.
- C(org_name) - Organization name to be set to Windows Virtual Machine.
- C(domain) - Domain to be set to Windows Virtual Machine.
- C(timezone) - Timezone to be set to Windows Virtual Machine.
- C(ui_language) - UI language of the Windows Virtual Machine.
- C(system_locale) - System localization of the Windows Virtual Machine.
- C(input_locale) - Input localization of the Windows Virtual Machine.
- C(windows_license_key) - License key to be set to Windows Virtual Machine.
- C(user_name) - Username to be used for set password to Windows Virtual Machine.
- C(root_password) - Password to be set for username to Windows Virtual Machine.
cloud_init:
description:
- Dictionary with values for Unix-like Virtual Machine initialization using cloud init.
- C(host_name) - Hostname to be set to Virtual Machine when deployed.
- C(timezone) - Timezone to be set to Virtual Machine when deployed.
- C(user_name) - Username to be used to set password to Virtual Machine when deployed.
- C(root_password) - Password to be set for user specified by C(user_name) parameter.
- C(authorized_ssh_keys) - Use this SSH keys to login to Virtual Machine.
- C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine.
- C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the
cloud-init script generated by any other options.
- C(dns_servers) - DNS servers to be configured on Virtual Machine.
- C(dns_search) - DNS search domains to be configured on Virtual Machine.
- C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
- C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
- C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
- C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
- C(nic_name) - Set name to network interface of Virtual Machine.
- C(nic_on_boot) - If I(True) network interface will be set to start on boot.
cloud_init_nics:
description:
            - List of dictionaries representing network interfaces to be set up by cloud init.
- This option is used, when user needs to setup more network interfaces via cloud init.
- If one network interface is enough, user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters
are merged with C(cloud_init_nics) parameters.
- Dictionary can contain following values.
- C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of C(none), C(dhcp) or C(static).
- C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine.
- C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine.
- C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine.
- C(nic_name) - Set name to network interface of Virtual Machine.
- C(nic_on_boot) - If I(True) network interface will be set to start on boot.
version_added: "2.3"
kernel_path:
description:
- Path to a kernel image used to boot the virtual machine.
- Kernel image must be stored on either the ISO domain or on the host's storage.
version_added: "2.3"
initrd_path:
description:
- Path to an initial ramdisk to be used with the kernel specified by C(kernel_path) option.
- Ramdisk image must be stored on either the ISO domain or on the host's storage.
version_added: "2.3"
kernel_params:
description:
- Kernel command line parameters (formatted as string) to be used with the kernel specified by C(kernel_path) option.
version_added: "2.3"
instance_type:
description:
- Name of virtual machine's hardware configuration.
- By default no instance type is used.
version_added: "2.3"
description:
description:
- Description of the Virtual Machine.
version_added: "2.3"
comment:
description:
- Comment of the Virtual Machine.
version_added: "2.3"
timezone:
description:
- Sets time zone offset of the guest hardware clock.
- For example C(Etc/GMT)
version_added: "2.3"
serial_policy:
description:
- Specify a serial number policy for the Virtual Machine.
- Following options are supported.
- C(vm) - Sets the Virtual Machine's UUID as its serial number.
- C(host) - Sets the host's UUID as the Virtual Machine's serial number.
- C(custom) - Allows you to specify a custom serial number in C(serial_policy_value).
version_added: "2.3"
serial_policy_value:
description:
- Allows you to specify a custom serial number.
- This parameter is used only when C(serial_policy) is I(custom).
version_added: "2.3"
vmware:
description:
- Dictionary of values to be used to connect to VMware and import
a virtual machine to oVirt.
- Dictionary can contain following values.
- C(username) - The username to authenticate against the VMware.
- C(password) - The password to authenticate against the VMware.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(vpx://wmware_user@vcenter-host/DataCenter/Cluster/esxi-host?no_verify=1)
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This is required parameter.
version_added: "2.3"
xen:
description:
- Dictionary of values to be used to connect to XEN and import
a virtual machine to oVirt.
- Dictionary can contain following values.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(xen+ssh://[email protected]). This is required parameter.
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This is required parameter.
version_added: "2.3"
kvm:
description:
- Dictionary of values to be used to connect to kvm and import
a virtual machine to oVirt.
- Dictionary can contain following values.
- C(name) - The name of the KVM virtual machine.
- C(username) - The username to authenticate against the KVM.
- C(password) - The password to authenticate against the KVM.
- C(url) - The URL to be passed to the I(virt-v2v) tool for conversion.
For example I(qemu:///system). This is required parameter.
- C(drivers_iso) - The name of the ISO containing drivers that can
be used during the I(virt-v2v) conversion process.
- C(sparse) - Specifies the disk allocation policy of the resulting
virtual machine. I(true) for sparse, I(false) for preallocated.
Default value is I(true).
- C(storage_domain) - Specifies the target storage domain for
converted disks. This is required parameter.
version_added: "2.3"
notes:
    - If the VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
      If the VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for the VM to be I(DOWN).
      If the VM is in I(SAVING_STATE) state before any operation, we try to wait for the VM to be I(SUSPENDED).
      If the VM is in I(POWERING_DOWN) state before any operation, we try to wait for the VM to be I(UP) or I(DOWN). The VM can
      get into I(UP) state from I(POWERING_DOWN) state when there is no ACPI or guest agent running inside the VM, or
      if the shutdown operation fails.
      When the user specifies I(UP) C(state), we always wait for the VM to be in I(UP) state in case the VM is I(MIGRATING),
      I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). In other states we run the start operation on the VM.
      When the user specifies I(stopped) C(state) and passes the C(force) parameter set to I(true), we forcibly stop the VM in
      any state. If the user doesn't pass the C(force) parameter, we always wait for the VM to be in UP state in case the VM is
      I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in I(PAUSED) or
      I(SUSPENDED) state, we start the VM. Then we gracefully shut down the VM.
      When the user specifies I(suspended) C(state), we always wait for the VM to be in UP state in case the VM is I(MIGRATING),
      I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE) or I(WAIT_FOR_LAUNCH). If the VM is in I(PAUSED) or I(DOWN) state,
      we start the VM. Then we suspend the VM.
      When the user specifies I(absent) C(state), we forcibly stop the VM in any state and remove it.
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Creates a new Virtual Machine from template named 'rhel7_template'
ovirt_vms:
state: present
name: myvm
template: rhel7_template
- name: Register VM
ovirt_vms:
state: registered
storage_domain: mystorage
cluster: mycluster
name: myvm
- name: Register VM using id
ovirt_vms:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
- name: Register VM, allowing partial import
ovirt_vms:
state: registered
storage_domain: mystorage
allow_partial_import: "True"
cluster: mycluster
id: 1111-1111-1111-1111
- name: Register VM with vnic profile mappings and reassign bad macs
ovirt_vms:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
vnic_profile_mappings:
- source_network_name: mynetwork
source_profile_name: mynetwork
target_profile_id: 3333-3333-3333-3333
- source_network_name: mynetwork2
source_profile_name: mynetwork2
target_profile_id: 4444-4444-4444-4444
reassign_bad_macs: "True"
- name: Register VM with mappings
ovirt_vms:
state: registered
storage_domain: mystorage
cluster: mycluster
id: 1111-1111-1111-1111
role_mappings:
- source_name: Role_A
dest_name: Role_B
domain_mappings:
- source_name: Domain_A
dest_name: Domain_B
lun_mappings:
- source_storage_type: iscsi
source_logical_unit_id: 1IET_000d0001
source_logical_unit_port: 3260
source_logical_unit_portal: 1
source_logical_unit_address: 10.34.63.203
source_logical_unit_target: iqn.2016-08-09.brq.str-01:omachace
dest_storage_type: iscsi
dest_logical_unit_id: 1IET_000d0002
dest_logical_unit_port: 3260
dest_logical_unit_portal: 1
dest_logical_unit_address: 10.34.63.204
dest_logical_unit_target: iqn.2016-08-09.brq.str-02:omachace
affinity_group_mappings:
- source_name: Affinity_A
dest_name: Affinity_B
affinity_label_mappings:
- source_name: Label_A
dest_name: Label_B
cluster_mappings:
- source_name: cluster_A
dest_name: cluster_B
- name: Creates a stateless VM which will always use latest template version
ovirt_vms:
name: myvm
template: rhel7
cluster: mycluster
use_latest_template_version: true
# Creates a new server rhel7 Virtual Machine from the Blank template
# on the brq01 cluster with 2GiB memory and 2 vcpu cores/sockets,
# and attaches a bootable disk named rhel7_disk and a virtio NIC
- ovirt_vms:
state: present
cluster: brq01
name: myvm
memory: 2GiB
cpu_cores: 2
cpu_sockets: 2
cpu_shares: 1024
type: server
operating_system: rhel_7x64
disks:
- name: rhel7_disk
bootable: True
nics:
- name: nic1
- name: Run VM with cloud init
ovirt_vms:
name: rhel7
template: rhel7
cluster: Default
memory: 1GiB
high_availability: true
cloud_init:
nic_boot_protocol: static
nic_ip_address: 10.34.60.86
nic_netmask: 255.255.252.0
nic_gateway: 10.34.63.254
nic_name: eth1
nic_on_boot: true
host_name: example.com
custom_script: |
write_files:
- content: |
Hello, world!
path: /tmp/greeting.txt
permissions: '0644'
user_name: root
root_password: super_password
- name: Run VM with cloud init, with multiple network interfaces
ovirt_vms:
name: rhel7_4
template: rhel7
cluster: mycluster
cloud_init_nics:
- nic_name: eth0
nic_boot_protocol: dhcp
nic_on_boot: true
- nic_name: eth1
nic_boot_protocol: static
nic_ip_address: 10.34.60.86
nic_netmask: 255.255.252.0
nic_gateway: 10.34.63.254
nic_on_boot: true
- name: Run VM with sysprep
ovirt_vms:
name: windows2012R2_AD
template: windows2012R2
cluster: Default
memory: 3GiB
high_availability: true
sysprep:
host_name: windowsad.example.com
user_name: Administrator
root_password: SuperPassword123
- name: Migrate/Run VM to/on host named 'host1'
ovirt_vms:
state: running
name: myvm
host: host1
- name: Change VMs CD
ovirt_vms:
name: myvm
cd_iso: drivers.iso
- name: Eject VMs CD
ovirt_vms:
name: myvm
cd_iso: ''
- name: Boot VM from CD
ovirt_vms:
name: myvm
cd_iso: centos7_x64.iso
boot_devices:
- cdrom
- name: Stop vm
ovirt_vms:
state: stopped
name: myvm
- name: Upgrade memory to already created VM
ovirt_vms:
name: myvm
memory: 4GiB
- name: Hot plug memory to already created and running VM (VM won't be restarted)
ovirt_vms:
name: myvm
memory: 4GiB
# When a change to the VM requires a restart, use the next_run state.
# The VM will be updated and rebooted if there are any changes.
# If the present state were used, the VM wouldn't be restarted.
- ovirt_vms:
state: next_run
name: myvm
boot_devices:
- network
- name: Import virtual machine from VMware
ovirt_vms:
state: stopped
cluster: mycluster
name: vmware_win10
timeout: 1800
poll_interval: 30
vmware:
url: vpx://[email protected]/Folder1/Cluster1/2.3.4.5?no_verify=1
name: windows10
storage_domain: mynfs
username: user
password: password
- name: Create vm from template and create all disks on specific storage domain
ovirt_vms:
name: vm_test
cluster: mycluster
template: mytemplate
storage_domain: mynfs
nics:
- name: nic1
- name: Remove VM, if VM is running it will be stopped
ovirt_vms:
state: absent
name: myvm
'''
RETURN = '''
id:
description: ID of the VM which is managed
returned: On success if VM is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm:
description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
returned: On success if VM is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
convert_to_bytes,
create_connection,
equal,
get_dict_of_struct,
get_entity,
get_link_name,
get_id_by_name,
ovirt_full_argument_spec,
search_by_name,
wait,
)
class VmsModule(BaseModule):
def __get_template_with_version(self):
"""
oVirt/RHV in version 4.1 doesn't support search by template+version_number,
        so we need to list all templates with the specified name and then iterate
        through its versions until we find the one we are looking for.
"""
template = None
if self.param('template'):
templates_service = self._connection.system_service().templates_service()
templates = templates_service.list(search='name=%s' % self.param('template'))
if self.param('template_version'):
templates = [
t for t in templates
if t.version.version_number == self.param('template_version')
]
if not templates:
raise ValueError(
"Template with name '%s' and version '%s' was not found'" % (
self.param('template'),
self.param('template_version')
)
)
template = sorted(templates, key=lambda t: t.version.version_number, reverse=True)[0]
return template
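    # Illustrative note on the version selection above: the candidates (after the
    # optional filtering by 'template_version') are ordered by t.version.version_number
    # in descending order, e.g. versions [1, 3, 2] -> sorted(..., reverse=True)[0]
    # picks version 3, so the newest matching version of the named template wins.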
def __get_storage_domain_and_all_template_disks(self, template):
if self.param('template') is None:
return None
if self.param('storage_domain') is None:
return None
disks = list()
for att in self._connection.follow_link(template.disk_attachments):
disks.append(
otypes.DiskAttachment(
disk=otypes.Disk(
id=att.disk.id,
format=otypes.DiskFormat(self.param('disk_format')),
storage_domains=[
otypes.StorageDomain(
id=get_id_by_name(
self._connection.system_service().storage_domains_service(),
self.param('storage_domain')
)
)
]
)
)
)
return disks
def build_entity(self):
template = self.__get_template_with_version()
disk_attachments = self.__get_storage_domain_and_all_template_disks(template)
return otypes.Vm(
id=self.param('id'),
name=self.param('name'),
cluster=otypes.Cluster(
name=self.param('cluster')
) if self.param('cluster') else None,
disk_attachments=disk_attachments,
template=otypes.Template(
id=template.id,
) if template else None,
use_latest_template_version=self.param('use_latest_template_version'),
stateless=self.param('stateless') or self.param('use_latest_template_version'),
delete_protected=self.param('delete_protected'),
high_availability=otypes.HighAvailability(
enabled=self.param('high_availability')
) if self.param('high_availability') is not None else None,
lease=otypes.StorageDomainLease(
storage_domain=otypes.StorageDomain(
id=get_id_by_name(
service=self._connection.system_service().storage_domains_service(),
name=self.param('lease')
)
)
) if self.param('lease') is not None else None,
cpu=otypes.Cpu(
topology=otypes.CpuTopology(
cores=self.param('cpu_cores'),
sockets=self.param('cpu_sockets'),
threads=self.param('cpu_threads'),
)
) if (
any((self.param('cpu_cores'), self.param('cpu_sockets'), self.param('cpu_threads')))
) else None,
cpu_shares=self.param('cpu_shares'),
os=otypes.OperatingSystem(
type=self.param('operating_system'),
boot=otypes.Boot(
devices=[
otypes.BootDevice(dev) for dev in self.param('boot_devices')
],
) if self.param('boot_devices') else None,
) if (
self.param('operating_system') or self.param('boot_devices')
) else None,
type=otypes.VmType(
self.param('type')
) if self.param('type') else None,
memory=convert_to_bytes(
self.param('memory')
) if self.param('memory') else None,
memory_policy=otypes.MemoryPolicy(
guaranteed=convert_to_bytes(self.param('memory_guaranteed')),
) if self.param('memory_guaranteed') else None,
instance_type=otypes.InstanceType(
id=get_id_by_name(
self._connection.system_service().instance_types_service(),
self.param('instance_type'),
),
) if self.param('instance_type') else None,
description=self.param('description'),
comment=self.param('comment'),
time_zone=otypes.TimeZone(
name=self.param('timezone'),
) if self.param('timezone') else None,
serial_number=otypes.SerialNumber(
policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
value=self.param('serial_policy_value'),
) if (
self.param('serial_policy') is not None or
self.param('serial_policy_value') is not None
) else None,
)
def update_check(self, entity):
return (
            equal(self.param('cluster'), get_link_name(self._connection, entity.cluster)) and
            equal(convert_to_bytes(self.param('memory')), entity.memory) and
equal(convert_to_bytes(self.param('memory_guaranteed')), entity.memory_policy.guaranteed) and
equal(self.param('cpu_cores'), entity.cpu.topology.cores) and
equal(self.param('cpu_sockets'), entity.cpu.topology.sockets) and
equal(self.param('cpu_threads'), entity.cpu.topology.threads) and
equal(self.param('type'), str(entity.type)) and
equal(self.param('operating_system'), str(entity.os.type)) and
equal(self.param('high_availability'), entity.high_availability.enabled) and
equal(self.param('lease'), get_link_name(self._connection, getattr(entity.lease, 'storage_domain', None))) and
equal(self.param('stateless'), entity.stateless) and
equal(self.param('cpu_shares'), entity.cpu_shares) and
equal(self.param('delete_protected'), entity.delete_protected) and
equal(self.param('use_latest_template_version'), entity.use_latest_template_version) and
            equal(self.param('boot_devices'), [str(dev) for dev in getattr(entity.os.boot, 'devices', None) or []]) and
equal(self.param('instance_type'), get_link_name(self._connection, entity.instance_type), ignore_case=True) and
equal(self.param('description'), entity.description) and
equal(self.param('comment'), entity.comment) and
equal(self.param('timezone'), getattr(entity.time_zone, 'name', None)) and
equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None))
)
def pre_create(self, entity):
        # If the VM doesn't exist and template is not specified, set it to Blank:
if entity is None:
if self.param('template') is None:
self._module.params['template'] = 'Blank'
def post_update(self, entity):
self.post_create(entity)
def post_create(self, entity):
# After creation of the VM, attach disks and NICs:
        # __attach_disks/__attach_nics set self.changed themselves when they attach
        # something, so their (None) return values must not overwrite it:
        self.__attach_disks(entity)
        self.__attach_nics(entity)
def pre_remove(self, entity):
# Forcibly stop the VM, if it's not in DOWN state:
if entity.status != otypes.VmStatus.DOWN:
if not self._module.check_mode:
self.changed = self.action(
action='stop',
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)['changed']
def __suspend_shutdown_common(self, vm_service):
if vm_service.get().status in [
otypes.VmStatus.MIGRATING,
otypes.VmStatus.POWERING_UP,
otypes.VmStatus.REBOOT_IN_PROGRESS,
otypes.VmStatus.WAIT_FOR_LAUNCH,
otypes.VmStatus.UP,
otypes.VmStatus.RESTORING_STATE,
]:
self._wait_for_UP(vm_service)
def _pre_shutdown_action(self, entity):
vm_service = self._service.vm_service(entity.id)
self.__suspend_shutdown_common(vm_service)
if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
vm_service.start()
self._wait_for_UP(vm_service)
return vm_service.get()
def _pre_suspend_action(self, entity):
vm_service = self._service.vm_service(entity.id)
self.__suspend_shutdown_common(vm_service)
if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
vm_service.start()
self._wait_for_UP(vm_service)
return vm_service.get()
def _post_start_action(self, entity):
vm_service = self._service.service(entity.id)
self._wait_for_UP(vm_service)
self._attach_cd(vm_service.get())
self._migrate_vm(vm_service.get())
def _attach_cd(self, entity):
cd_iso = self.param('cd_iso')
if cd_iso is not None:
vm_service = self._service.service(entity.id)
current = vm_service.get().status == otypes.VmStatus.UP
cdroms_service = vm_service.cdroms_service()
cdrom_device = cdroms_service.list()[0]
cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
cdrom = cdrom_service.get(current=current)
if getattr(cdrom.file, 'id', '') != cd_iso:
if not self._module.check_mode:
cdrom_service.update(
cdrom=otypes.Cdrom(
file=otypes.File(id=cd_iso)
),
current=current,
)
self.changed = True
return entity
def _migrate_vm(self, entity):
vm_host = self.param('host')
vm_service = self._service.vm_service(entity.id)
if vm_host is not None:
            # In case the VM is preparing to be UP, wait for it to be UP before migrating it:
if entity.status == otypes.VmStatus.UP:
hosts_service = self._connection.system_service().hosts_service()
current_vm_host = hosts_service.host_service(entity.host.id).get().name
if vm_host != current_vm_host:
if not self._module.check_mode:
vm_service.migrate(host=otypes.Host(name=vm_host))
self._wait_for_UP(vm_service)
self.changed = True
return entity
def _wait_for_UP(self, vm_service):
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.UP,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
def _wait_for_vm_disks(self, vm_service):
disks_service = self._connection.system_service().disks_service()
for da in vm_service.disk_attachments_service().list():
disk_service = disks_service.disk_service(da.disk.id)
wait(
service=disk_service,
condition=lambda disk: disk.status == otypes.DiskStatus.OK,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
def wait_for_down(self, vm):
"""
        This function will first wait for the VM to reach the DOWN status.
        Then, for stateless VMs, it will find the active snapshot and wait until
        its state is OK and the stateless snapshot is removed.
"""
vm_service = self._service.vm_service(vm.id)
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
if vm.stateless:
snapshots_service = vm_service.snapshots_service()
snapshots = snapshots_service.list()
snap_active = [
snap for snap in snapshots
if snap.snapshot_type == otypes.SnapshotType.ACTIVE
][0]
snap_stateless = [
snap for snap in snapshots
if snap.snapshot_type == otypes.SnapshotType.STATELESS
]
            # The stateless snapshot may already be removed:
if snap_stateless:
"""
                We need to wait for the active snapshot to be removed, as it is the
                current stateless snapshot. Then we need to wait for the stateless
                snapshot to be ready for use, because it will become the active snapshot.
"""
wait(
service=snapshots_service.snapshot_service(snap_active.id),
condition=lambda snap: snap is None,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
wait(
service=snapshots_service.snapshot_service(snap_stateless[0].id),
condition=lambda snap: snap.snapshot_status == otypes.SnapshotStatus.OK,
wait=self.param('wait'),
timeout=self.param('timeout'),
)
return True
def __attach_disks(self, entity):
if not self.param('disks'):
return
vm_service = self._service.service(entity.id)
disks_service = self._connection.system_service().disks_service()
disk_attachments_service = vm_service.disk_attachments_service()
self._wait_for_vm_disks(vm_service)
for disk in self.param('disks'):
# If disk ID is not specified, find disk by name:
disk_id = disk.get('id')
if disk_id is None:
disk_id = getattr(
search_by_name(
service=disks_service,
name=disk.get('name')
),
'id',
None
)
# Attach disk to VM:
disk_attachment = disk_attachments_service.attachment_service(disk_id)
if get_entity(disk_attachment) is None:
if not self._module.check_mode:
disk_attachments_service.add(
otypes.DiskAttachment(
disk=otypes.Disk(
id=disk_id,
),
active=disk.get('activate', True),
interface=otypes.DiskInterface(
disk.get('interface', 'virtio')
),
bootable=disk.get('bootable', False),
)
)
self.changed = True
def __get_vnic_profile_id(self, nic):
"""
        Return the vNIC profile ID looked up by its name. Because there can be
        several vNIC profiles with the same name, the cluster is used as an
        additional filter criterion.
"""
vnics_service = self._connection.system_service().vnic_profiles_service()
clusters_service = self._connection.system_service().clusters_service()
cluster = search_by_name(clusters_service, self.param('cluster'))
profiles = [
profile for profile in vnics_service.list()
if profile.name == nic.get('profile_name')
]
cluster_networks = [
net.id for net in self._connection.follow_link(cluster.networks)
]
try:
return next(
profile.id for profile in profiles
if profile.network.id in cluster_networks
)
except StopIteration:
raise Exception(
"Profile '%s' was not found in cluster '%s'" % (
nic.get('profile_name'),
self.param('cluster')
)
)
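    # Illustrative note: with two vNIC profiles both named, say, 'ovirtmgmt' (one per
    # data center), the lookup above keeps only the profile whose network id is among
    # the networks attached to the requested cluster, e.g.
    #   profiles         = [<profile A on net_a>, <profile B on net_b>]
    #   cluster_networks = [net_b.id]   -> profile B's id is returned.
    # (The profile/network names here are hypothetical, used only for illustration.)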
def __attach_nics(self, entity):
# Attach NICs to VM, if specified:
nics_service = self._service.service(entity.id).nics_service()
for nic in self.param('nics'):
if search_by_name(nics_service, nic.get('name')) is None:
if not self._module.check_mode:
nics_service.add(
otypes.Nic(
name=nic.get('name'),
interface=otypes.NicInterface(
nic.get('interface', 'virtio')
),
vnic_profile=otypes.VnicProfile(
id=self.__get_vnic_profile_id(nic),
) if nic.get('profile_name') else None,
mac=otypes.Mac(
address=nic.get('mac_address')
) if nic.get('mac_address') else None,
)
)
self.changed = True
def _get_role_mappings(module):
roleMappings = list()
for roleMapping in module.params['role_mappings']:
roleMappings.append(
otypes.RegistrationRoleMapping(
from_=otypes.Role(
name=roleMapping['source_name'],
) if roleMapping['source_name'] else None,
to=otypes.Role(
name=roleMapping['dest_name'],
) if roleMapping['dest_name'] else None,
)
)
return roleMappings
def _get_affinity_group_mappings(module):
affinityGroupMappings = list()
for affinityGroupMapping in module.params['affinity_group_mappings']:
affinityGroupMappings.append(
otypes.RegistrationAffinityGroupMapping(
from_=otypes.AffinityGroup(
name=affinityGroupMapping['source_name'],
) if affinityGroupMapping['source_name'] else None,
to=otypes.AffinityGroup(
name=affinityGroupMapping['dest_name'],
) if affinityGroupMapping['dest_name'] else None,
)
)
return affinityGroupMappings
def _get_affinity_label_mappings(module):
affinityLabelMappings = list()
for affinityLabelMapping in module.params['affinity_label_mappings']:
affinityLabelMappings.append(
otypes.RegistrationAffinityLabelMapping(
from_=otypes.AffinityLabel(
name=affinityLabelMapping['source_name'],
) if affinityLabelMapping['source_name'] else None,
to=otypes.AffinityLabel(
name=affinityLabelMapping['dest_name'],
) if affinityLabelMapping['dest_name'] else None,
)
)
return affinityLabelMappings
def _get_domain_mappings(module):
domainMappings = list()
for domainMapping in module.params['domain_mappings']:
domainMappings.append(
otypes.RegistrationDomainMapping(
from_=otypes.Domain(
name=domainMapping['source_name'],
) if domainMapping['source_name'] else None,
to=otypes.Domain(
name=domainMapping['dest_name'],
) if domainMapping['dest_name'] else None,
)
)
return domainMappings
def _get_lun_mappings(module):
lunMappings = list()
for lunMapping in module.params['lun_mappings']:
lunMappings.append(
otypes.RegistrationLunMapping(
from_=otypes.Disk(
lun_storage=otypes.HostStorage(
type=otypes.StorageType(lunMapping['source_storage_type'])
if (lunMapping['source_storage_type'] in
['iscsi', 'fcp']) else None,
logical_units=[
otypes.LogicalUnit(
id=lunMapping['source_logical_unit_id'],
)
],
),
) if lunMapping['source_logical_unit_id'] else None,
to=otypes.Disk(
lun_storage=otypes.HostStorage(
type=otypes.StorageType(lunMapping['dest_storage_type'])
if (lunMapping['dest_storage_type'] in
['iscsi', 'fcp']) else None,
logical_units=[
otypes.LogicalUnit(
id=lunMapping['dest_logical_unit_id'],
port=lunMapping['dest_logical_unit_port'],
portal=lunMapping['dest_logical_unit_portal'],
address=lunMapping['dest_logical_unit_address'],
target=lunMapping['dest_logical_unit_target'],
password=lunMapping['dest_logical_unit_password'],
username=lunMapping['dest_logical_unit_username'],
)
],
),
) if lunMapping['dest_logical_unit_id'] else None,
),
        )
return lunMappings
def _get_cluster_mappings(module):
clusterMappings = list()
for clusterMapping in module.params['cluster_mappings']:
clusterMappings.append(
otypes.RegistrationClusterMapping(
from_=otypes.Cluster(
name=clusterMapping['source_name'],
),
to=otypes.Cluster(
name=clusterMapping['dest_name'],
) if clusterMapping['dest_name'] else None,
)
)
return clusterMappings
def _get_vnic_profile_mappings(module):
vnicProfileMappings = list()
for vnicProfileMapping in module.params['vnic_profile_mappings']:
vnicProfileMappings.append(
otypes.VnicProfileMapping(
source_network_name=vnicProfileMapping['source_network_name'],
source_network_profile_name=vnicProfileMapping['source_profile_name'],
target_vnic_profile=otypes.VnicProfile(
id=vnicProfileMapping['target_profile_id'],
) if vnicProfileMapping['target_profile_id'] else None,
)
)
return vnicProfileMappings
def import_vm(module, connection):
vms_service = connection.system_service().vms_service()
if search_by_name(vms_service, module.params['name']) is not None:
return False
events_service = connection.system_service().events_service()
last_event = events_service.list(max=1)[0]
external_type = [
tmp for tmp in ['kvm', 'xen', 'vmware']
if module.params[tmp] is not None
][0]
external_vm = module.params[external_type]
imports_service = connection.system_service().external_vm_imports_service()
imported_vm = imports_service.add(
otypes.ExternalVmImport(
vm=otypes.Vm(
name=module.params['name']
),
name=external_vm.get('name'),
username=external_vm.get('username', 'test'),
password=external_vm.get('password', 'test'),
provider=otypes.ExternalVmProviderType(external_type),
url=external_vm.get('url'),
cluster=otypes.Cluster(
name=module.params['cluster'],
) if module.params['cluster'] else None,
storage_domain=otypes.StorageDomain(
name=external_vm.get('storage_domain'),
) if external_vm.get('storage_domain') else None,
sparse=external_vm.get('sparse', True),
host=otypes.Host(
name=module.params['host'],
) if module.params['host'] else None,
)
)
    # Wait until an event with code 1152 appears for our VM:
vms_service = connection.system_service().vms_service()
wait(
service=vms_service.vm_service(imported_vm.vm.id),
condition=lambda vm: len([
event
for event in events_service.list(
from_=int(last_event.id),
search='type=1152 and vm.id=%s' % vm.id,
)
]) > 0 if vm is not None else False,
fail_condition=lambda vm: vm is None,
timeout=module.params['timeout'],
poll_interval=module.params['poll_interval'],
)
return True
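# Note on import_vm() above: the import is treated as finished once an audit-log event
# with code 1152 that references the new VM id shows up after 'last_event'.  The meaning
# of that event code comes from the oVirt audit log and is assumed here rather than
# documented in this module.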
def _get_initialization(sysprep, cloud_init, cloud_init_nics):
initialization = None
if cloud_init or cloud_init_nics:
initialization = otypes.Initialization(
nic_configurations=[
otypes.NicConfiguration(
boot_protocol=otypes.BootProtocol(
nic.pop('nic_boot_protocol').lower()
) if nic.get('nic_boot_protocol') else None,
name=nic.pop('nic_name', None),
on_boot=nic.pop('nic_on_boot', None),
ip=otypes.Ip(
address=nic.pop('nic_ip_address', None),
netmask=nic.pop('nic_netmask', None),
gateway=nic.pop('nic_gateway', None),
) if (
nic.get('nic_gateway') is not None or
nic.get('nic_netmask') is not None or
nic.get('nic_ip_address') is not None
) else None,
)
for nic in cloud_init_nics
if (
nic.get('nic_gateway') is not None or
nic.get('nic_netmask') is not None or
nic.get('nic_ip_address') is not None or
nic.get('nic_boot_protocol') is not None or
nic.get('nic_on_boot') is not None
)
] if cloud_init_nics else None,
            **(cloud_init or {})
)
elif sysprep:
initialization = otypes.Initialization(
**sysprep
)
return initialization
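# Illustrative note (not part of the module API): a 'cloud_init' dict such as
#   {'host_name': 'vm1.example.com', 'user_name': 'root', 'root_password': 'secret',
#    'nic_name': 'eth0', 'nic_boot_protocol': 'dhcp', 'nic_on_boot': True}
# is appended to 'cloud_init_nics' in main(); the nic_* keys are popped into an
# otypes.NicConfiguration above and the remaining keys are passed straight through
# to otypes.Initialization(**cloud_init).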
def control_state(vm, vms_service, module):
if vm is None:
return
force = module.params['force']
state = module.params['state']
vm_service = vms_service.vm_service(vm.id)
if vm.status == otypes.VmStatus.IMAGE_LOCKED:
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)
elif vm.status == otypes.VmStatus.SAVING_STATE:
# Result state is SUSPENDED, we should wait to be suspended:
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
)
elif (
vm.status == otypes.VmStatus.UNASSIGNED or
vm.status == otypes.VmStatus.UNKNOWN
):
# Invalid states:
module.fail_json(msg="Not possible to control VM, if it's in '{}' status".format(vm.status))
elif vm.status == otypes.VmStatus.POWERING_DOWN:
if (force and state == 'stopped') or state == 'absent':
vm_service.stop()
wait(
service=vm_service,
condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
)
else:
# If VM is powering down, wait to be DOWN or UP.
# VM can end in UP state in case there is no GA
# or ACPI on the VM or shutdown operation crashed:
wait(
service=vm_service,
condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present', choices=['absent', 'next_run', 'present', 'registered', 'running', 'stopped', 'suspended']),
name=dict(type='str'),
id=dict(type='str'),
cluster=dict(type='str'),
allow_partial_import=dict(type='bool'),
template=dict(type='str'),
template_version=dict(type='int'),
use_latest_template_version=dict(type='bool'),
storage_domain=dict(type='str'),
disk_format=dict(type='str', default='cow', choices=['cow', 'raw']),
disks=dict(type='list', default=[]),
memory=dict(type='str'),
memory_guaranteed=dict(type='str'),
cpu_sockets=dict(type='int'),
cpu_cores=dict(type='int'),
cpu_shares=dict(type='int'),
cpu_threads=dict(type='int'),
type=dict(type='str', choices=['server', 'desktop']),
operating_system=dict(type='str',
choices=[
'rhel_6_ppc64', 'other', 'freebsd', 'windows_2003x64', 'windows_10',
'rhel_6x64', 'rhel_4x64', 'windows_2008x64', 'windows_2008R2x64',
'debian_7', 'windows_2012x64', 'ubuntu_14_04', 'ubuntu_12_04',
'ubuntu_13_10', 'windows_8x64', 'other_linux_ppc64', 'windows_2003',
'other_linux', 'windows_10x64', 'windows_2008', 'rhel_3', 'rhel_5',
'rhel_4', 'other_ppc64', 'sles_11', 'rhel_6', 'windows_xp', 'rhel_7x64',
'freebsdx64', 'rhel_7_ppc64', 'windows_7', 'rhel_5x64',
'ubuntu_14_04_ppc64', 'sles_11_ppc64', 'windows_8',
'windows_2012R2x64', 'windows_2008r2x64', 'ubuntu_13_04',
'ubuntu_12_10', 'windows_7x64',
]),
cd_iso=dict(type='str'),
boot_devices=dict(type='list'),
vnic_profile_mappings=dict(default=[], type='list'),
cluster_mappings=dict(default=[], type='list'),
role_mappings=dict(default=[], type='list'),
affinity_group_mappings=dict(default=[], type='list'),
affinity_label_mappings=dict(default=[], type='list'),
lun_mappings=dict(default=[], type='list'),
domain_mappings=dict(default=[], type='list'),
reassign_bad_macs=dict(default=None, type='bool'),
high_availability=dict(type='bool'),
lease=dict(type='str'),
stateless=dict(type='bool'),
delete_protected=dict(type='bool'),
force=dict(type='bool', default=False),
nics=dict(type='list', default=[]),
cloud_init=dict(type='dict'),
cloud_init_nics=dict(type='list', default=[]),
sysprep=dict(type='dict'),
host=dict(type='str'),
clone=dict(type='bool', default=False),
clone_permissions=dict(type='bool', default=False),
kernel_path=dict(type='str'),
initrd_path=dict(type='str'),
kernel_params=dict(type='str'),
instance_type=dict(type='str'),
description=dict(type='str'),
comment=dict(type='str'),
timezone=dict(type='str'),
serial_policy=dict(type='str', choices=['vm', 'host', 'custom']),
serial_policy_value=dict(type='str'),
vmware=dict(type='dict'),
xen=dict(type='dict'),
kvm=dict(type='dict'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['id', 'name']],
)
check_sdk(module)
check_params(module)
try:
state = module.params['state']
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
vms_module = VmsModule(
connection=connection,
module=module,
service=vms_service,
)
vm = vms_module.search_entity(list_params={'all_content': True})
control_state(vm, vms_service, module)
if state in ('present', 'running', 'next_run'):
if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
vms_module.changed = import_vm(module, connection)
sysprep = module.params['sysprep']
cloud_init = module.params['cloud_init']
cloud_init_nics = module.params['cloud_init_nics'] or []
if cloud_init is not None:
cloud_init_nics.append(cloud_init)
            # In case the VM doesn't exist, wait for the VM DOWN state,
# otherwise don't wait for any state, just update VM:
ret = vms_module.create(
entity=vm,
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
# Run the VM if it was just created, else don't run it:
if state == 'running':
initialization = _get_initialization(sysprep, cloud_init, cloud_init_nics)
ret = vms_module.action(
action='start',
post_action=vms_module._post_start_action,
action_condition=lambda vm: (
vm.status not in [
otypes.VmStatus.MIGRATING,
otypes.VmStatus.POWERING_UP,
otypes.VmStatus.REBOOT_IN_PROGRESS,
otypes.VmStatus.WAIT_FOR_LAUNCH,
otypes.VmStatus.UP,
otypes.VmStatus.RESTORING_STATE,
]
),
wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
# Start action kwargs:
use_cloud_init=cloud_init is not None or len(cloud_init_nics) > 0,
use_sysprep=sysprep is not None,
vm=otypes.Vm(
placement_policy=otypes.VmPlacementPolicy(
hosts=[otypes.Host(name=module.params['host'])]
) if module.params['host'] else None,
initialization=initialization,
os=otypes.OperatingSystem(
cmdline=module.params.get('kernel_params'),
initrd=module.params.get('initrd_path'),
kernel=module.params.get('kernel_path'),
) if (
module.params.get('kernel_params') or
module.params.get('initrd_path') or
module.params.get('kernel_path')
) else None,
) if (
module.params.get('kernel_params') or
module.params.get('initrd_path') or
module.params.get('kernel_path') or
module.params.get('host') or
initialization
) else None,
)
if state == 'next_run':
# Apply next run configuration, if needed:
vm = vms_service.vm_service(ret['id']).get()
if vm.next_run_configuration_exists:
ret = vms_module.action(
action='reboot',
entity=vm,
action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
)
elif state == 'stopped':
if module.params['xen'] or module.params['kvm'] or module.params['vmware']:
vms_module.changed = import_vm(module, connection)
ret = vms_module.create(
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
if module.params['force']:
ret = vms_module.action(
action='stop',
post_action=vms_module._attach_cd,
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=vms_module.wait_for_down,
)
else:
ret = vms_module.action(
action='shutdown',
pre_action=vms_module._pre_shutdown_action,
post_action=vms_module._attach_cd,
action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
wait_condition=vms_module.wait_for_down,
)
elif state == 'suspended':
vms_module.create(
result_state=otypes.VmStatus.DOWN if vm is None else None,
clone=module.params['clone'],
clone_permissions=module.params['clone_permissions'],
)
ret = vms_module.action(
action='suspend',
pre_action=vms_module._pre_suspend_action,
action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
)
elif state == 'absent':
ret = vms_module.remove()
elif state == 'registered':
storage_domains_service = connection.system_service().storage_domains_service()
# Find the storage domain with unregistered VM:
sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
vms_service = storage_domain_service.vms_service()
            # Find the unregistered VM we want to register:
vms = vms_service.list(unregistered=True)
vm = next(
(vm for vm in vms if (vm.id == module.params['id'] or vm.name == module.params['name'])),
None
)
changed = False
if vm is None:
vm = vms_module.search_entity()
if vm is None:
raise ValueError(
"VM '%s(%s)' wasn't found." % (module.params['name'], module.params['id'])
)
else:
# Register the vm into the system:
changed = True
vm_service = vms_service.vm_service(vm.id)
vm_service.register(
allow_partial_import=module.params['allow_partial_import'],
cluster=otypes.Cluster(
name=module.params['cluster']
) if module.params['cluster'] else None,
vnic_profile_mappings=_get_vnic_profile_mappings(module)
if module.params['vnic_profile_mappings'] else None,
reassign_bad_macs=module.params['reassign_bad_macs']
if module.params['reassign_bad_macs'] is not None else None,
registration_configuration=otypes.RegistrationConfiguration(
cluster_mappings=_get_cluster_mappings(module),
role_mappings=_get_role_mappings(module),
domain_mappings=_get_domain_mappings(module),
lun_mappings=_get_lun_mappings(module),
affinity_group_mappings=_get_affinity_group_mappings(module),
affinity_label_mappings=_get_affinity_label_mappings(module),
) if (module.params['cluster_mappings']
or module.params['role_mappings']
or module.params['domain_mappings']
or module.params['lun_mappings']
or module.params['affinity_group_mappings']
or module.params['affinity_label_mappings']) else None
)
if module.params['wait']:
vm = vms_module.wait_for_import()
else:
# Fetch vm to initialize return.
vm = vm_service.get()
ret = {
'changed': changed,
'id': vm.id,
'vm': get_dict_of_struct(vm)
}
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 | 4,588,817,234,274,543,000 | 42.364014 | 158 | 0.571996 | false | 4.239231 | false | false | false |
Alwnikrotikz/pyglet | tests/clock/SCHEDULE_ONCE.py | 33 | 1396 | #!/usr/bin/env python
'''Test that a scheduled function gets called exactly once with the correct
time delta.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: TICK.py 310 2006-12-23 15:56:35Z Alex.Holkner $'
import time
import unittest
from pyglet import clock
__noninteractive = True
class SCHEDULE_ONCE(unittest.TestCase):
callback_1_count = 0
callback_2_count = 0
callback_3_count = 0
def callback_1(self, dt):
self.assertTrue(abs(dt - 0.1) < 0.01)
self.callback_1_count += 1
def callback_2(self, dt):
self.assertTrue(abs(dt - 0.35) < 0.01)
self.callback_2_count += 1
def callback_3(self, dt):
self.assertTrue(abs(dt - 0.07) < 0.01)
self.callback_3_count += 1
def clear(self):
self.callback_1_count = 0
self.callback_2_count = 0
self.callback_3_count = 0
def test_schedule_once(self):
self.clear()
clock.set_default(clock.Clock())
clock.schedule_once(self.callback_1, 0.1)
clock.schedule_once(self.callback_2, 0.35)
clock.schedule_once(self.callback_3, 0.07)
t = 0
while t < 1:
t += clock.tick()
self.assertTrue(self.callback_1_count == 1)
self.assertTrue(self.callback_2_count == 1)
self.assertTrue(self.callback_3_count == 1)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,906,539,223,165,805,300 | 24.851852 | 73 | 0.603152 | false | 3.224018 | true | false | false |
gizmo-cda/g2x-submarine-v2 | scripts/video/get_stream.py | 1 | 1243 | #!/usr/bin/env python3
import requests
frames = []
data = b""
found_first = False
response = requests.get("http://navigation.local:8080/stream/video.h264", stream=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
starting_offset = len(data)
if starting_offset >= 2:
			if data[-1] == 0:  # indexing bytes yields an int in Python 3
print("last byte is zero, backing up one")
starting_offset -= 1
				if data[-2] == 0:
print("second to last byte is zero, backing up one more")
starting_offset -= 1
data = data + chunk
offset = data.find(b"\x00\x00\x01", starting_offset)
if offset != -1:
print("found frame")
remaining = data[offset:]
if not found_first:
print("dropping partial first frame")
found_first = True
else:
print("adding frame", len(frames) + 1)
frames.append(data[:offset])
if len(frames) == 120:
break
data = remaining
with open("navigation.h264", "wb") as out:
out.write(b"\x00")
for frame in frames:
out.write(frame)
| bsd-3-clause | -6,991,393,197,337,940,000 | 27.906977 | 86 | 0.517297 | false | 4.088816 | false | false | false |
Caranarq/01_Dmine | 02_Aire/P0221/P0221.py | 1 | 2750 | # -*- coding: utf-8 -*-
"""
Started on fri, jul 27th, 2018
@author: carlos.arana
"""
# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
The local libraries used in the lines above are available at the following locations:
SCRIPT:             | AVAILABLE AT:
------ | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Parameter documentation ---------------------------------------------------------------------------------------
# Parameter descriptions
M = Meta
M.ClaveParametro = 'P0221'
M.NombreParametro = 'Emisiones anuales CO'
M.DescParam = 'Monóxido de carbono'
M.UnidadesParam = 'ton'
M.TituloParametro = 'GEI'    # Used to name the parameter column
M.PeriodoParam = '2008'
M.TipoInt = 2   # 1: Binary; 2: Multivariable; 3: Integral
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C'     # (Variable types: [C]ontinuous, [D]iscrete [O]rdinal, [B]inary or [N]ominal)
M.array = []
M.TipoAgr = 'sum'
# Data-mining process descriptions
M.nomarchivodataset = 'P0218'
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Toneladas de gases de efecto invernadero, clasificadas por gas, para el año 2008'
M.ClaveDataset = 'SEMARNAT'
M.ActDatos = '2008'
M.Agregacion = 'Se sumó la cantidad de emisiones anuales de CO para los municipios que componen ' \
'cada ciudad del SUN' \
# Descriptions generated from the parameter key
M.getmetafromds = 1
Meta.fillmeta(M)
# Parameter construction -----------------------------------------------------------------------------------------
# Load the initial dataset
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
list(dataset)
# Generate the dataset for the parameter and the integrity variable
var1 = 'CO'
par_dataset = dataset[var1]
par_dataset = dataset[var1].astype('float')
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
| gpl-3.0 | -8,705,888,511,100,321,000 | 36.067568 | 118 | 0.637988 | false | 2.959008 | false | true | false |
nheijmans/ZooKeeper | malzoo/core/services/distributor.py | 2 | 2858 | #!/usr/bin/python
"""
Distributor checks the queue and decides where the data should go next.
Workers can add hashes or files to the queue for services to search or analyze,
and the distributor will facilitate that by offering a central point.
In the future it will also make sure the samples are distributed to the correct
worker and workers are being started according to the amount of samples.
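
A sample placed on the queue is a dict; the distribute() method below expects
at least the keys shown here (the values are illustrative only):

    {'filename': '/tmp/sample.bin',
     'md5': 'd41d8cd98f00b204e9800998ecf8427e',
     'tag': 'honeypot'}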
"""
from malzoo.common.abstract import Distributor
from malzoo.core.services.apis import *
from malzoo.core.tools.general_info import GeneralInformation
from malzoo.core.tools.database import MongoDatabase
from malzoo.core.tools.signatures import Signatures
class DistributeBot(Distributor):
"""
The distributeBot wants to receive the following info in a dict: md5, file(path), tag
"""
def distribute(self,sample):
viper = ViperService()
mongodb = MongoDatabase()
filename = sample['filename']
yarasigs = Signatures()
match = yarasigs.scan(sample['filename'], rule='unwanted.yara')
if not match:
if 'md5' in sample:
if self.conf.get('settings','duplicatecheck') == 'viper':
known = viper.search({'md5':sample['md5']})
elif self.conf.get('settings','duplicatecheck') == 'mongo':
known = mongodb.search({'md5':sample['md5']})
else:
known = False
if known:
self.log('distributor - {0} - already in db'.format(sample['md5']))
else:
general_info = GeneralInformation(sample['filename'])
ft = general_info.get_filetype()
package = {'tags':sample['tag'],'file':sample['filename']}
if self.conf.getboolean('viper','enabled'):
viper.submit(package)
#determine to which worker the file is assigned based on the mime
match = yarasigs.scan(sample['filename'], rule='filetypes.yara')
if match == 'office_docs':
self.doc_q.put(sample)
elif match == 'executable':
self.pe_q.put(sample)
elif ft == 'application/zip' and match != 'java_archive':
self.zip_q.put(sample)
else:
self.other_q.put(sample)
#add the package to the modules for custom operations
self.mod_q.put(sample)
else:
self.log('distributor - {0} - no md5 given'.format(filename))
else:
self.log('distributor - {0} - matched with yara unwanted signature'.format(filename))
return
| gpl-2.0 | 1,797,698,076,426,995,000 | 42.969231 | 97 | 0.559832 | false | 4.356707 | false | false | false |
dsantin/PaellaInteractiveVideo | build.py | 1 | 2728 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import shutil
import json
import argparse
from subprocess import call
pluginDir = 'plugins/'
paellaDir = 'src/'
javascriptFile = 'javascript/paella_player.js'
cssFile = 'plugins/plugins.css'
arguments = argparse.ArgumentParser(description="Compile plugins, javascript and style sheet files.")
arguments.add_argument('--src',help='Source directory')
arguments.add_argument('--js',help='Javascript output file, with path')
arguments.add_argument('--css',help='Stylesheet output file, with path')
arguments.add_argument('--debug',action='store_true',help='do not minimize output javascript code')
arguments.add_argument('--install',action='store_true',help='generate production output files')
arguments.add_argument('--noplugins',action='store_true',help='add plugins')
intermediatePath = 'tmp'
if (not os.path.exists(intermediatePath)):
os.makedirs(intermediatePath)
args = arguments.parse_args()
if args.src:
pluginDir = args.src
if args.js:
javascriptFile = args.js
if args.css:
cssFile = args.css
if args.install:
jsOut = open(javascriptFile,'w')
cssOut = open(cssFile,'w')
else:
jsOut = open(os.path.join(intermediatePath,'javascript_output.o'),'w')
cssOut = open(os.path.join(intermediatePath,'css_output.o'),'w')
paellaFiles = os.listdir(paellaDir)
paellaFiles.sort()
for file in paellaFiles:
outPath = os.path.join(intermediatePath,file)
outFile = open(outPath,'w')
jsPath = paellaDir + file
outFile.write(open(jsPath).read())
outFile.write('\n\n')
outFile.close()
pluginFiles = os.listdir(pluginDir)
pluginFiles.sort()
f = open(pluginDir + 'ignore.json')
ignoreFiles = json.loads(f.read())
if not args.noplugins:
for file in pluginFiles:
jsPath = pluginDir + file
fileName, fileExtension = os.path.splitext(jsPath);
cssPath = fileName + '.css'
if fileExtension=='.js' and not(file in ignoreFiles):
outPath = os.path.join(intermediatePath,file)
outFile = open(outPath,'w')
outFile.write(open(jsPath).read())
outFile.write('\n\n')
outFile.close()
if os.path.exists(cssPath):
cssOut.write(open(cssPath).read())
cssOut.write('\n\n')
cssOut.close()
intermediateFiles = os.listdir(intermediatePath)
intermediateFiles.sort()
for file in intermediateFiles:
filePath = os.path.join(intermediatePath,file)
fileName, fileExtension = os.path.splitext(filePath)
if not args.debug and fileExtension=='.js':
command = "java -jar yuicompressor.jar " + filePath + " -o " + filePath
print command
subprocess.check_call(command,shell=True)
print "adding " + filePath + " to " + javascriptFile
jsOut.write(open(filePath).read())
jsOut.close()
shutil.rmtree(intermediatePath)
| gpl-2.0 | -8,803,945,977,534,689,000 | 27.416667 | 101 | 0.733871 | false | 3.124857 | false | false | false |
Celthi/youtube-dl-GUI | setup.py | 2 | 1279 | from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
DATA=[('imageformats',[
'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qjpeg4.dll',
'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qgif4.dll',
'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qico4.dll',
'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qmng4.dll',
'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qsvg4.dll',
'C:\Users\yasoob\Anaconda\Lib\site-packages\PyQt4\plugins\imageformats\qtiff4.dll',
]), ('', ['C:\Users\yasoob\Documents\GitHub\youtube-dl-GUI\\ffmpeg.exe'])]
for files in os.listdir(os.path.join(os.getcwd(),'UI')):
f1 = os.path.join(os.getcwd(),'UI', files)
if os.path.isfile(f1): # skip directories
f2 = 'UI', [f1]
DATA.append(f2)
setup(
options = {'py2exe': {'compressed': True,"includes":["sip"]}},
windows = [{
'script': "main.py",
"icon_resources": [(0, os.path.join(os.getcwd(),"resources","converted_icon.ico"))],
"dest_base":"youtube-gl",
}],
zipfile = None,
data_files = DATA,
) | mit | -5,551,135,083,274,225,000 | 40.7 | 103 | 0.630962 | false | 2.94023 | false | false | false |
ntamas/python-selecta | selecta/utils.py | 1 | 2999 | from itertools import chain
from string import printable
import unicodedata
__all__ = ["each_index_of_string", "identity", "is_printable",
"list_packer", "safeint"]
def each_index_of_string(string, corpus):
"""Finds all occurrences of a given string in a corpus.
Args:
string (str): the string to search
corpus (str): the string to search in
Yields:
a start index for each occurrence of the string within the corpus,
in ascending order
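
    Example (illustrative):
        list(each_index_of_string("ab", "abcabab"))  # -> [0, 3, 5]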
"""
start = -1
while True:
start = corpus.find(string, start+1)
if start < 0:
return
yield start
def flatten(iterable):
"""Flattens an iterable yielding iterables into a single iterable."""
return chain.from_iterable(iterable)
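# Illustrative example: list(flatten([[1, 2], [3]])) == [1, 2, 3]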
def identity(arg):
"""An identity function that simply returns its argument."""
return arg
try:
# Python 2.x
from string import maketrans
_is_printable_helper = maketrans(printable, ' '*len(printable))
except ImportError:
# Python 3.x
_is_printable_helper = printable.maketrans(printable, ' '*len(printable))
def is_printable(string):
"""Returns whether the given string consists of printable characters only.
    If the string is a Unicode string, this function uses the ``unicodedata`` module
to decide which characters are printable. If the string contains raw bytes,
it considers the characters in ``string.printable`` as printable."""
if isinstance(string, unicode):
return all(unicodedata.category(char) != 'Cc' for char in string)
else:
return all(_is_printable_helper[ord(char)] == ' ' for char in string)
def list_packer(*args):
"""An identity function that creates a list from its arguments."""
return args
def _safe_conversion(value, converter, default=None):
"""Pipes a value through a converter function and returns the converted
value or the default value if there was an exception during the conversion.
Args:
value: the value to convert
converter (callable): a callable that converts the value to the result.
Must accept a single argument only.
default: the default value to return in case of an unsuccessful
conversion.
Returns: the converted value if the conversion was successful or the default
value otherwise.
"""
try:
return converter(value)
except:
return default
def safeint(value, default=None):
"""Tries to convert a value given as a string to an integer. Returns the
default value if the value cannot be converted.
Args:
value (str): the value to turn into an integer
default (object): the default value to return if the given value cannot
be converted into an integer
Returns (int or object): the integer value converted from the given value,
or the default value if the conversion was unsuccessful.
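
    Example (illustrative):
        safeint("42")               # -> 42
        safeint("nope", default=0)  # -> 0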
"""
return _safe_conversion(value, int, default)
| mit | -3,022,395,576,088,505,300 | 30.568421 | 80 | 0.674892 | false | 4.48281 | false | false | false |
Sotera/aggregate-micro-paths | hive-streaming/conf/config.py | 1 | 3321 | # Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import six
if six.PY2:
    from ConfigParser import SafeConfigParser
else:
    from configparser import SafeConfigParser
class AggregateMicroPathConfig:
config_file = ""
table_name = ""
table_schema_id = ""
table_schema_dt = ""
table_schema_lat = ""
table_schema_lon = ""
time_filter = 0
distance_filter = 0
tripLat1 = 0
tripLon1 = 0
tripLat2 = 0
tripLon2 = 0
tripname = ""
resolutionLat = 0
resolutionLon = 0
tripLatMin = 0
tripLatMax = 0
tripLonMin = 0
tripLonMax = 0
triplineBlankets = []
def __init__(self, config, basePath = "./"):
configParser = SafeConfigParser()
configParser.read(basePath + config)
self.config_file = config
self.database_name = configParser.get("AggregateMicroPath", "database_name")
self.table_name = configParser.get("AggregateMicroPath", "table_name")
self.table_schema_id = configParser.get("AggregateMicroPath", "table_schema_id")
self.table_schema_dt = configParser.get("AggregateMicroPath", "table_schema_dt")
self.table_schema_lat = configParser.get("AggregateMicroPath", "table_schema_lat")
self.table_schema_lon = configParser.get("AggregateMicroPath", "table_schema_lon")
self.time_filter = long(configParser.get("AggregateMicroPath", "time_filter"))
self.distance_filter = long(configParser.get("AggregateMicroPath", "distance_filter"))
self.tripLat1 = float(configParser.get("AggregateMicroPath", "lower_left_lat"))
self.tripLon1 = float(configParser.get("AggregateMicroPath", "lower_left_lon"))
self.tripLat2 = float(configParser.get("AggregateMicroPath", "upper_right_lat"))
self.tripLon2 = float(configParser.get("AggregateMicroPath", "upper_right_lon"))
self.tripname = configParser.get("AggregateMicroPath", "trip_name")
self.resolutionLat = float(configParser.get("AggregateMicroPath", "resolution_lat"))
self.resolutionLon = float(configParser.get("AggregateMicroPath", "resolution_lon"))
self.tripLatMin = int(math.floor(self.tripLat1/self.resolutionLat))#6
self.tripLatMax = int(math.ceil(self.tripLat2/self.resolutionLat)) #7
self.tripLonMin = int(math.floor(self.tripLon1/self.resolutionLon)) #8
self.tripLonMax = int(math.ceil(self.tripLon2/self.resolutionLon)) #9
self.triplineBlankets.append([self.tripLat1,self.tripLon1,self.tripLat2,self.tripLon2,self.tripname,self.resolutionLat,self.resolutionLon,self.tripLatMin,self.tripLatMax,self.tripLonMin,self.tripLonMax])
self.temporal_split = configParser.get("AggregateMicroPath", "temporal_split")
| apache-2.0 | 7,993,782,075,275,329,000 | 45.125 | 211 | 0.700391 | false | 3.694105 | true | false | false |
lann/gerritrestclient | gerritrestclient/endpoint.py | 1 | 1981 | import functools
import inspect
import urllib
def maybe_quote(str_or_none):
if str_or_none is None:
return None
return urllib.quote(str_or_none)
class EndpointMixin(object):
_endpoint_parent = None
@classmethod
def endpoint_parts(cls):
parts = []
parent = cls._endpoint_parent
if parent:
if isinstance(parent, str):
parent = getattr(inspect.getmodule(cls), parent)
cls._endpoint_parent = parent
            parts.extend(parent.endpoint_parts())
part = [cls]
if hasattr(cls, 'endpoint_name'):
part.append(cls.endpoint_name)
else:
part.append('%ss' % cls.__name__.lower())
parts.append(part)
return parts
@classmethod
def endpoint_path(cls, *args):
args = list(args)
optional = object()
args.append(optional)
path_parts = []
for part_cls, part_name in cls.endpoint_parts():
path_parts.append(part_name)
part_arg = args.pop(0)
if part_arg is optional:
part_arg = ''
if isinstance(part_arg, part_cls):
part_arg = part_arg.endpoint_id()
if isinstance(part_arg, int):
part_arg = str(part_arg)
if not isinstance(part_arg, (int, basestring)):
raise TypeError, 'unsupported path argument %s' % type(part_arg)
path_parts.append(part_arg)
path_parts.extend(args[:-1])
return '/'.join(path_parts)
def endpoint_id(self):
return self.id
def endpoint_setup(parent=None, name=None):
def decorator(cls):
if parent:
cls._endpoint_parent = parent
if name:
cls.endpoint_name = name
return cls
return decorator
| mit | -1,497,542,085,637,198,800 | 24.397436 | 80 | 0.516406 | false | 4.382743 | false | false | false |
lmandres/MeSH-Analysis-Methodology | searchlib/eutils.py | 1 | 41139 | '''
Created on Jul 7, 2011
@author: Leo Andres (user)
'''
import http
import re
import socket
#import sys
import time
#import urllib
import urllib.error
import urllib.parse
import urllib.request
#import urllib2
class QueryEUtilsBase:
http_error_codes = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols', 'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted', 'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices', 'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified', 'Document has not changed since given time'),
305: ('Use Proxy', 'You must use proxy specified in Location to access this resource.'),
307: ('Temporary Redirect', 'Object moved temporarily -- see URI list'),
400: ('Bad Request', 'Bad request syntax or unsupported method'),
401: ('Unauthorized', 'No permission -- see authorization schemes'),
402: ('Payment Required', 'No payment -- see charging schemes'),
403: ('Forbidden', 'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed', 'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone', 'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable', 'Cannot satisfy request range.'),
417: ('Expectation Failed', 'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented', 'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable', 'The server cannot process the request due to a high load'),
504: ('Gateway Timeout', 'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.')}
base_eutils_url = None
maximum_tries = None
sleep_delay = None
timeout = None
maximum_url_length = None
def __init__(self, eutils_url_in):
self.base_eutils_url = eutils_url_in
def set_base_eutils_url(self, eutils_url_in):
self.base_eutils_url = eutils_url_in
def get_base_eutils_url(self):
return self.base_eutils_url
def set_maximum_tries(self, maximum_tries_in):
self.maximum_tries = int(maximum_tries_in)
def get_maximum_tries(self):
return self.maximum_tries
def set_sleep_delay(self, sleep_delay_in):
self.sleep_delay = int(sleep_delay_in)
def get_sleep_delay(self):
return self.sleep_delay
def set_timeout(self, timeout_in):
self.timeout = int(timeout_in)
def get_timeout(self):
return self.timeout
def set_maximum_url_length(self, maximum_url_length_in):
self.maximum_url_length = int(maximum_url_length_in)
	def get_maximum_url_length(self):
return self.maximum_url_length
def run_eutils_request(self, eutils_variables_in):
attempt_number = 0
eutils_request_variables = {}
for dict_key in eutils_variables_in:
if eutils_variables_in[dict_key] != None:
eutils_request_variables[dict_key] = eutils_variables_in[dict_key]
print('\nDoing EUtilities request at ' + time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime()) + '\n' + self.base_eutils_url + '?' + urllib.parse.urlencode(eutils_request_variables) + '\n')
while True:
xml_string = None
time.sleep(self.sleep_delay)
try:
response = None
if self.maximum_url_length != None and self.maximum_url_length <= 1600:
response = urllib.request.urlopen(url = self.base_eutils_url + '?' + urllib.parse.urlencode(eutils_request_variables), timeout = self.timeout)
else:
response = urllib.request.urlopen(url = self.base_eutils_url, data = urllib.parse.urlencode(eutils_request_variables).encode('utf-8'), timeout = self.timeout)
xml_string = response.read()
except OSError as ose:
if str(ose).strip() == '[Errno 11004] getaddrinfo failed':
print('Network connection unavailable.')
attempt_number -= 1
else:
print('OSError')
print(ose)
except socket.error as se:
# ADD SOMETHING HERE TO DESCRIBE THE ERROR BETTER
print('socket.error')
print(se)
except urllib.error.HTTPError as er:
print(str(er.code) + ": " + self.http_error_codes[er.code][1])
except urllib.error.URLError as er:
print(er)
except socket.timeout:
print('Request timed out.')
except http.client.BadStatusLine as bsl:
# ADD SOMETHING HERE TO DESCRIBE THE ERROR BETTER
print('Bad status line (?).')
print(bsl)
if xml_string != None:
break
attempt_number += 1
print('Search result invalid. Attempt ' + str(attempt_number) + '.')
if self.maximum_tries < attempt_number:
print('Maximum tries exceeded.')
break
return xml_string
class IteratePubMedESearchResults(QueryEUtilsBase):
result_count = 0
result_return_maximum = 0
result_return_start = 0
result_idlist_iter = None
eutils_esearch_variables = None
def __init__(self, esearch_settings_in):
self.result_count = 0
self.result_return_maximum = 0
self.result_return_start = 0
self.eutils_esearch_variables = {
'rettype' : 'uilist',
'retstart' : 0,
'retmax' : None,
'db' : 'pubmed',
'usehistory' : None,
'term' : None,
'email' : None,
'tool' : None,
'query_key' : None,
'WebEnv' : None}
for dict_key in self.eutils_esearch_variables:
try:
if self.eutils_esearch_variables[dict_key] == None and esearch_settings_in[dict_key] != None:
self.eutils_esearch_variables[dict_key] = esearch_settings_in[dict_key]
except KeyError:
pass
self.eutils_esearch_variables['query_key'] = None
self.eutils_esearch_variables['retstart'] = 0
self.set_base_eutils_url(esearch_settings_in['base_address'] + '/esearch.fcgi')
self.set_sleep_delay(esearch_settings_in['sleep_delay'])
self.set_maximum_tries(esearch_settings_in['maximum_tries'])
self.set_timeout(esearch_settings_in['timeout'])
self.result_idlist_iter = None
self.__run_eutils_esearch_request()
def __iter__(self):
return self
def __next__(self):
try:
return self.result_idlist_iter.__next__().group(1).decode('utf-8')
except StopIteration:
if self.result_count <= (self.result_return_maximum + self.result_return_start):
raise StopIteration
else:
self.eutils_esearch_variables['retstart'] = self.eutils_esearch_variables['retstart'] + self.eutils_esearch_variables['retmax']
if self.result_count <= self.eutils_esearch_variables['retstart'] + self.eutils_esearch_variables['retmax']:
print('\nRetrieving Articles ' + str(self.eutils_esearch_variables['retstart'] + 1) + ' to ' + str(self.result_count) + '.')
else:
print('\nRetrieving Articles ' + str(self.eutils_esearch_variables['retstart'] + 1) + ' to ' + str(self.eutils_esearch_variables['retstart'] + self.eutils_esearch_variables['retmax']) + '.')
self.result_idlist_iter = None
self.__run_eutils_esearch_request()
try:
return self.result_idlist_iter.__next__().group(1).decode('utf-8')
except StopIteration:
raise StopIteration
def get_query_key(self):
return self.eutils_esearch_variables['query_key']
def get_web_env(self):
return self.eutils_esearch_variables['WebEnv']
def get_result_count(self):
return self.result_count
def __run_eutils_esearch_request(self):
while True:
xml_string = self.run_eutils_request(self.eutils_esearch_variables)
match = re.search(b'<Count>(\d+)</Count>.*?<RetMax>(\d+)</RetMax>.*?<RetStart>(\d+)</RetStart>.*?(<IdList>.*?</IdList>)', xml_string, re.DOTALL)
if match:
break
match = re.search(b'<Count>(\d+)</Count>.*?<RetMax>(\d+)</RetMax>.*?<RetStart>(\d+)</RetStart>.*?(<IdList/>)', xml_string, re.DOTALL)
if match:
break
self.result_count = int(match.group(1))
self.result_return_maximum = int(match.group(2))
self.result_return_start = int(match.group(3))
self.result_idlist_iter = re.finditer(b'<Id>(\d+)</Id>', match.group(4), re.DOTALL)
try:
self.eutils_esearch_variables['query_key'] = None
except KeyError:
pass
try:
self.eutils_esearch_variables['WebEnv'] = None
except KeyError:
pass
try:
if self.eutils_esearch_variables['usehistory'] == 'y':
match = re.search(b'<Count>\d+</Count>.*?<RetMax>\d+</RetMax>.*?<RetStart>\d+</RetStart>.*?<QueryKey>(.*?)</QueryKey>.*?<WebEnv>(.*?)</WebEnv>.*?<IdList>', xml_string, re.DOTALL)
if match:
self.eutils_esearch_variables['query_key'] = match.group(1).strip()
self.eutils_esearch_variables['WebEnv'] = match.group(2).strip()
self.eutils_esearch_variables['term'] = None
else:
self.eutils_esearch_variables['usehistory'] = None
self.eutils_esearch_variables['query_key'] = None
self.eutils_esearch_variables['WebEnv'] = None
except KeyError:
pass
class IteratePubMedEFetchData(QueryEUtilsBase):
result_count = 0
result_return_maximum = 0
result_return_start = 0
efetch_pubmed_data_iter = None
efetch_last_pubmed_id = None
efetch_pubmed_id_iter = None
eutils_efetch_variables = {
'retmode' : 'xml',
'retstart' : None,
'retmax' : None,
'db' : 'pubmed',
'usehistory' : None,
'email' : None,
'tool' : None,
'query_key' : None,
'WebEnv' : None}
def __init__(self, efetch_settings_in, efetch_pubmed_id_iterable_in=None):
for dict_key in self.eutils_efetch_variables:
try:
self.eutils_efetch_variables[dict_key] = efetch_settings_in[dict_key]
except KeyError:
pass
if efetch_pubmed_id_iterable_in == None and self.eutils_efetch_variables['query_key'] != None:
self.eutils_efetch_variables['retstart'] = 0
else:
try:
self.efetch_pubmed_id_iter = efetch_pubmed_id_iterable_in.__iter__()
except AttributeError:
self.efetch_pubmed_id_iter = [].__iter__()
self.eutils_efetch_variables['query_key'] = None
self.eutils_efetch_variables['retstart'] = None
self.eutils_efetch_variables['retmax'] = None
self.set_base_eutils_url(efetch_settings_in['base_address'] + '/efetch.fcgi')
try:
self.set_sleep_delay(int(efetch_settings_in['sleep_delay']))
except TypeError:
pass
try:
self.set_maximum_tries(int(efetch_settings_in['maximum_tries']))
except TypeError:
pass
try:
self.set_timeout(int(efetch_settings_in['timeout']))
except TypeError:
pass
try:
self.set_maximum_url_length(int(efetch_settings_in['maximum_url_length']))
except TypeError:
pass
try:
self.result_return_maximum = int(efetch_settings_in['retmax'])
except TypeError:
pass
try:
self.result_count = int(efetch_settings_in['result_count'])
except TypeError:
pass
self.efetch_pubmed_data_iter = None
def __iter__(self):
return self
def __next__(self):
if self.eutils_efetch_variables['query_key'] != None:
return self.next_by_query_key()
else:
return self.next_by_id_list()
def next_by_query_key(self):
try:
return self.efetch_pubmed_data_iter.__next__().group(1)
except StopIteration:
self.eutils_efetch_variables['retstart'] = self.eutils_efetch_variables['retstart'] + self.eutils_efetch_variables['retmax']
except AttributeError:
pass
if self.eutils_efetch_variables['retstart'] >= self.result_count:
self.efetch_pubmed_data_iter = [].__iter__()
else:
if self.result_count <= self.eutils_efetch_variables['retstart'] + self.eutils_efetch_variables['retmax']:
print('\nRetrieving Articles ' + str(self.eutils_efetch_variables['retstart'] + 1) + ' to ' + str(self.result_count) + '.')
else:
print('\nRetrieving Articles ' + str(self.eutils_efetch_variables['retstart'] + 1) + ' to ' + str(self.eutils_efetch_variables['retstart'] + self.eutils_efetch_variables['retmax']) + '.')
self.efetch_pubmed_data_iter = None
self.__run_query_key_eutils_efetch_request()
try:
return self.efetch_pubmed_data_iter.__next__().group(1)
except StopIteration:
raise StopIteration
def next_by_id_list(self):
try:
return self.efetch_pubmed_data_iter.__next__().group(1)
except StopIteration:
if self.maximum_url_length == None:
raise StopIteration
except AttributeError:
pass
self.efetch_pubmed_data_iter = None
self.__run_id_list_eutils_efetch_request()
try:
return self.efetch_pubmed_data_iter.__next__().group(1)
except StopIteration:
raise StopIteration
def __run_query_key_eutils_efetch_request(self):
xml_string = None
efetch_post_variables = {}
for dict_key in self.eutils_efetch_variables:
if self.eutils_efetch_variables[dict_key] != None:
efetch_post_variables[dict_key] = self.eutils_efetch_variables[dict_key]
while True:
xml_string = self.run_eutils_request(efetch_post_variables)
if xml_string != None:
break
self.efetch_pubmed_data_iter = re.finditer(b'(<PubmedArticle>.*?</PubmedArticle>)', xml_string, re.DOTALL)
def __run_id_list_eutils_efetch_request(self):
xml_string = None
efetch_post_variables = {}
efetch_pubmed_id_list = []
efetch_post_data = None
for dict_key in self.eutils_efetch_variables:
if self.eutils_efetch_variables[dict_key] != None:
efetch_post_variables[dict_key] = self.eutils_efetch_variables[dict_key]
if self.maximum_url_length == None:
for list_item in self.efetch_pubmed_id_iter:
efetch_pubmed_id_list.append(list_item)
else:
if self.efetch_last_pubmed_id != None:
efetch_pubmed_id_list.append(self.efetch_last_pubmed_id)
while True:
efetch_post_variables['id'] = ','.join([str(list_item).strip() for list_item in efetch_pubmed_id_list])
self.efetch_last_pubmed_id = None
try:
self.efetch_last_pubmed_id = self.efetch_pubmed_id_iter.__next__()
efetch_post_variables['id'] += ',' + str(self.efetch_last_pubmed_id).strip()
except StopIteration:
pass
if self.maximum_url_length <= 1600:
efetch_post_data = self.get_base_eutils_url() + '?' + urllib.parse.urlencode(efetch_post_variables)
else:
efetch_post_data = urllib.parse.urlencode(efetch_post_variables)
if len(efetch_post_data) <= self.maximum_url_length:
if self.efetch_last_pubmed_id != None:
efetch_pubmed_id_list.append(self.efetch_last_pubmed_id)
else:
break
else:
break
if len(efetch_pubmed_id_list) <= 0:
self.efetch_pubmed_data_iter = [].__iter__()
else:
efetch_post_variables['id'] = ','.join([str(list_item).strip() for list_item in efetch_pubmed_id_list])
while True:
xml_string = self.run_eutils_request(efetch_post_variables)
if xml_string != None:
break
self.efetch_pubmed_data_iter = re.finditer(b'(<PubmedArticle>.*?</PubmedArticle>)', xml_string, re.DOTALL)
class IteratePubMedCentralELinkCitedByPMCIDs(QueryEUtilsBase):
elink_pmcid_iter = None
base_address = None
sleep_delay = None
maximum_tries = None
timeout = None
eutils_elink_variables = {
'retmode' : 'xml',
'dbfrom' : 'pmc',
'db' : 'pmc',
'id' : None,
'email' : None,
'tool' : None}
def __init__(self, elink_settings_in, elink_pmcids_in):
for dict_key in self.eutils_elink_variables:
try:
self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key]
except KeyError:
pass
try:
self.eutils_elink_variables['usehistory'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['query_key'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['WebEnv'] = None
except KeyError:
pass
self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi')
self.eutils_elink_variables['id'] = elink_pmcids_in
self.eutils_elink_variables['db'] = 'pmc'
try:
self.set_sleep_delay(int(elink_settings_in['sleep_delay']))
except TypeError:
pass
try:
self.set_maximum_tries(int(elink_settings_in['maximum_tries']))
except TypeError:
pass
try:
self.set_timeout(int(elink_settings_in['timeout']))
except TypeError:
pass
self.elink_pmcid_iter = None
def __iter__(self):
return self
def __next__(self):
try:
return self.elink_pmcid_iter.__next__().group(1).decode('utf-8')
except StopIteration:
raise StopIteration
except AttributeError:
try:
self.__run_elink_request()
return self.elink_pmcid_iter.__next__().group(1).decode('utf-8')
except AttributeError:
raise StopIteration
def __run_elink_request(self):
xml_string = None
match = None
self.elink_pmcid_iter = None
while True:
xml_string = self.run_eutils_request(self.eutils_elink_variables)
if xml_string != None:
break
match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pmc</DbTo>.*?<LinkName>pmc_pmc_citedby</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL)
if match:
self.elink_pmcid_iter = re.finditer(b'<Id>(.*?)</Id>', match.group(1), re.DOTALL)
class IteratePubMedIDELinkCitedByPubMedIDs(QueryEUtilsBase):
elink_pmcid_iter = None
base_address = None
sleep_delay = None
maximum_tries = None
timeout = None
eutils_elink_variables = {
'retmode' : 'xml',
'dbfrom' : 'pubmed',
'db' : 'pubmed',
'id' : None,
'email' : None,
'tool' : None}
def __init__(self, elink_settings_in, elink_pubmed_ids_in):
for dict_key in self.eutils_elink_variables:
try:
self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key]
except KeyError:
pass
try:
self.eutils_elink_variables['usehistory'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['query_key'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['WebEnv'] = None
except KeyError:
pass
self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi')
self.eutils_elink_variables['id'] = elink_pubmed_ids_in
self.eutils_elink_variables['db'] = 'pubmed'
try:
self.set_sleep_delay(int(elink_settings_in['sleep_delay']))
except TypeError:
pass
try:
self.set_maximum_tries(int(elink_settings_in['maximum_tries']))
except TypeError:
pass
try:
self.set_timeout(int(elink_settings_in['timeout']))
except TypeError:
pass
self.elink_pmcid_iter = None
def __iter__(self):
return self
def __next__(self):
try:
return self.elink_pmcid_iter.__next__().group(1).decode('utf-8')
except StopIteration:
raise StopIteration
except AttributeError:
try:
self.__run_elink_request()
return self.elink_pmcid_iter.__next__().group(1).decode('utf-8')
except AttributeError:
raise StopIteration
def __run_elink_request(self):
xml_string = None
match = None
self.elink_pmcid_iter = None
while True:
xml_string = self.run_eutils_request(self.eutils_elink_variables)
if xml_string != None:
break
match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pubmed</DbTo>.*?<LinkName>pubmed_pubmed_citedin</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL)
if match:
self.elink_pmcid_iter = re.finditer(b'<Id>(.*?)</Id>', match.group(1), re.DOTALL)
class IteratePubMedCentralELinkToPubMedIDs(QueryEUtilsBase):
elink_pmcid_iter = None
elink_pubmed_id_iter = None
elink_last_pubmed_id = None
base_address = None
sleep_delay = None
maximum_tries = None
timeout = None
eutils_elink_variables = {
'retmode' : 'xml',
'dbfrom' : 'pmc',
'db' : 'pubmed',
'id' : None,
'email' : None,
'tool' : None}
def __init__(self, elink_settings_in, elink_pmcid_iter_in):
for dict_key in self.eutils_elink_variables:
try:
self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key]
except KeyError:
pass
try:
self.eutils_elink_variables['usehistory'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['query_key'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['WebEnv'] = None
except KeyError:
pass
self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi')
self.elink_pmcid_iter = elink_pmcid_iter_in
self.eutils_elink_variables['db'] = 'pubmed'
try:
self.set_sleep_delay(int(elink_settings_in['sleep_delay']))
except TypeError:
pass
try:
self.set_maximum_tries(int(elink_settings_in['maximum_tries']))
except TypeError:
pass
try:
self.set_timeout(int(elink_settings_in['timeout']))
except TypeError:
pass
self.elink_pubmed_id_iter = None
def __iter__(self):
return self
def __next__(self):
try:
return self.elink_pubmed_id_iter.__next__().group(1).decode('utf-8')
except StopIteration:
raise StopIteration
except AttributeError:
xml_string = None
match = None
elink_post_variables = {}
elink_pmcid_list = []
elink_post_data = None
for dict_key in self.eutils_elink_variables:
if self.eutils_elink_variables[dict_key] != None:
elink_post_variables[dict_key] = self.eutils_elink_variables[dict_key]
if self.maximum_url_length == None:
for list_item in self.elink_pmcid_iter:
elink_pmcid_list.append(list_item)
else:
				if self.elink_last_pubmed_id != None:
elink_pmcid_list.append(self.elink_last_pubmed_id)
while True:
elink_post_variables['id'] = ','.join([str(list_item).strip() for list_item in elink_pmcid_list])
self.elink_last_pubmed_id = None
try:
self.elink_last_pubmed_id = self.elink_pmcid_iter.__next__()
elink_post_variables['id'] += ',' + str(self.elink_last_pubmed_id).strip()
except StopIteration:
pass
if self.maximum_url_length <= 1600:
elink_post_data = self.get_base_eutils_url() + '?' + urllib.parse.urlencode(elink_post_variables)
else:
elink_post_data = urllib.parse.urlencode(elink_post_variables)
if len(elink_post_data) <= self.maximum_url_length:
if self.elink_last_pubmed_id != None:
elink_pmcid_list.append(self.elink_last_pubmed_id)
else:
break
else:
break
if len(elink_pmcid_list) <= 0:
raise StopIteration
else:
self.elink_pubmed_id_iter = None
self.eutils_elink_variables['id'] = ','.join([str(list_item).strip() for list_item in elink_pmcid_list])
while True:
xml_string = self.run_eutils_request(self.eutils_elink_variables)
if xml_string != None:
break
match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pubmed</DbTo>.*?<LinkName>pmc_pubmed</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL)
if match:
self.elink_pubmed_id_iter = re.finditer(b'<Id>(.*?)</Id>', match.group(1), re.DOTALL)
try:
return self.elink_pubmed_id_iter.__next__().group(1).decode('utf-8')
except AttributeError:
raise StopIteration
class IteratePubMedIDELinkNeighborPubMedIDs(QueryEUtilsBase):
base_address = None
sleep_delay = None
maximum_tries = None
timeout = None
eutils_elink_variables = {
'retmode' : 'xml',
'dbfrom' : 'pubmed',
'db' : 'pubmed',
'id' : None,
'cmd' : 'neighbor_score',
'email' : None,
'tool' : None}
def __init__(self, elink_settings_in, elink_pubmed_ids_in):
for dict_key in self.eutils_elink_variables:
try:
self.eutils_elink_variables[dict_key] = elink_settings_in[dict_key]
except KeyError:
pass
try:
self.eutils_elink_variables['usehistory'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['query_key'] = None
except KeyError:
pass
try:
self.eutils_elink_variables['WebEnv'] = None
except KeyError:
pass
self.set_base_eutils_url(elink_settings_in['base_address'] + '/elink.fcgi')
self.eutils_elink_variables['id'] = elink_pubmed_ids_in
self.eutils_elink_variables['db'] = 'pubmed'
try:
self.set_sleep_delay(int(elink_settings_in['sleep_delay']))
except TypeError:
pass
try:
self.set_maximum_tries(int(elink_settings_in['maximum_tries']))
except TypeError:
pass
try:
self.set_timeout(int(elink_settings_in['timeout']))
except TypeError:
pass
self.elink_pmcid_iter = None
def __iter__(self):
return self
def __next__(self):
next_item = None
try:
next_item = self.elink_pmcid_iter.__next__()
except StopIteration:
raise StopIteration
except AttributeError:
try:
self.__run_elink_request()
next_item = self.elink_pmcid_iter.__next__()
except AttributeError:
raise StopIteration
if next_item:
match = re.search(b'<Id>(.*?)</Id>.*?<Score>(.*?)</Score>', next_item.group(1), re.DOTALL)
if match:
return (match.group(1).decode('utf-8'), int(match.group(2).decode('utf-8')))
raise StopIteration
def __run_elink_request(self):
xml_string = None
match = None
self.elink_pmcid_iter = None
while True:
xml_string = self.run_eutils_request(self.eutils_elink_variables)
if xml_string != None:
break
match = re.search(b'.*(<LinkSetDb>.*?<DbTo>pubmed</DbTo>.*?<LinkName>pubmed_pubmed</LinkName>.*?</LinkSetDb>)', xml_string, re.DOTALL)
if match:
self.elink_pmcid_iter = re.finditer(b'<Link>(.*?)</Link>', match.group(1), re.DOTALL)
class EUtilsPubMed:
'''
	Convenience wrapper around the NCBI Entrez E-utilities (ESearch, EFetch and
	ELink). It stores the shared request settings and exposes iterator factories
	for PubMed and PubMed Central queries.
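
	Minimal usage sketch (illustrative only; the e-mail address, tool name and
	search term are placeholder values, not part of the original code):

		eutils = EUtilsPubMed()
		eutils.set_email_address('[email protected]')
		eutils.set_tool_name('example_tool')
		for pubmed_id in eutils.pubmed_esearch_id_iter('asthma'):
			print(pubmed_id)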
'''
eutils_settings = {
'base_address' : 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils',
'sleep_delay' : 1,
'maximum_tries' : 3,
'timeout' : 60,
'maximum_url_length' : None,
'retmax' : 100000,
'usehistory' : 'y',
'term' : None,
'email' : None,
'tool' : None,
'query_key' : None,
'WebEnv' : None,
'result_count' : None}
def __init__(self):
'''
Constructor
'''
def set_eutils_address(self, address_in):
self.eutils_settings['base_address'] = str(address_in).strip().rstrip('/')
def get_eutils_address(self):
return self.eutils_settings['base_address']
def set_sleep_delay(self, delay_in):
self.eutils_settings['sleep_delay'] = int(delay_in)
def get_sleep_delay(self):
return self.eutils_settings['sleep_delay']
def set_maximum_tries(self, tries_in):
self.eutils_settings['maximum_tries'] = int(tries_in)
def get_maximum_tries(self):
return self.eutils_settings['maximum_tries']
def set_timeout(self, timeout_in):
self.eutils_settings['timeout'] = int(timeout_in)
def get_timeout(self):
return self.eutils_settings['timeout']
def set_maximum_url_length(self, length_in):
self.eutils_settings['maximum_url_length'] = int(length_in)
def get_maximum_url_length(self):
return self.eutils_settings['maximum_url_length']
def set_return_maximum(self, maximum_in):
self.eutils_settings['retmax'] = int(maximum_in)
def get_return_maximum(self):
return self.eutils_settings['retmax']
def get_eutils_database(self):
return self.eutils_settings['db']
def set_eutils_use_history(self, history_in):
if history_in:
self.eutils_settings['usehistory'] = 'y'
else:
try:
del(self.eutils_settings['usehistory'])
except KeyError:
pass
def get_eutils_use_history(self):
try:
return self.eutils_settings['usehistory']
except KeyError:
return None
def set_email_address(self, email_in):
self.eutils_settings['email'] = email_in
def get_email_address(self):
return self.eutils_settings['email']
def set_tool_name(self, name_in):
self.eutils_settings['tool'] = name_in
def get_tool_name(self):
return self.eutils_settings['tool']
def pubmed_esearch_id_iter(self, esearch_term_in):
self.eutils_settings['term'] = esearch_term_in
pubmed_esearch_results = IteratePubMedESearchResults(self.eutils_settings)
self.eutils_settings['query_key'] = pubmed_esearch_results.get_query_key()
self.eutils_settings['WebEnv'] = pubmed_esearch_results.get_web_env()
self.eutils_settings['result_count'] = pubmed_esearch_results.get_result_count()
return pubmed_esearch_results
def pubmed_efetch_data_iter(self, efetch_pubmed_id_iterable_in):
return IteratePubMedEFetchData(self.eutils_settings, efetch_pubmed_id_iterable_in)
def pubmed_esearch_data_iter(self, esearch_term_in):
self.eutils_settings['WebEnv'] = None
self.eutils_settings['query_key'] = None
self.eutils_settings['result_count'] = None
self.eutils_settings['term'] = esearch_term_in
pubmed_esearch_results = IteratePubMedESearchResults(self.eutils_settings)
try:
if self.eutils_settings['usehistory'] == 'y':
self.eutils_settings['WebEnv'] = pubmed_esearch_results.get_web_env()
self.eutils_settings['query_key'] = pubmed_esearch_results.get_query_key()
self.eutils_settings['result_count'] = pubmed_esearch_results.get_result_count()
return IteratePubMedEFetchData(self.eutils_settings)
except KeyError:
return IteratePubMedEFetchData(self.eutils_settings, pubmed_esearch_results)
def elink_pmcid_cited_by_pmcids(self, elink_pmcids_in):
return_iter = []
try:
return_iter = IteratePubMedCentralELinkCitedByPMCIDs(self.eutils_settings, elink_pmcids_in.strip())
except AttributeError:
pass
return return_iter
def elink_pmcids_link_to_pubmed_ids(self, pmcid_iter_in):
return_iter = []
try:
return_iter = IteratePubMedCentralELinkToPubMedIDs(self.eutils_settings, pmcid_iter_in)
except AttributeError:
pass
return return_iter
def elink_pubmed_id_cited_by_pubmed_ids(self, elink_pubmed_ids_in):
return_iter = []
try:
return_iter = IteratePubMedIDELinkCitedByPubMedIDs(self.eutils_settings, elink_pubmed_ids_in.strip())
except AttributeError:
pass
return return_iter
def elink_pubmed_id_neighbor_pubmed_ids(self, elink_pubmed_ids_in):
return_iter = []
try:
return_iter = IteratePubMedIDELinkNeighborPubMedIDs(self.eutils_settings, elink_pubmed_ids_in.strip())
except AttributeError:
pass
return return_iter
| gpl-2.0 | -6,642,133,664,839,098,000 | 35.962264 | 210 | 0.50412 | false | 4.192296 | false | false | false |
hgascon/pulsar | pulsar/core/sippy/SipCallId.py | 1 | 1919 | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: [email protected].
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from random import random
from hashlib import md5
from time import time
from .SipConf import SipConf
from .SipGenericHF import SipGenericHF
class SipCallId(SipGenericHF):
hf_names = ('call-id', 'i')
body = None
def __init__(self, body = None):
SipGenericHF.__init__(self, body)
self.parsed = True
if body == None:
self.body = md5(str((random() * 1000000000) + time())).hexdigest() + '@' + str(SipConf.my_address)
def __add__(self, other):
return SipCallId(self.body + str(other))
def genCallId(self):
self.body = md5(str((random() * 1000000000) + time())).hexdigest() + '@' + str(SipConf.my_address)
def getCanName(self, name, compact = False):
if compact:
return 'i'
return 'Call-ID'
| bsd-3-clause | 8,509,275,359,061,580,000 | 37.38 | 110 | 0.696717 | false | 3.593633 | false | false | false |
dferguso/IGT4SAR | HastyLine_Assigment.py | 1 | 3982 | #-------------------------------------------------------------------------------
# Name: HastyLine_Assignment.py
#
# Purpose: Create Quick Response Task stored in the Assignments layer from a
# "Hasty Point" feature.
#
# Author: Don Ferguson
#
# Created: 06/25/2012
# Copyright: (c) Don Ferguson 2012
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The GNU General Public License can be found at
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
#!/usr/bin/env python
# Take courage my friend help is on the way
import arcpy
#workspc = arcpy.GetParameterAsText(0)
#arcpy.env.workspace = workspc
arcpy.env.overwriteOutput = "True"
##fc1="Hasty_Segments"
fc1="Hasty_Line"
fc2="Assignments"
fieldName1="Length_miles"
fieldName2="PointA_X"
fieldName3="PointA_Y"
fieldName4="PointB_X"
fieldName5="PointB_Y"
expression2 = "float(!shape.firstpoint!.split()[0])"
expression3 = "float(!shape.firstpoint!.split()[1])"
expression4 = "float(!shape.lastpoint!.split()[0])"
expression5 = "float(!shape.lastpoint!.split()[1])"
##try:
arcpy.CalculateField_management(fc1, fieldName1, "!SHAPE.length@miles!", "PYTHON")
arcpy.CalculateField_management(fc1, fieldName2, expression2, "PYTHON")
arcpy.CalculateField_management(fc1, fieldName3, expression3, "PYTHON")
arcpy.CalculateField_management(fc1, fieldName4, expression4, "PYTHON")
arcpy.CalculateField_management(fc1, fieldName5, expression5, "PYTHON")
#shapeName = arcpy.Describe(fc1).ShapeFieldName
rows1 = arcpy.SearchCursor(fc1)
row1 = rows1.next()
while row1:
# you need to insert correct field names in your getvalue function
Area_Name = row1.getValue("Area_Name")
Length_miles = row1.getValue("Length_miles")
Type = row1.getValue("Type")
PtA_X = row1.getValue("PointA_X")
PtA_Y = row1.getValue("PointA_Y")
PtB_X = row1.getValue("PointB_X")
PtB_Y = row1.getValue("PointB_Y")
## feat = row1.getValue(shapeName)
## #pnt = feat.getPart()
##
## # Print x,y coordinates of current point
## #
## #print pnt.X, pnt.Y
##
## fpointX = int(feat.firstPoint.X)
## fpointY = int(feat.firstPoint.Y)
## lpointX = int(feat.lastPoint.X)
## lpointY = int(feat.lastPoint.Y)
Descrip1 = "Search along " + str(Area_Name) + " for a distance of " + str(int(Length_miles*100.0)/100.0) + " miles"
Descrip2 = " between point 1: " + str(int(PtA_X)) + " " + str(int(PtA_Y)) + ", and point2: "
Descrip3 = str(int(PtB_X)) + " " +str(int(PtB_Y)) + "."
Descrip4 = " Sweep 10 - 20 ft on each side of road/trail. Look for decision points and location where someone may leave the trail."
Descrip = Descrip1 + Descrip2 + Descrip3 + Descrip4
rows = arcpy.InsertCursor(fc2)
x = 1
while x <= 1:
row = rows.newRow()
row.Description = Descrip
row.Area_Name = Area_Name
try:
row.Priority = "High"
except:
pass
row.Status = "Planned"
row.Map_Scale = 24000
row.Create_Map = "No"
row.Previous_Search = "No"
arcpy.AddMessage(Area_Name)
rows.insertRow(row)
x += 1
del rows
del row
row1 = rows1.next()
del row1
del rows1
##except:
## # Get the tool error messages
## #
## msgs = "There was an error"
##
## # Return tool error messages for use with a script tool
## #
## arcpy.AddError(msgs)
## # Print tool error messages for use in Python/PythonWin
## #
## print msgs | gpl-3.0 | -5,015,328,674,675,826,000 | 31.120968 | 137 | 0.638373 | false | 3.157811 | false | false | false |
adcomp/super-fruit-pie | tuto/06_collect.py | 1 | 7778 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# David Art <[email protected]>
# Program Arcade Games With Python And Pygame - Build a Platformer
# http://programarcadegames.com
import pygame
import random
WIDTH = 640
HEIGHT = 480
class Platform (pygame.sprite.Sprite):
def __init__(self, width, height):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load('images/block.png')
self.rect = self.image.get_rect()
class Raspberry(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load('images/raspberry.png')
self.rect = self.image.get_rect()
class Player(pygame.sprite.Sprite):
change_x = 0
change_y = 0
jump_ok = True
frame_since_collision = 0
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load('images/player.png')
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def update(self,blocks, raspberries):
self.rect.x += self.change_x
# check collision with raspberries
block_hit_list = pygame.sprite.spritecollide(self, raspberries, False)
for raspberry in block_hit_list:
raspberries.remove(raspberry)
# check collision with platform
block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
for block in block_hit_list:
# If we are moving right, set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
else:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
self.rect.y += self.change_y
block_hit_list = pygame.sprite.spritecollide(self, blocks, False)
for block in block_hit_list:
if self.change_y > 0:
self.jump_ok = True
# Keep track of the last time we hit something
self.frame_since_collision = 0
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
else:
self.rect.top = block.rect.bottom
# Stop our vertical movement
self.change_y = 0
        # If we haven't hit anything in a while, allow us to jump
if self.frame_since_collision > 2:
self.jump_ok = False
# Increment frame counter
self.frame_since_collision += 1
# Calculate effect of gravity.
def calc_grav(self):
self.change_y += .4
# See if we are on the ground.
if self.rect.y >= HEIGHT-48 and self.change_y >= 0:
self.change_y = 0
self.rect.y = HEIGHT-48
self.frame_since_collision = 0
self.jump_ok = True
# Called when user hits 'jump' button
def jump(self,blocks):
# If it is ok to jump, set our speed upwards
if self.jump_ok:
self.change_y = -9.81
class Game():
def __init__(self, width=640, height=480, fullscreen=False):
self.width = width
self.height = height
if fullscreen:
flags = pygame.FULLSCREEN
else:
flags = 0
pygame.init()
self.screen = pygame.display.set_mode([width, height], flags, 32)
pygame.display.set_caption("RaspJam")
self.scene = Scene()
        self.block_list = pygame.sprite.Group()
self.all_sprites_list = pygame.sprite.Group()
self.raspberry_list = pygame.sprite.Group()
create_level1(self.block_list, self.all_sprites_list)
self.player = Player(32, 32)
self.player.rect.x = 240
self.player.rect.y = 0
self.all_sprites_list.add(self.player)
def update(self):
pass
def draw(self):
pass
class Scene:
def __init__(self):
self.image = pygame.image.load('images/bg.png')
def draw(self, screen):
screen.blit(self.image, (0, 0))
# Create platforms
def create_level1(block_list, all_sprites_list):
block = Platform(128, 16)
block.rect.x = 160
block.rect.y = 128
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = 352
block.rect.y = 128
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = 0
block.rect.y = 432
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = WIDTH - 128
block.rect.y = 432
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = 0
block.rect.y = 240
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = WIDTH - 128
block.rect.y = 240
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = 160
block.rect.y = 336
block_list.add(block)
all_sprites_list.add(block)
block = Platform(128, 16)
block.rect.x = 352
block.rect.y = 336
block_list.add(block)
all_sprites_list.add(block)
# Initialize the window
pygame.init()
# Set the height and width of the screen
screen = pygame.display.set_mode([WIDTH, HEIGHT], 0, 32)
pygame.display.set_caption("RaspJam")
background = pygame.image.load('images/bg.png')
# Main program, create the blocks
block_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
raspberry_list = pygame.sprite.Group()
create_level1(block_list,all_sprites_list)
player = Player(32, 32)
player.rect.x = 240
player.rect.y = 0
all_sprites_list.add(player)
for i in range(16):
# This represents a block
block = Raspberry()
# Set a random location for the block
block.rect.x = random.randrange(WIDTH/92)* 92
block.rect.y = random.randrange(HEIGHT/92)* 92
# Add the block to the list of objects
raspberry_list.add(block)
#~ all_sprites_list.add(block)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
# --- Event Processing
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
if event.key == pygame.K_LEFT:
player.change_x = -6
if event.key == pygame.K_RIGHT:
player.change_x = 6
if event.key == pygame.K_SPACE:
player.jump(block_list)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
player.change_x = 0
if event.key == pygame.K_RIGHT:
player.change_x = 0
# --- Game Logic
# Wrap player around the screen
if player.rect.x >= WIDTH:
player.rect.x = -15
if player.rect.x <= -16:
player.rect.x = WIDTH
player.calc_grav()
player.update(block_list, raspberry_list)
block_list.update()
# --- Draw Frame
#~ screen.fill(BLACK)
screen.blit(background, (0, 0))
all_sprites_list.draw(screen)
raspberry_list.draw(screen)
pygame.display.flip()
clock.tick(60)
pygame.quit ()
| mit | -412,745,760,245,602,750 | 25.195804 | 92 | 0.575598 | false | 3.532243 | false | false | false |
bbusemeyer/mython | drivers/scan_pw.py | 2 | 3001 | import sys
import os
from copy import deepcopy
import subprocess as sp
from mython import gen_qsub
def writelines(lines):
  strlines = [" ".join(line) for line in lines]
  return strlines
def submit_job(base,lines,cwd):
if not os.path.exists(base): os.mkdir(base)
os.chdir(base)
with open("%s.inp"%base,'w') as outf:
outf.write("\n".join([" ".join(l) for l in lines]))
pc = ["module load openmpi/1.4-gcc+ifort"]
qin = gen_qsub("~/bin/pw.x < %s.inp"%(base),
stdout="%s.out"%(base),
queue="physics",
name="%s/%s.out"%(cwd,base),
time="72:00:00",
nn=1,
prep_commands=pc)
print sp.check_output("qsub %s"%qin,shell=True)
os.chdir(cwd)
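# Hypothetical usage sketch (the names here are illustrative, not from the
# original workflow): submit_job("conv_kpoint_4", lines, os.getcwd()) creates
# the directory conv_kpoint_4/, writes conv_kpoint_4.inp from the edited
# lines, and queues a pw.x run on it through gen_qsub and qsub.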
if len(sys.argv) < 2:
print "You need to enter a base file, dummy!"
exit(1)
else:
basefn = sys.argv[1]
baselines = []
with open(basefn,'r') as basef:
for line in basef:
baselines.append(line.split())
# Make base file for easy comparison.
with open("base.inp",'w') as outf:
outf.write("\n".join([" ".join(l) for l in baselines]))
cwd = os.getcwd()
changes = {
#"lambda":[0.01,0.02,0.03,0.1,0.2]
#"kpoint":[2,4,6,8,10],
#"ecutwfc":[50,60,70,80,90,100],
#"ecutrho":[400]
#"psuedo":[
# ("Si","Si.pbe-n-rrkjus_psl.0.1.UPF"),
# ("Si","Si.rel-pbe-n-rrkjus_psl.0.1.UPF"),
# ("Si","Si.pz-vbc.UPF")
"nqx":[1]
}
# Now's the part where you mess with something.
for key in changes.keys():
lines = deepcopy(baselines)
if key=="kpoint":
for newval in changes["kpoint"]:
lines[-1] = [str(newval),str(newval),str(newval),"0","0","0"]
base = "conv_%s_%s"%(key,newval)
submit_job(base,lines,cwd)
continue
if key=="psuedo":
start,end = 0,0
# Find where pseudos are chosen.
for li, line in enumerate(lines):
if "ATOMIC_SPECIES" in line:
start = li+1
for li, line in enumerate(lines):
if "ATOMIC_POSITIONS" in line:
end = li-1
# Replace for every species.
for atom,pot in changes[key]:
poss = []
for li in range(start,end):
if atom in lines[li][0]:
poss.append(li)
for pos in poss:
lines[pos][-1] = pot
base = "pseudo_%s"%pot
submit_job(base,lines,cwd)
if key=="ecutwfc":
ecutwfc,ecutrho=0,0
for li,line in enumerate(lines):
if "ecutwfc" in line: ecutwfc=li
if "ecutrho" in line: ecutrho=li
for newval in changes[key]:
lines[ecutwfc][-1] = str(newval)
lines[ecutrho][-1] = str(10*newval)
base = "conv_%s_%s"%(key,newval)
submit_job(base,lines,cwd)
continue # TODO: test new configuration.
if key=="nqx":
for newval in changes[key]:
for line in lines:
if any(["nqx" in word for word in line]):
line[-1] = str(newval)
base = "conv_%s_%s"%(key,newval)
submit_job(base,lines,cwd)
# Basic key replacement.
for line in lines:
if key in line:
for newval in changes[key]:
line[-1] = str(newval)
base = "conv_%s_%s"%(key,newval)
submit_job(base,lines,cwd)
| gpl-2.0 | -1,392,826,540,828,022,800 | 27.046729 | 67 | 0.591803 | false | 2.789033 | false | false | false |
Leopardob/dice-dev | dice/dice_extras/core_app.py | 1 | 4399 | # Standard Python modules
# =======================
import os
# Third-party Python modules
# ==========================
from PyQt5.QtCore import QObject, pyqtProperty, pyqtSignal, pyqtSlot, qDebug, QUrl, QAbstractListModel, QModelIndex, \
    Qt, QVariant
from PyQt5.QtQuick import QQuickItem
# DICE modules
# ============
from dice.app_helper.file_operations import FileOperations
class CoreApp(QObject, FileOperations):
def __init__(self, parent=None):
super(CoreApp, self).__init__(parent)
# all CoreApps are instantiated by the dice instance
self.dice = parent
# by default the image is in the images folder as lower-case svg and the qml file is the name itself
self.__image = os.path.join("images", self.name.lower()+".svg")
self.__page_location = self.name+".qml"
self.view = None # the QML item that is assigned to this CoreApp
def setParent(self, q_object):
super().setParent(q_object)
self.dice = q_object
def load(self):
pass
name_changed = pyqtSignal()
@pyqtProperty("QString", notify=name_changed)
def name(self):
return self.__class__.__name__
image_changed = pyqtSignal(name="imageChanged")
@pyqtProperty(QUrl, notify=image_changed)
def image(self):
# adjust the location as it is needed by the loader
return QUrl(os.path.join("../../../core_apps", self.name, "view", self.__image))
@image.setter
def image(self, image):
if self.__image != image:
self.__image = image
self.image_changed.emit()
page_location_changed = pyqtSignal(name="pageLocationChanged")
@property
def page_location(self):
# adjust the location as it is needed by the loader
return QUrl(os.path.join("../../../core_apps", self.name, "view", self.__page_location))
@page_location.setter
def page_location(self, page_location):
if self.__page_location != page_location:
self.__page_location = page_location
self.page_location_changed.emit()
pageLocation = pyqtProperty(QUrl, fget=page_location.fget, fset=page_location.fset, notify=page_location_changed)
@pyqtSlot(QQuickItem, name="setView")
def set_view(self, qml_item):
self.view = qml_item
completed = pyqtSignal() # this signal is sent from QML when the Component has finished loading
@staticmethod
def debug(msg):
qDebug(msg)
class CoreAppListModel(QAbstractListModel):
NameRole = Qt.UserRole + 1
ImageRole = Qt.UserRole + 2
PageLocationRole = Qt.UserRole + 3
CoreAppRole = Qt.UserRole + 4
_roles = {NameRole: "name", ImageRole: "image", PageLocationRole: "pageLocation", CoreAppRole: "coreApp"}
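    # With these role names exposed via roleNames(), a QML delegate can, for
    # example, bind to model.name, model.image and model.pageLocation when
    # this model backs a ListView.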
def __init__(self, parent=None):
super(CoreAppListModel, self).__init__(parent)
self.__core_apps = []
def add_core_app(self, core_app):
self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())
self.__core_apps.append(core_app)
self.endInsertRows()
self.count_changed.emit()
def append(self, core_app):
self.add_core_app(core_app)
def rowCount(self, parent=QModelIndex()):
return len(self.__core_apps)
def data(self, index, role=Qt.DisplayRole):
try:
core_app = self.__core_apps[index.row()]
except IndexError:
return QVariant()
if role == self.NameRole:
return core_app.name
if role == self.ImageRole:
return QUrl(core_app.image)
if role == self.PageLocationRole:
return QUrl(core_app.page_location)
if role == self.CoreAppRole:
return core_app
return QVariant()
def roleNames(self):
return self._roles
count_changed = pyqtSignal(name="countChanged")
@pyqtProperty(int, notify=count_changed)
def count(self):
return len(self.__core_apps)
@pyqtSlot(int, result=CoreApp)
def get(self, index):
try:
return self.__core_apps[index]
except IndexError:
return CoreApp()
@pyqtSlot("QString", result=CoreApp, name="getByName")
def get_by_name(self, name):
for core_app in self.__core_apps:
if core_app.name == name:
return core_app
else:
return CoreApp()
| gpl-3.0 | 5,779,190,735,725,706,000 | 28.52349 | 118 | 0.616731 | false | 3.805363 | false | false | false |
huxianglin/pythonstudy | week07-胡湘林/FTP_server/public/commons.py | 1 | 1955 | #!/usr/bin/env python
# encoding:utf-8
# __author__: commons
# date: 2016/10/2 13:47
# blog: http://huxianglin.cnblogs.com/ http://xianglinhu.blog.51cto.com/
import hashlib
import time
import os
from conf import settings
def encrypt_passwd(passwd):
"""加密密码"""
sha256_obj=hashlib.sha256()
sha256_obj.update(passwd.encode("utf-8"))
return sha256_obj.hexdigest()
# print(encrypt_passwd("123456"))
def get_file_hash(filepath):
file_size=os.stat(filepath).st_size
read_size=0
md5_obj=hashlib.md5()
with open(filepath,"rb") as f:
while read_size != file_size:
data=f.read(1024)
md5_obj.update(data)
read_size+=len(data)
return md5_obj.hexdigest()
# filepath=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),"source","client_user.py")
# print(get_file_hash(filepath))
# with open(filepath,"rb") as f:
# f.seek(10)
# print(f.read(1024))
if isinstance(settings.PROGRESS_LINE_SIZE,int):
if 1 <= settings.PROGRESS_LINE_SIZE <= 100:
star_list=[i for i in range(1,settings.PROGRESS_LINE_SIZE+1)]
else:
raise Exception("参数设置错误...PROGRESS_LINE_SIZE范围在1-100")
else:
raise Exception("参数设置错误...PROGRESS_LINE_SIZE必须是整数")
def show_progress_line(send_que,totol_size):
"""进度条"""
send_size=send_que.get()
while send_size/totol_size<=1:
time.sleep(settings.PROGRESS_LINE_PERIOD)
percent=int(send_size * 100 / totol_size)
space_len=" "*int(settings.PROGRESS_LINE_SIZE-settings.PROGRESS_LINE_SIZE*(percent/100)+3)
message="\t".join((space_len.join(("".join(("="*int(percent/100*settings.PROGRESS_LINE_SIZE),">")),"{percent}%".format(percent=percent))),time.ctime()))
print(message,end="\n",flush=True)
if send_size==totol_size:
print("Translate is finish!")
break
send_size=send_que.get()
| gpl-3.0 | -5,946,956,106,803,415,000 | 32.946429 | 160 | 0.64545 | false | 2.984301 | false | false | false |
reminisce/mxnet | example/automatic-mixed-precision/amp_model_conversion.py | 1 | 6305 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import logging
import argparse
import mxnet as mx
from common import modelzoo
import gluoncv
from gluoncv.model_zoo import get_model
from mxnet.contrib.amp import amp
import numpy as np
def download_model(model_name, logger=None):
dir_path = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(dir_path, 'model')
if logger is not None:
logger.info('Downloading model {}... into path {}'.format(model_name, model_path))
return modelzoo.download_model(args.model, os.path.join(dir_path, 'model'))
def save_symbol(fname, sym, logger=None):
if logger is not None:
logger.info('Saving symbol into file at {}'.format(fname))
sym.save(fname, remove_amp_cast=False)
def save_params(fname, arg_params, aux_params, logger=None):
if logger is not None:
logger.info('Saving params into file at {}'.format(fname))
save_dict = {('arg:%s' % k): v.as_in_context(mx.cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k): v.as_in_context(mx.cpu()) for k, v in aux_params.items()})
mx.nd.save(fname, save_dict)
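# Note: the "arg:"/"aux:" prefixes above follow the usual MXNet checkpoint
# layout, so a params file written here should be loadable together with the
# saved symbol via mx.model.load_checkpoint.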
if __name__ == '__main__':
symbolic_models = ['imagenet1k-resnet-152',
'imagenet1k-resnet-18',
'imagenet1k-resnet-34',
'imagenet1k-resnet-50',
'imagenet1k-resnet-101',
'imagenet1k-resnext-50',
'imagenet1k-resnext-101',
'imagenet1k-resnext-101-64x4d',
'imagenet11k-place365ch-resnet-152',
'imagenet11k-place365ch-resnet-50']
gluon_models = ['resnet18_v1',
'resnet50_v1',
'resnet101_v1',
'squeezenet1.0',
'mobilenet1.0',
'mobilenetv2_1.0',
'inceptionv3']
models = symbolic_models + gluon_models
parser = argparse.ArgumentParser(description='Convert a provided FP32 model to a mixed precision model')
parser.add_argument('--model', type=str, choices=models)
parser.add_argument('--run-dummy-inference', action='store_true', default=False,
help='Will generate random input of shape (1, 3, 224, 224) '
'and run a dummy inference forward pass')
parser.add_argument('--use-gluon-model', action='store_true', default=False,
help='If enabled, will download pretrained model from Gluon-CV '
'and convert to mixed precision model ')
parser.add_argument('--cast-optional-params', action='store_true', default=False,
help='If enabled, will try to cast params to target dtype wherever possible')
args = parser.parse_args()
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
if not args.use_gluon_model:
assert args.model in symbolic_models, "Please choose one of the available symbolic models: {} \
If you want to use gluon use the script with --use-gluon-model".format(symbolic_models)
prefix, epoch = download_model(model_name=args.model, logger=logger)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
result_sym, result_arg_params, result_aux_params = amp.convert_model(sym, arg_params, aux_params,
cast_optional_params=args.cast_optional_params)
sym_name = "%s-amp-symbol.json" % (prefix)
save_symbol(sym_name, result_sym, logger)
param_name = '%s-%04d.params' % (prefix + '-amp', epoch)
save_params(param_name, result_arg_params, result_aux_params, logger)
if args.run_dummy_inference:
logger.info("Running inference on the mixed precision model with dummy input, batch size: 1")
mod = mx.mod.Module(result_sym, data_names=['data'], label_names=['softmax_label'], context=mx.gpu(0))
mod.bind(data_shapes=[['data', (1, 3, 224, 224)]], label_shapes=[['softmax_label', (1,)]])
mod.set_params(arg_params, aux_params)
mod.forward(mx.io.DataBatch(data=[mx.nd.ones((1, 3, 224, 224))],
label=[mx.nd.ones((1,))]))
result = mod.get_outputs()[0].asnumpy()
logger.info("Inference run successfully")
else:
assert args.model in gluon_models, "Please choose one of the available gluon models: {} \
If you want to use symbolic model instead, remove --use-gluon-model when running the script".format(gluon_models)
net = gluoncv.model_zoo.get_model(args.model, pretrained=True)
net.hybridize()
result_before1 = net.forward(mx.nd.zeros((1, 3, 224, 224)))
net.export("{}".format(args.model))
net = amp.convert_hybrid_block(net, cast_optional_params=args.cast_optional_params)
net.export("{}-amp".format(args.model), remove_amp_cast=False)
if args.run_dummy_inference:
logger.info("Running inference on the mixed precision model with dummy inputs, batch size: 1")
result_after = net.forward(mx.nd.zeros((1, 3, 224, 224), dtype=np.float32, ctx=mx.gpu(0)))
result_after = net.forward(mx.nd.zeros((1, 3, 224, 224), dtype=np.float32, ctx=mx.gpu(0)))
logger.info("Inference run successfully")
| apache-2.0 | 1,404,398,644,587,556,400 | 51.983193 | 157 | 0.617288 | false | 3.793622 | false | false | false |
ceos-seo/data_cube_ui | apps/fractional_cover/models.py | 1 | 9473 | # Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.db import models
from apps.dc_algorithm.models import Area, Compositor, Satellite
from apps.dc_algorithm.models import (Query as BaseQuery, Metadata as BaseMetadata, Result as BaseResult, ResultType as
BaseResultType, UserHistory as BaseUserHistory, AnimationType as
BaseAnimationType, ToolInfo as BaseToolInfo)
from utils.data_cube_utilities.dc_mosaic import (create_mosaic, create_median_mosaic, create_max_ndvi_mosaic,
create_min_ndvi_mosaic)
import numpy as np
class UserHistory(BaseUserHistory):
"""
Extends the base user history adding additional fields
See the dc_algorithm.UserHistory docstring for more information
"""
pass
class ToolInfo(BaseToolInfo):
"""
Extends the base ToolInfo adding additional fields
See the dc_algorithm.ToolInfo docstring for more information
"""
pass
class Query(BaseQuery):
"""
Extends base query, adds app specific elements. See the dc_algorithm.Query docstring for more information
Defines the get_or_create_query_from_post as required, adds new fields, recreates the unique together
field, and resets the abstract property. Functions are added to get human readable names for various properties,
foreign keys should define __str__ for a human readable name.
"""
compositor = models.ForeignKey(Compositor)
base_result_dir = '/datacube/ui_results/fractional_cover'
class Meta(BaseQuery.Meta):
unique_together = (('satellite', 'area_id', 'time_start', 'time_end', 'latitude_max', 'latitude_min',
'longitude_max', 'longitude_min', 'title', 'description', 'compositor'))
abstract = True
def get_fields_with_labels(self, labels, field_names):
for idx, label in enumerate(labels):
yield [label, getattr(self, field_names[idx])]
def get_chunk_size(self):
"""Implements get_chunk_size as required by the base class
See the base query class docstring for more information.
"""
if not self.compositor.is_iterative():
return {'time': None, 'geographic': 0.05}
return {'time': 50, 'geographic': 0.1}
def get_iterative(self):
"""implements get_iterative as required by the base class
See the base query class docstring for more information.
"""
return self.compositor.id != "median_pixel"
def get_reverse_time(self):
"""implements get_reverse_time as required by the base class
See the base query class docstring for more information.
"""
return self.compositor.id == "most_recent"
def get_processing_method(self):
"""implements get_processing_method as required by the base class
See the base query class docstring for more information.
"""
processing_methods = {
'most_recent': create_mosaic,
'least_recent': create_mosaic,
'max_ndvi': create_max_ndvi_mosaic,
'min_ndvi': create_min_ndvi_mosaic,
'median_pixel': create_median_mosaic
}
return processing_methods.get(self.compositor.id, create_mosaic)
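    # Illustrative sketch (not part of the original task code): a consumer
    # would typically resolve the compositor once and apply it per chunk, e.g.
    #     composite = query.get_processing_method()
    #     mosaic = composite(chunk_dataset, clean_mask=mask)
    # The keyword name used here is an assumption, not taken from dc_mosaic.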
@classmethod
def get_or_create_query_from_post(cls, form_data, pixel_drill=False):
"""Implements the get_or_create_query_from_post func required by base class
See the get_or_create_query_from_post docstring for more information.
Parses out the time start/end, creates the product, and formats the title/description
Args:
form_data: python dict containing either a single obj or a list formatted with post_data_to_dict
Returns:
Tuple containing the query model and a boolean value signifying if it was created or loaded.
"""
query_data = form_data
query_data['title'] = "Fractional Cover Query" if 'title' not in form_data or form_data[
'title'] == '' else form_data['title']
query_data['description'] = "None" if 'description' not in form_data or form_data[
'description'] == '' else form_data['description']
valid_query_fields = [field.name for field in cls._meta.get_fields()]
query_data = {key: query_data[key] for key in valid_query_fields if key in query_data}
try:
query = cls.objects.get(pixel_drill_task=pixel_drill, **query_data)
return query, False
except cls.DoesNotExist:
query = cls(pixel_drill_task=pixel_drill, **query_data)
query.save()
return query, True
class Metadata(BaseMetadata):
"""
Extends base metadata, adding additional fields and adding abstract=True.
zipped_metadata_fields is required.
See the dc_algorithm.Metadata docstring for more information
"""
satellite_list = models.CharField(max_length=100000, default="")
zipped_metadata_fields = [
'acquisition_list', 'clean_pixels_per_acquisition', 'clean_pixel_percentages_per_acquisition', 'satellite_list'
]
class Meta(BaseMetadata.Meta):
abstract = True
def metadata_from_dataset(self, metadata, dataset, clear_mask, parameters):
"""implements metadata_from_dataset as required by the base class
See the base metadata class docstring for more information.
"""
for metadata_index, time in enumerate(dataset.time.values.astype('M8[ms]').tolist()):
clean_pixels = np.sum(clear_mask[metadata_index, :, :] == True)
if time not in metadata:
metadata[time] = {}
metadata[time]['clean_pixels'] = 0
metadata[time]['satellite'] = parameters['platforms'][np.unique(
dataset.satellite.isel(time=metadata_index).values)[0]] if np.unique(
dataset.satellite.isel(time=metadata_index).values)[0] > -1 else "NODATA"
metadata[time]['clean_pixels'] += clean_pixels
return metadata
def combine_metadata(self, old, new):
"""implements combine_metadata as required by the base class
See the base metadata class docstring for more information.
"""
for key in new:
if key in old:
old[key]['clean_pixels'] += new[key]['clean_pixels']
continue
old[key] = new[key]
return old
def final_metadata_from_dataset(self, dataset):
"""implements final_metadata_from_dataset as required by the base class
See the base metadata class docstring for more information.
"""
self.pixel_count = len(dataset.latitude) * len(dataset.longitude)
self.clean_pixel_count = np.sum(dataset[list(dataset.data_vars)[0]].values != -9999)
self.percentage_clean_pixels = (self.clean_pixel_count / self.pixel_count) * 100
self.save()
def metadata_from_dict(self, metadata_dict):
"""implements metadata_from_dict as required by the base class
See the base metadata class docstring for more information.
"""
dates = list(metadata_dict.keys())
dates.sort(reverse=True)
self.total_scenes = len(dates)
self.scenes_processed = len(dates)
self.acquisition_list = ",".join([date.strftime("%m/%d/%Y") for date in dates])
self.satellite_list = ",".join([metadata_dict[date]['satellite'] for date in dates])
self.clean_pixels_per_acquisition = ",".join([str(metadata_dict[date]['clean_pixels']) for date in dates])
self.clean_pixel_percentages_per_acquisition = ",".join(
[str((metadata_dict[date]['clean_pixels'] * 100) / self.pixel_count) for date in dates])
self.save()
class Result(BaseResult):
"""
Extends base result, adding additional fields and adding abstract=True
See the dc_algorithm.Result docstring for more information
"""
# result path + other data. More to come.
mosaic_path = models.CharField(max_length=250, default="")
plot_path = models.CharField(max_length=250, default="")
data_path = models.CharField(max_length=250, default="")
data_netcdf_path = models.CharField(max_length=250, default="")
class Meta(BaseResult.Meta):
abstract = True
class FractionalCoverTask(Query, Metadata, Result):
"""
Combines the Query, Metadata, and Result abstract models
"""
pass | apache-2.0 | 7,047,605,444,043,234,000 | 40.371179 | 119 | 0.656392 | false | 4.102642 | false | false | false |
mcdaniel67/sympy | sympy/sets/fancysets.py | 9 | 27393 | from __future__ import print_function, division
from sympy.logic.boolalg import And
from sympy.core import oo
from sympy.core.basic import Basic
from sympy.core.compatibility import as_int, with_metaclass, range
from sympy.sets.sets import (Set, Interval, Intersection, EmptySet, Union,
FiniteSet)
from sympy.core.singleton import Singleton, S
from sympy.core.sympify import _sympify
from sympy.core.decorators import deprecated
from sympy.core.function import Lambda
class Naturals(with_metaclass(Singleton, Set)):
"""
Represents the natural numbers (or counting numbers) which are all
positive integers starting from 1. This set is also available as
the Singleton, S.Naturals.
Examples
========
>>> from sympy import S, Interval, pprint
>>> 5 in S.Naturals
True
>>> iterable = iter(S.Naturals)
>>> next(iterable)
1
>>> next(iterable)
2
>>> next(iterable)
3
>>> pprint(S.Naturals.intersect(Interval(0, 10)))
{1, 2, ..., 10}
See Also
========
Naturals0 : non-negative integers (i.e. includes 0, too)
Integers : also includes negative integers
"""
is_iterable = True
_inf = S.One
_sup = S.Infinity
def _intersect(self, other):
if other.is_Interval:
return Intersection(
S.Integers, other, Interval(self._inf, S.Infinity))
return None
def _contains(self, other):
if other.is_positive and other.is_integer:
return S.true
elif other.is_integer is False or other.is_positive is False:
return S.false
def __iter__(self):
i = self._inf
while True:
yield i
i = i + 1
@property
def _boundary(self):
return self
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
inclusive of zero.
See Also
========
Naturals : positive integers; does not include 0
Integers : also includes the negative integers
"""
_inf = S.Zero
def _contains(self, other):
if other.is_integer and other.is_nonnegative:
return S.true
elif other.is_integer is False or other.is_nonnegative is False:
return S.false
class Integers(with_metaclass(Singleton, Set)):
"""
Represents all integers: positive, negative and zero. This set is also
available as the Singleton, S.Integers.
Examples
========
>>> from sympy import S, Interval, pprint
    >>> 5 in S.Integers
True
>>> iterable = iter(S.Integers)
>>> next(iterable)
0
>>> next(iterable)
1
>>> next(iterable)
-1
>>> next(iterable)
2
>>> pprint(S.Integers.intersect(Interval(-4, 4)))
{-4, -3, ..., 4}
See Also
========
Naturals0 : non-negative integers
    Naturals : positive integers
"""
is_iterable = True
def _intersect(self, other):
from sympy.functions.elementary.integers import floor, ceiling
if other is Interval(S.NegativeInfinity, S.Infinity) or other is S.Reals:
return self
elif other.is_Interval:
s = Range(ceiling(other.left), floor(other.right) + 1)
return s.intersect(other) # take out endpoints if open interval
return None
def _contains(self, other):
if other.is_integer:
return S.true
elif other.is_integer is False:
return S.false
def __iter__(self):
yield S.Zero
i = S(1)
while True:
yield i
yield -i
i = i + 1
@property
def _inf(self):
return -S.Infinity
@property
def _sup(self):
return S.Infinity
@property
def _boundary(self):
return self
def _eval_imageset(self, f):
from sympy import Wild
expr = f.expr
if len(f.variables) > 1:
return
n = f.variables[0]
a = Wild('a')
b = Wild('b')
match = expr.match(a*n + b)
if match[a].is_negative:
expr = -expr
match = expr.match(a*n + b)
if match[a] is S.One and match[b].is_integer:
expr = expr - match[b]
return ImageSet(Lambda(n, expr), S.Integers)
class Reals(with_metaclass(Singleton, Interval)):
def __new__(cls):
return Interval.__new__(cls, -S.Infinity, S.Infinity)
def __eq__(self, other):
return other == Interval(-S.Infinity, S.Infinity)
def __hash__(self):
return hash(Interval(-S.Infinity, S.Infinity))
class ImageSet(Set):
"""
Image of a set under a mathematical function
Examples
========
>>> from sympy import Symbol, S, ImageSet, FiniteSet, Lambda
>>> x = Symbol('x')
>>> N = S.Naturals
>>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N}
>>> 4 in squares
True
>>> 5 in squares
False
>>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares)
{1, 4, 9}
>>> square_iterable = iter(squares)
>>> for i in range(4):
... next(square_iterable)
1
4
9
16
"""
def __new__(cls, lamda, base_set):
return Basic.__new__(cls, lamda, base_set)
lamda = property(lambda self: self.args[0])
base_set = property(lambda self: self.args[1])
def __iter__(self):
already_seen = set()
for i in self.base_set:
val = self.lamda(i)
if val in already_seen:
continue
else:
already_seen.add(val)
yield val
def _is_multivariate(self):
return len(self.lamda.variables) > 1
def _contains(self, other):
from sympy.solvers.solveset import solveset, linsolve
L = self.lamda
if self._is_multivariate():
solns = list(linsolve([expr - val for val, expr in zip(other, L.expr)],
L.variables).args[0])
else:
solns = list(solveset(L.expr - other, L.variables[0]))
for soln in solns:
try:
if soln in self.base_set:
return S.true
except TypeError:
return self.base_set.contains(soln.evalf())
return S.false
@property
def is_iterable(self):
return self.base_set.is_iterable
def _intersect(self, other):
from sympy import Dummy
from sympy.solvers.diophantine import diophantine
from sympy.sets.sets import imageset
if self.base_set is S.Integers:
if isinstance(other, ImageSet) and other.base_set is S.Integers:
f, g = self.lamda.expr, other.lamda.expr
n, m = self.lamda.variables[0], other.lamda.variables[0]
# Diophantine sorts the solutions according to the alphabetic
# order of the variable names, since the result should not depend
# on the variable name, they are replaced by the dummy variables
# below
a, b = Dummy('a'), Dummy('b')
f, g = f.subs(n, a), g.subs(m, b)
solns_set = diophantine(f - g)
if solns_set == set():
return EmptySet()
solns = list(diophantine(f - g))
if len(solns) == 1:
t = list(solns[0][0].free_symbols)[0]
else:
return None
# since 'a' < 'b'
return imageset(Lambda(t, f.subs(a, solns[0][0])), S.Integers)
if other == S.Reals:
from sympy.solvers.solveset import solveset_real
from sympy.core.function import expand_complex
if len(self.lamda.variables) > 1:
return None
f = self.lamda.expr
n = self.lamda.variables[0]
n_ = Dummy(n.name, real=True)
f_ = f.subs(n, n_)
re, im = f_.as_real_imag()
im = expand_complex(im)
return imageset(Lambda(n_, re),
self.base_set.intersect(
solveset_real(im, n_)))
@deprecated(useinstead="ImageSet", issue=7057, deprecated_since_version="0.7.4")
def TransformationSet(*args, **kwargs):
"""Deprecated alias for the ImageSet constructor."""
return ImageSet(*args, **kwargs)
class Range(Set):
"""
Represents a range of integers.
Examples
========
>>> from sympy import Range
>>> list(Range(5)) # 0 to 5
[0, 1, 2, 3, 4]
>>> list(Range(10, 15)) # 10 to 15
[10, 11, 12, 13, 14]
>>> list(Range(10, 20, 2)) # 10 to 20 in steps of 2
[10, 12, 14, 16, 18]
>>> list(Range(20, 10, -2)) # 20 to 10 backward in steps of 2
[12, 14, 16, 18, 20]
"""
is_iterable = True
def __new__(cls, *args):
from sympy.functions.elementary.integers import ceiling
# expand range
slc = slice(*args)
start, stop, step = slc.start or 0, slc.stop, slc.step or 1
try:
start, stop, step = [w if w in [S.NegativeInfinity, S.Infinity] else S(as_int(w))
for w in (start, stop, step)]
except ValueError:
raise ValueError("Inputs to Range must be Integer Valued\n" +
"Use ImageSets of Ranges for other cases")
if not step.is_finite:
raise ValueError("Infinite step is not allowed")
if start == stop:
return S.EmptySet
n = ceiling((stop - start)/step)
if n <= 0:
return S.EmptySet
# normalize args: regardless of how they are entered they will show
# canonically as Range(inf, sup, step) with step > 0
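        # e.g. Range(20, 10, -2) ends up with start=12, stop=20, step=2,
        # stored below as the args (12, 22, 2)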
if n.is_finite:
start, stop = sorted((start, start + (n - 1)*step))
else:
start, stop = sorted((start, stop - step))
step = abs(step)
if (start, stop) == (S.NegativeInfinity, S.Infinity):
raise ValueError("Both the start and end value of "
"Range cannot be unbounded")
else:
return Basic.__new__(cls, start, stop + step, step)
start = property(lambda self: self.args[0])
stop = property(lambda self: self.args[1])
step = property(lambda self: self.args[2])
def _intersect(self, other):
from sympy.functions.elementary.integers import floor, ceiling
from sympy.functions.elementary.miscellaneous import Min, Max
if other.is_Interval:
osup = other.sup
oinf = other.inf
# if other is [0, 10) we can only go up to 9
if osup.is_integer and other.right_open:
osup -= 1
if oinf.is_integer and other.left_open:
oinf += 1
# Take the most restrictive of the bounds set by the two sets
# round inwards
inf = ceiling(Max(self.inf, oinf))
sup = floor(Min(self.sup, osup))
# if we are off the sequence, get back on
if inf.is_finite and self.inf.is_finite:
off = (inf - self.inf) % self.step
else:
off = S.Zero
if off:
inf += self.step - off
return Range(inf, sup + 1, self.step)
if other == S.Naturals:
return self._intersect(Interval(1, S.Infinity))
if other == S.Integers:
return self
return None
def _contains(self, other):
if (((self.start - other)/self.step).is_integer or
((self.stop - other)/self.step).is_integer):
return _sympify(other >= self.inf and other <= self.sup)
elif (((self.start - other)/self.step).is_integer is False and
((self.stop - other)/self.step).is_integer is False):
return S.false
def __iter__(self):
if self.start is S.NegativeInfinity:
i = self.stop - self.step
step = -self.step
else:
i = self.start
step = self.step
while(i < self.stop and i >= self.start):
yield i
i += step
def __len__(self):
return (self.stop - self.start)//self.step
def __nonzero__(self):
return True
__bool__ = __nonzero__
def _ith_element(self, i):
return self.start + i*self.step
@property
def _last_element(self):
if self.stop is S.Infinity:
return S.Infinity
elif self.start is S.NegativeInfinity:
return self.stop - self.step
else:
return self._ith_element(len(self) - 1)
@property
def _inf(self):
return self.start
@property
def _sup(self):
return self.stop - self.step
@property
def _boundary(self):
return self
def normalize_theta_set(theta):
"""
    Normalize a real set theta to the interval [0, 2*pi). It currently
    supports Interval and FiniteSet inputs and returns the normalized
    value of theta in that set. For an Interval, at most one full cycle
    [0, 2*pi) is returned, i.e. for theta equal to [0, 10*pi] the
    normalized value would be [0, 2*pi).
Raises
======
NotImplementedError
The algorithms for Normalizing theta Set are not yet
implemented.
ValueError
The input is not valid, i.e. the input is not a real set.
RuntimeError
It is a bug, please report to the github issue tracker.
Examples
========
>>> from sympy.sets.fancysets import normalize_theta_set
>>> from sympy import Interval, FiniteSet, pi
>>> normalize_theta_set(Interval(9*pi/2, 5*pi))
[pi/2, pi]
>>> normalize_theta_set(Interval(-3*pi/2, pi/2))
[0, 2*pi)
>>> normalize_theta_set(Interval(-pi/2, pi/2))
[0, pi/2] U [3*pi/2, 2*pi)
>>> normalize_theta_set(Interval(-4*pi, 3*pi))
[0, 2*pi)
>>> normalize_theta_set(Interval(-3*pi/2, -pi/2))
[pi/2, 3*pi/2]
>>> normalize_theta_set(FiniteSet(0, pi, 3*pi))
{0, pi}
"""
from sympy.functions.elementary.trigonometric import _pi_coeff as coeff
from sympy.functions.elementary.complexes import Abs
if theta.is_Interval:
# one complete circle
if Abs(theta.args[0] - theta.args[1]) >= 2*S.Pi:
return Interval(0, 2*S.Pi, False, True)
new_theta = []
for val in [theta.args[0], theta.args[1]]:
k = coeff(val)
if (not k) and (k != S.Zero):
                raise NotImplementedError('Normalizing theta without pi as '
                                          'coefficient is not implemented.')
elif k == S.Zero:
if val == S.Zero:
new_theta.append(S.Zero)
else:
# when theta is n*pi
new_theta.append(2*S.Pi)
else:
new_theta.append(k*S.Pi)
# for negative theta
if new_theta[0] > new_theta[1]:
return Union(Interval(S(0), new_theta[1]),
Interval(new_theta[0], 2*S.Pi, False, True))
else:
return Interval(*new_theta)
elif theta.is_FiniteSet:
new_theta = []
for element in theta:
k = coeff(element)
if (not k) and (k != S.Zero):
                raise NotImplementedError('Normalizing theta without pi as '
                                          'coefficient is not implemented.')
elif k == S.Zero:
if element == S.Zero:
new_theta.append(S.Zero)
else:
new_theta.append(k*S.Pi)
return FiniteSet(*new_theta)
elif theta.is_subset(S.Reals):
raise NotImplementedError("Normalizing theta when, its %s is not"
"Implemented" % type(theta))
else:
raise ValueError(" %s is not a real set" % (theta))
class ComplexRegion(Set):
"""
Represents the Set of all Complex Numbers. It can represent a
region of Complex Plane in both the standard forms Polar and
Rectangular coordinates.
* Polar Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of r and theta, & use the flag polar=True.
Z = {z in C | z = r*[cos(theta) + I*sin(theta)], r in [r], theta in [theta]}
* Rectangular Form
Input is in the form of the ProductSet or Union of ProductSets
of interval of x and y the of the Complex numbers in a Plane.
Default input type is in rectangular form.
Z = {z in C | z = x + I*y, x in [Re(z)], y in [Im(z)]}
Examples
========
>>> from sympy.sets.fancysets import ComplexRegion
>>> from sympy.sets import Interval
>>> from sympy import S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c = Interval(1, 8)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
ComplexRegion(Lambda((_x, _y), _x + _y*I), [2, 3] x [4, 6])
* c1 represents the rectangular region in complex plane
surrounded by the coordinates (2, 4), (3, 4), (3, 6) and
(2, 6), of the four vertices.
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
ComplexRegion(Lambda((_x, _y), _x + _y*I),
[2, 3] x [4, 6] U [4, 6] x [1, 8])
* c2 represents the Union of two rectangular regions in complex
plane. One of them surrounded by the coordinates of c1 and
other surrounded by the coordinates (4, 1), (6, 1), (6, 8) and
(4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
ComplexRegion(Lambda((_r, _theta), _r*(I*sin(_theta) + cos(_theta))),
[0, 1] x [0, 2*pi))
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
ComplexRegion(Lambda((_r, _theta), _r*(I*sin(_theta) + cos(_theta))), [0, 1] x [0, pi])
>>> intersection == upper_half_unit_disk
True
See Also
========
Reals
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
from sympy import symbols, Dummy
x, y, r, theta = symbols('x, y, r, theta', cls=Dummy)
I = S.ImaginaryUnit
# Rectangular Form
if polar is False:
if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2):
# ** ProductSet of FiniteSets in the Complex Plane. **
# For Cases like ComplexRegion({2, 4}*{3}), It
# would return {2 + 3*I, 4 + 3*I}
complex_num = []
for x in sets.args[0]:
for y in sets.args[1]:
complex_num.append(x + I*y)
obj = FiniteSet(*complex_num)
else:
obj = ImageSet.__new__(cls, Lambda((x, y), x + I*y), sets)
# Polar Form
elif polar is True:
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
from sympy.sets import ProductSet
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
sets = Union(*new_sets)
from sympy import cos, sin
obj = ImageSet.__new__(cls, Lambda((r, theta),
r*(cos(theta) + I*sin(theta))),
sets)
return obj
@property
def sets(self):
"""
Return raw input sets to the self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
[2, 3] x [4, 5]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
[2, 3] x [4, 5] U [4, 5] x [1, 7]
"""
return self.args[1]
@property
def psets(self):
"""
Return a tuple of sets (ProductSets) input of the self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
([2, 3] x [4, 5],)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
([2, 3] x [4, 5], [4, 5] x [1, 7])
"""
if self.args[1].is_ProductSet:
psets = ()
psets = psets + (self.args[1], )
else:
psets = self.args[1].args
return psets
@property
def a_interval(self):
"""
Return the union of intervals of `x` when, self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
[2, 3]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
[2, 3] U [4, 5]
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
Return the union of intervals of `y` when, self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
[4, 5]
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
[1, 7]
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def polar(self):
"""
Returns True if self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union, S
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> theta = Interval(0, 2*S.Pi)
>>> C1 = ComplexRegion(a*b)
>>> C1.polar
False
>>> C2 = ComplexRegion(a*theta, polar=True)
>>> C2.polar
True
"""
return self.args[0].args[1].is_Mul
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
def _contains(self, other):
from sympy.functions import arg, Abs
# self in rectangular form
if not self.polar:
re, im = other.as_real_imag()
for element in self.psets:
if And(element.args[0]._contains(re),
element.args[1]._contains(im)):
return True
return False
# self in polar form
elif self.polar:
if S(other).is_zero:
r, theta = S(0), S(0)
else:
r, theta = Abs(other), arg(other)
for element in self.psets:
if And(element.args[0]._contains(r),
element.args[1]._contains(theta)):
return True
return False
def _intersect(self, other):
if other.is_ComplexRegion:
# self in rectangular form
if (not self.polar) and (not other.polar):
return ComplexRegion(Intersection(self.sets, other.sets))
# self in polar form
elif self.polar and other.polar:
r1, theta1 = self.a_interval, self.b_interval
r2, theta2 = other.a_interval, other.b_interval
new_r_interval = Intersection(r1, r2)
new_theta_interval = Intersection(theta1, theta2)
# 0 and 2*Pi means the same
if ((2*S.Pi in theta1 and S(0) in theta2) or
(2*S.Pi in theta2 and S(0) in theta1)):
new_theta_interval = Union(new_theta_interval,
FiniteSet(0))
return ComplexRegion(new_r_interval*new_theta_interval,
polar=True)
if other is S.Reals:
return other
if other.is_subset(S.Reals):
new_interval = []
# self in rectangular form
if not self.polar:
for element in self.psets:
if S.Zero in element.args[0]:
new_interval.append(element.args[0])
new_interval = Union(*new_interval)
return Intersection(new_interval, other)
# self in polar form
elif self.polar:
for element in self.psets:
if (0 in element.args[1]) or (S.Pi in element.args[1]):
new_interval.append(element.args[0])
new_interval = Union(*new_interval)
return Intersection(new_interval, other)
def _union(self, other):
if other.is_ComplexRegion:
# self in rectangular form
if (not self.polar) and (not other.polar):
return ComplexRegion(Union(self.sets, other.sets))
# self in polar form
elif self.polar and other.polar:
return ComplexRegion(Union(self.sets, other.sets), polar=True)
if other.is_subset(S.Reals):
return self
return None
class Complexes(with_metaclass(Singleton, ComplexRegion)):
def __new__(cls):
return ComplexRegion.__new__(cls, S.Reals*S.Reals)
def __eq__(self, other):
if other == ComplexRegion(S.Reals*S.Reals):
return True
def __hash__(self):
return hash(ComplexRegion(S.Reals*S.Reals))
| bsd-3-clause | -1,737,921,225,718,698,200 | 28.710412 | 93 | 0.527178 | false | 3.807227 | false | false | false |
mfherbst/spack | var/spack/repos/builtin/packages/r-alsace/package.py | 2 | 2034 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAlsace(RPackage):
"""Alternating Least Squares (or Multivariate Curve Resolution)
for analytical chemical data, in particular hyphenated data where
the first direction is a retention time axis, and the second a
spectral axis. Package builds on the basic als function from the
ALS package and adds functionality for high-throughput analysis,
including definition of time windows, clustering of profiles,
retention time correction, etcetera."""
homepage = "https://www.bioconductor.org/packages/alsace/"
git = "https://git.bioconductor.org/packages/alsace.git"
version('1.12.0', commit='1364c65bbff05786d05c02799fd44fd57748fae3')
depends_on('r-als', type=('build', 'run'))
depends_on('r-ptw', type=('build', 'run'))
| lgpl-2.1 | 4,785,196,162,717,225,000 | 46.302326 | 78 | 0.692724 | false | 4.051793 | false | false | false |
detly/coan-packaging | python/coanlib.py | 1 | 12012 | #!/usr/bin/python
# Module coanlib provides utility routines for the coan test harnesses
import sys, os, subprocess, re, shutil, string, shlex, time, atexit, \
errno
__progress = 0
__info = 1
__warning = 2
__error = 3
__fatal = 4
__severities_by_word = {
'progress' : __progress,
'info' : __info,
'warning' : __warning,
'error' : __error,
'fatal' : __fatal
}
__severities_by_num = {
__progress : 'progress',
__info : 'info',
__warning : 'warning',
__error : 'error',
__fatal : 'fatal'
}
__prog = '<unknown program>'
__verbosity = __progress
__time_file = None
__test_size_file = None
def get_prog():
''' Get the current program name '''
return __prog
def set_prog(prog):
''' Set the current program name '''
global __prog
__prog = prog
def get_verbosity():
''' Get the current verbosity level by keyword '''
return __severities_by_num[__verbosity]
def set_verbosity(verbosity_keyword):
'''
Set the verbosity level by keyword.
Messages with a lower verbosity level will be suppressed
'''
global __verbosity
__validate_verbosity(verbosity_keyword)
__verbosity = __severities_by_word[verbosity_keyword]
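# Example: set_verbosity('warning') suppresses progress() and info() output,
# while warn(), error() and fatal() messages are still written to stderr.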
def progress(msg):
''' Issue a progress message '''
__report(__progress,msg)
def info(msg):
''' Issue an informational message '''
__report(__info,msg)
def warn(msg):
''' Issue a warning message '''
__report(__warning,msg)
def error(msg):
''' Issue an error message '''
__report(__error,msg)
def fatal(msg):
''' Issue a fatal error message '''
__report(__fatal,msg)
def finis(failures):
''' End a test according to the number of failures'''
if (failures):
file_del(__get_time_file())
file_del(__get_test_size_file())
sys.exit(failures)
def bail(msg, exitcode = 1):
''' Optionally issue a fatal error message and exit with a given system code'''
fatal(msg)
file_del(__get_time_file())
file_del(__get_test_size_file())
sys.exit(exitcode)
def __get_time_file():
''' Set the name of the test timing file if unset'''
global __time_file
if not __time_file:
prog = get_prog()
pkgdir = deduce_pkgdir()
__time_file = os.path.join(pkgdir,'test_coan',prog +'.time.txt') \
if prog != '<unknown program>' else None
return __time_file
def __get_test_size_file():
''' Set the name of the test size file if unset'''
global __test_size_file
if not __test_size_file:
prog = get_prog()
pkgdir = deduce_pkgdir()
__test_size_file = os.path.join(pkgdir,'test_coan',prog +'.size.txt') \
if prog != '<unknown program>' else None
return __test_size_file
def __compute_runtime(time_file):
''' Parse the coan timing file and add the entries to
compute the total runtime of the process(es)
recorded there '''
lines = slurp_lines(time_file)
seconds = 0.0
for line in lines:
try:
seconds = seconds + float(line)
except:
pass
return seconds
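# A coan time file written by run() holds one elapsed time per line (for
# example "0.02" and "0.15"), so __compute_runtime would report 0.17 for it.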
def __compute_test_size(size_file):
''' Parse the coan test size file and add the entries to
	compute the total size (number of test files) of the test(s)
recorded there '''
lines = slurp_lines(size_file)
test_files = 0
for line in lines:
try:
test_files = test_files + int(line)
except:
pass
return test_files
def __report(severity,msg):
''' Issue a message with a given severity '''
if severity >= __verbosity:
severity_keyword = __severities_by_num[severity]
outstr = __prog + ": " + severity_keyword + ": " + \
msg + '\n'
if severity < __warning:
sys.stdout.write(outstr)
else:
sys.stderr.write(outstr)
def __validate_verbosity(keyword):
''' Validate a verbosity level keyword '''
if keyword not in __severities_by_word:
bail("Unknown severity keyword: \"" + keyword + "\"")
def windows():
''' Say whether the host OS is Windows '''
return os.name == 'nt'
def fopen(file,mode):
if mode != 'r' and mode != 'w' and mode != 'a':
bail('*** Unknown file open mode\'' + mode + '\' ***')
try:
return open(file,mode)
except IOError as error:
modestr = 'reading' if mode == 'r' else 'writing'
bail('*** Cannot open file \"' + file + '\"' + " for " + \
modestr + ': ' + error.strerror + ' ***')
def make_path(path):
'''
Try to create the directory specified by a path,
    bailing on failure
'''
try:
os.makedirs(path)
except OSError as error:
bail('*** Failed to create directory \"' + path + '\": ' + \
error.strerror)
def del_tree(rootdir):
''' Try to delete a directory tree, if it exists,
bailing on failure '''
try:
if os.path.isdir(rootdir):
shutil.rmtree(rootdir)
except OSError as error:
bail('*** Failed to delete directory \"' + rootdir + '\": ' +
error.strerror)
def file_copy(src,dest):
''' Try to copy a file to another, bailing on failure '''
try:
shutil.copyfile(src,dest)
except IOError:
bail('*** Failed to copy file \"' +\
src + '\" as \"' + dest + '\"')
def file_copy_to(src,dest):
''' Try to copy a file, bailing on failure '''
try:
shutil.copy(src,dest)
except IOError:
bail('*** Failed to copy file \"' +\
src + '\" -> \"' + dest + '\"')
def file_del(filename):
    ''' Try to delete a file if it exists, bailing on failure '''
    try:
        os.remove(filename)
    except OSError as e:
        if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            bail('*** Failed to delete file \"' + filename + '\"')
def slurp_command(cmd, with_stderr = False):
''' Return the output of a command as a string '''
words = cmd.split()
output = subprocess.check_output(words) if not with_stderr \
else subprocess.check_output(words, stderr=subprocess.STDOUT)
return output
def slurp_file(file):
    ''' Return the contents of a file as a string '''
    fh = fopen(file,'r')
    data = fh.read()
    fh.close()
    return data
def slurp_lines(file):
    ''' Return the contents of a file as a list of lines '''
    fh = fopen(file,'r')
    lines = fh.readlines()
    fh.close()
    return lines
def __timing_metrics_enabled():
return os.getenv('TIMING_METRICS') == '1'
def __do_timing_metrics():
if not __timing_metrics_enabled():
return False
time_version = slurp_command('/usr/bin/time --version', True)
return time_version.find('GNU') != -1
DO_TIMING_METRICS = __do_timing_metrics()
def run(cmd,
stdout_file = None,stderr_file = None,stdin_file = None, timing = DO_TIMING_METRICS):
'''
Run a command optionally specifying
files to capture stdout and stderr and whether timing is required
Return the exit code of the command.
'''
time_file = __get_time_file() if timing else None
if windows():
cmd = re.sub(r'\\',r'\\\\',cmd)
elif time_file:
cmd = "/usr/bin/time -f \"%e\" --quiet -a -o " \
+ time_file + ' ' + cmd
args = []
try:
args = shlex.split(cmd)
except ValueError:
args = cmd.split()
stdout_fh = None
stderr_fh = None
stdin_fh = None
if stdout_file:
stdout_fh = fopen(stdout_file,'w')
if stderr_file:
stderr_fh = fopen(stderr_file,'w')
if stdin_file:
stdin_fh = fopen(stdin_file,'r')
progress('*** Running: ' + cmd)
syscode = subprocess.call(args,
stdout=stdout_fh,stderr=stderr_fh,stdin=stdin_fh)
if stdout_fh:
stdout_fh.close()
if stderr_fh:
stderr_fh.close()
if stdin_fh:
stdin_fh.close()
return syscode
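# Illustrative call (hypothetical file names): run('coan --version', stdout_file='out.txt')
# captures stdout in out.txt and returns the command's exit status; with timing enabled,
# /usr/bin/time appends the elapsed seconds to the per-program timing file.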
def run_noerr(cmd):
retcode = run(cmd)
if retcode:
bail('*** Command failed: \"' + cmd + '\": ' + \
os.strerror(retcode))
def is_exe(path):
''' Say whether a path is an executable file '''
return os.path.isfile(path) and os.access(path, os.X_OK)
def deduce_pkgdir(args = {}):
''' Deduce the actual coan package directory given the commandline args '''
pkgdir = None
try:
pkgdir = args['pkgdir']
except:
pass
if not pkgdir:
pkgdir = os.getenv('COAN_PKGDIR')
if not pkgdir:
pkgdir = os.pardir
return os.path.abspath(pkgdir)
def deduce_execdir(args = {}):
''' Deduce the actual directory containing the coan
executable given the commandline args '''
execdir = None
try:
execdir = args['execdir']
except:
pass
if not execdir:
execdir = 'src'
builddir = os.getenv('COAN_BUILDDIR')
if not builddir:
builddir = deduce_pkgdir(args)
execdir = os.path.join(builddir,execdir)
return os.path.abspath(execdir)
def compute_runtime(time_files = [__get_time_file()]):
''' Parse the coan timing files and add the entries to
compute the total runtime of the process(es)
recorded there. If the list of timing files includes any
    but the program's own timing file then the current
program's timing file is removed from the list.
'''
time_file = __get_time_file()
if time_files != [time_file]:
try:
posn = time_files.index(time_file)
del time_files[posn]
except:
pass
seconds = 0.0
for time_file in time_files:
if os.path.exists(time_file):
seconds = seconds + __compute_runtime(time_file)
return seconds
def update_test_size_file(nfiles):
''' Append the number of files a test is to process to the
coan test size file '''
size_file = __get_test_size_file();
if size_file:
fh = fopen(size_file,'a')
fh.write(str(nfiles) + '\n')
fh.close()
def compute_test_size(size_files = [__get_test_size_file()]):
''' Parse the coan test size files and add the entries to
    compute the total size of the test(s)
recorded there. If the list of size files includes any
    but the program's own size file then the current
program's size file is removed from the list.
'''
size_file = __get_test_size_file()
if size_files != [size_file]:
try:
posn = size_files.index(size_file)
del size_files[posn]
except:
pass
test_files = 0
for size_file in size_files:
if os.path.exists(size_file):
test_files = test_files + __compute_test_size(size_file)
return test_files
def report_runtime(time_files = [__get_time_file()]):
''' Display the total runtime recorded in the
coan timing files. If the list of timing files
includes any but the current program's timing file
then the current program's timing file is removed from
the list and the report is also written to the current
program's timing file.
Return the total runtime computed.
'''
tot_time = compute_runtime(time_files)
info('runtime in coan: ' + str(tot_time) + ' secs.')
time_file = __get_time_file()
if time_file not in time_files:
fh = fopen(time_file,'w')
fh.write(str(tot_time) +'\n')
fh.close()
return tot_time
def report_test_size(size_files = [__get_test_size_file()]):
''' Display the total test size recorded in the
coan test size files. If the list of size files
includes any but the current program's size file
then the current program's size file is removed from
the list and the report is also written to the current
program's size file.
Return the total test size computed.
'''
tot_size = compute_test_size(size_files)
if tot_size != 0:
info('Coan processed ' + str(tot_size) + ' input_files.')
size_file = __get_test_size_file()
if size_file not in size_files:
fh = fopen(size_file,'w')
fh.write(str(tot_size) + '\n')
fh.close()
return tot_size
def report_metrics( time_files = [__get_time_file()],
size_files = [__get_test_size_file()]):
tot_size = report_test_size(size_files)
if tot_size == 0:
return
tot_time = report_runtime(time_files);
av_time = float(tot_time) / float(tot_size)
info('Average processing time per input file: {:2.6f} secs.'\
.format(av_time))
def measure_runtime():
''' Initialize coan runtime measuring '''
time_file = __get_time_file()
if time_file:
file_del(time_file)
atexit.register(report_runtime,[__get_time_file()])
def measure_test_size():
''' Initialize coan test_size measuring '''
size_file = __get_test_size_file()
if size_file:
file_del(size_file)
atexit.register(report_test_size,[__get_test_size_file()])
def do_metrics():
    ''' Initialize coan test metrics '''
    time_file = __get_time_file()
    size_file = __get_test_size_file()
    if time_file and size_file:
        file_del(time_file)
        file_del(size_file)
        atexit.register(
            report_metrics,
            [__get_time_file()],
            [__get_test_size_file()])
if __name__ == "__main__":
print slurp_command('/usr/bin/time --version', True)
| bsd-3-clause | 4,693,554,869,173,499,000 | 25.752784 | 86 | 0.658675 | false | 2.933333 | true | false | false |
frappe/frappe | frappe/automation/doctype/milestone_tracker/milestone_tracker.py | 1 | 1460 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
import frappe.cache_manager
from frappe.model import log_types
class MilestoneTracker(Document):
def on_update(self):
frappe.cache_manager.clear_doctype_map('Milestone Tracker', self.document_type)
def on_trash(self):
frappe.cache_manager.clear_doctype_map('Milestone Tracker', self.document_type)
def apply(self, doc):
before_save = doc.get_doc_before_save()
from_value = before_save and before_save.get(self.track_field) or None
if from_value != doc.get(self.track_field):
frappe.get_doc(dict(
doctype = 'Milestone',
reference_type = doc.doctype,
reference_name = doc.name,
track_field = self.track_field,
from_value = from_value,
value = doc.get(self.track_field),
milestone_tracker = self.name,
)).insert(ignore_permissions=True)
def evaluate_milestone(doc, event):
if (frappe.flags.in_install
or frappe.flags.in_migrate
or frappe.flags.in_setup_wizard
or doc.doctype in log_types):
return
# track milestones related to this doctype
for d in get_milestone_trackers(doc.doctype):
frappe.get_doc('Milestone Tracker', d.get('name')).apply(doc)
def get_milestone_trackers(doctype):
return frappe.cache_manager.get_doctype_map('Milestone Tracker', doctype,
dict(document_type = doctype, disabled=0))
| mit | -3,799,643,336,522,752,500 | 31.444444 | 81 | 0.734932 | false | 3.208791 | false | false | false |
aaae/kazam | kazam/backend/grabber.py | 2 | 7672 | # -*- coding: utf-8 -*-
#
# grabber.py
#
# Copyright 2012 David Klasinc <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import subprocess
import logging
logger = logging.getLogger("Grabber")
from gi.repository import GObject, Gtk, Gdk, GdkPixbuf, GdkX11
from kazam.backend.prefs import *
from kazam.frontend.save_dialog import SaveDialog
from gettext import gettext as _
class Grabber(GObject.GObject):
__gsignals__ = {
"save-done" : (GObject.SIGNAL_RUN_LAST,
None,
[GObject.TYPE_PYOBJECT],),
"flush-done" : (GObject.SIGNAL_RUN_LAST,
None,
(),),
}
def __init__(self):
GObject.GObject.__init__(self)
logger.debug("Starting Grabber.")
def setup_sources(self, video_source, area, xid, active = False, god = False):
self.video_source = video_source
self.area = area
self.xid = xid
self.god = god
if active:
from gi.repository import GdkX11
active_win = HW.default_screen.get_active_window()
self.xid = GdkX11.X11Window.get_xid(active_win)
logger.debug("Grabber source: {0}, {1}, {2}, {3}".format(self.video_source['x'],
self.video_source['y'],
self.video_source['width'],
self.video_source['height']))
def grab(self):
self.pixbuf = None
disp = GdkX11.X11Display.get_default()
dm = Gdk.Display.get_device_manager(disp)
pntr_device = dm.get_client_pointer()
#
# Rewrite this, because it sucks
#
if prefs.shutter_sound and (not self.god):
soundfile = os.path.join(prefs.datadir, 'sounds', prefs.sound_files[prefs.shutter_type])
subprocess.call(['/usr/bin/canberra-gtk-play', '-f', soundfile])
if self.xid:
if prefs.capture_borders_pic:
app_win = GdkX11.X11Window.foreign_new_for_display(disp, self.xid)
(rx, ry, rw, rh) = app_win.get_geometry()
area = app_win.get_frame_extents()
(fx, fy, fw, fh) = (area.x, area.y, area.width, area.height)
win = Gdk.get_default_root_window()
logger.debug("Coordinates w: RX {0} RY {1} RW {2} RH {3}".format(rx, ry, rw, rh))
logger.debug("Coordinates f: FX {0} FY {1} FW {2} FH {3}".format(fx, fy, fw, fh))
dx = fw - rw
dy = fh - rh
(x, y, w, h) = (fx, fy, fw, fh)
logger.debug("Coordinates delta: DX {0} DY {1}".format(dx, dy))
else:
win = GdkX11.X11Window.foreign_new_for_display(disp, self.xid)
(x, y, w, h) = win.get_geometry()
else:
win = Gdk.get_default_root_window()
(x, y, w, h) = (self.video_source['x'],
self.video_source['y'],
self.video_source['width'],
self.video_source['height'])
self.pixbuf = Gdk.pixbuf_get_from_window(win, x, y, w, h)
logger.debug("Coordinates X {0} Y {1} W {2} H {3}".format(x, y, w, h))
# Code below partially solves problem with overlapping windows.
# Partially only because if something is overlapping window frame
# it will be captured where the frame should be and also
# because it doesn't work as it should. Offset trouble.
#
#if self.xid and prefs.capture_borders_pic:
# cw_pixbuf = Gdk.pixbuf_get_from_window(app_win, rx, ry, rw, rh)
# cw_pixbuf.composite(self.pixbuf, rx, ry, rw, rh,
# dx,
# dy,
# 1.0,
# 1.0,
# GdkPixbuf.InterpType.BILINEAR,
# 255)
if prefs.capture_cursor_pic:
logger.debug("Adding cursor.")
cursor = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.LEFT_PTR)
c_picbuf = Gdk.Cursor.get_image(cursor)
if self.xid and prefs.capture_borders_pic:
pointer = app_win.get_device_position(pntr_device)
(px, py) = (pointer[1], pointer[2])
logger.debug("XID cursor: {0} {1}".format(px, py))
c_picbuf.composite(self.pixbuf, rx, ry, rw, rh,
px + dx - 6,
py + dy - 2,
1.0,
1.0,
GdkPixbuf.InterpType.BILINEAR,
255)
else:
(scr, px, py) = pntr_device.get_position()
cur = scr.get_monitor_at_point(x, y)
px = px - HW.screens[cur]['x']
py = py - HW.screens[cur]['y']
#
# Cursor is offset by 6 pixels to the right and 2 down
#
c_picbuf.composite(self.pixbuf, 0, 0, w - 1, h - 1,
px - 6,
py - 2,
1.0,
1.0,
GdkPixbuf.InterpType.BILINEAR,
255)
logger.debug("Cursor coords: {0} {1}".format(px, py))
if self.area is not None:
logger.debug("Cropping image.")
self.area_buf = GdkPixbuf.Pixbuf.new(GdkPixbuf.Colorspace.RGB, True, 8, self.area[4], self.area[5])
self.pixbuf.copy_area(self.area[0], self.area[1], self.area[4], self.area[5], self.area_buf, 0, 0)
self.pixbuf = None
self.pixbuf = self.area_buf
self.emit("flush-done")
def save(self, filename):
if self.pixbuf is not None:
self.pixbuf.savev(filename, "png", "", "")
def save_capture(self, old_path):
logger.debug("Saving screenshot.")
self.old_path = old_path
(dialog, result, self.old_path) = SaveDialog(_("Save capture"),
self.old_path, None, main_mode=MODE_SCREENSHOT)
if result == Gtk.ResponseType.OK:
uri = os.path.join(dialog.get_current_folder(), dialog.get_filename())
self.save(uri)
dialog.destroy()
self.emit("save-done", self.old_path)
def autosave(self, filename):
logger.debug("Autosaving to: {0}".format(filename))
self.save(filename)
self.emit("save-done", filename)
| gpl-3.0 | 2,435,549,433,357,760,500 | 40.247312 | 111 | 0.501173 | false | 3.851406 | false | false | false |
demophoon/sams-client | client/check.py | 1 | 1348 | """
Perform http checks and generate reports
"""
import urllib
import urllib2
from client.cli import parser
class Check(object):
def __init__(self,
url,
checkType='http',
method='GET',
data=None,
headers=None,
*args, **kwargs):
self.checkType = checkType
self.url = url
self.method = method
self.data = data
self.headers = {}
if headers:
self.headers = headers
def run(self):
req = urllib2.Request(
self.url,
self.data,
self.headers,
)
result = {
'state': ''
}
try:
response = urllib2.urlopen(req)
except Exception as e:
return e
return response
#'close'
#'code'
#'errno'
#'fileno'
#'fp'
#'getcode'
#'geturl'
#'headers'
#'info'
#'msg'
#'next'
#'read'
#'readline'
#'readlines'
#'url']
def run_check(url):
check = Check(url)
result = check.run()
from pprint import pprint
pprint({
'code': result.code,
'headers': result.headers.items(),
'msg': result.msg,
'url': result.url,
'content': result.read(),
})
def cli_handler(**args):
url = args.get('<url>')
run_check(url)
| mit | 1,980,387,785,553,849,900 | 16.736842 | 44 | 0.491098 | false | 3.930029 | false | false | false |
inkerra/cinder | cinder/tests/keymgr/mock_key_mgr.py | 1 | 3617 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A mock implementation of a key manager. This module should NOT be used for
anything but integration testing.
"""
import array
from cinder import exception
from cinder.keymgr import key
from cinder.keymgr import key_mgr
from cinder.openstack.common import log as logging
from cinder.openstack.common import uuidutils
from cinder import utils
LOG = logging.getLogger(__name__)
class MockKeyManager(key_mgr.KeyManager):
"""
This mock key manager implementation supports all the methods specified
by the key manager interface. This implementation stores keys within a
dictionary, and as a result, it is not acceptable for use across different
services. Side effects (e.g., raising exceptions) for each method are
handled as specified by the key manager interface.
This class should NOT be used for anything but integration testing because
keys are not stored persistently.
"""
def __init__(self):
self.keys = {}
def create_key(self, ctxt, **kwargs):
"""Creates a key.
This implementation returns a UUID for the created key. A
NotAuthorized exception is raised if the specified context is None.
"""
if ctxt is None:
raise exception.NotAuthorized()
# generate the key
key_length = kwargs.get('key_length', 256)
# hex digit => 4 bits
hex_string = utils.generate_password(length=key_length / 4,
symbolgroups='0123456789ABCDEF')
_bytes = array.array('B', hex_string.decode('hex')).tolist()
_key = key.SymmetricKey('AES', _bytes)
return self.store_key(ctxt, _key)
def store_key(self, ctxt, key, **kwargs):
"""Stores (i.e., registers) a key with the key manager.
"""
if ctxt is None:
raise exception.NotAuthorized()
# generate UUID and ensure that it isn't in use
key_id = uuidutils.generate_uuid()
while key_id in self.keys:
key_id = uuidutils.generate_uuid()
self.keys[key_id] = key
return key_id
def get_key(self, ctxt, key_id, **kwargs):
"""Retrieves the key identified by the specified id.
This implementation returns the key that is associated with the
specified UUID. A NotAuthorized exception is raised if the specified
context is None; a KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
return self.keys[key_id]
def delete_key(self, ctxt, key_id, **kwargs):
"""Deletes the key identified by the specified id.
A NotAuthorized exception is raised if the context is None and a
KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
del self.keys[key_id]
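# Illustrative usage sketch (editor-added; any non-None object can serve as the context,
# since this mock only checks that ctxt is not None):
#
#     mgr = MockKeyManager()
#     key_id = mgr.create_key(ctxt)
#     secret_key = mgr.get_key(ctxt, key_id)
#     mgr.delete_key(ctxt, key_id)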
| apache-2.0 | -1,597,223,274,417,341,200 | 32.803738 | 78 | 0.66298 | false | 4.357831 | false | false | false |
whelan957/leetcode | python3/Design/leetcode000First Unique Number2.py | 1 | 3883 | # You have a queue of integers, you need to retrieve the first unique integer in the queue.
# Implement the FirstUnique class:
# FirstUnique(int[] nums) Initializes the object with the numbers in the queue.
# int showFirstUnique() returns the value of the first unique integer of the queue, and returns -1 if there is no such integer.
# void add(int value) insert value to the queue.
# Example 1:
# Input:
# ["FirstUnique","showFirstUnique","add","showFirstUnique","add","showFirstUnique","add","showFirstUnique"]
# [[[2,3,5]],[],[5],[],[2],[],[3],[]]
# Output:
# [null,2,null,2,null,3,null,-1]
# Explanation:
# FirstUnique firstUnique = new FirstUnique([2,3,5]);
# firstUnique.showFirstUnique(); // return 2
# firstUnique.add(5); // the queue is now [2,3,5,5]
# firstUnique.showFirstUnique(); // return 2
# firstUnique.add(2); // the queue is now [2,3,5,5,2]
# firstUnique.showFirstUnique(); // return 3
# firstUnique.add(3); // the queue is now [2,3,5,5,2,3]
# firstUnique.showFirstUnique(); // return -1
# Example 2:
# Input:
# ["FirstUnique","showFirstUnique","add","add","add","add","add","showFirstUnique"]
# [[[7,7,7,7,7,7]],[],[7],[3],[3],[7],[17],[]]
# Output:
# [null,-1,null,null,null,null,null,17]
# Explanation:
# FirstUnique firstUnique = new FirstUnique([7,7,7,7,7,7]);
# firstUnique.showFirstUnique(); // return -1
# firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7]
# firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3]
# firstUnique.add(3); // the queue is now [7,7,7,7,7,7,7,3,3]
# firstUnique.add(7); // the queue is now [7,7,7,7,7,7,7,3,3,7]
# firstUnique.add(17); // the queue is now [7,7,7,7,7,7,7,3,3,7,17]
# firstUnique.showFirstUnique(); // return 17
# Example 3:
# Input:
# ["FirstUnique","showFirstUnique","add","showFirstUnique"]
# [[[809]],[],[809],[]]
# Output:
# [null,809,null,-1]
# Explanation:
# FirstUnique firstUnique = new FirstUnique([809]);
# firstUnique.showFirstUnique(); // return 809
# firstUnique.add(809); // the queue is now [809,809]
# firstUnique.showFirstUnique(); // return -1
# Constraints:
# 1 <= nums.length <= 10^5
# 1 <= nums[i] <= 10^8
# 1 <= value <= 10^8
# At most 50000 calls will be made to showFirstUnique and add.
from typing import List
class Node:
def __init__(self,val):
self.val = val
self.prev = None
self.next = None
class DLL:
def __init__(self):
self.head = Node(-1)
self.tail = Node(-1)
self.head.next, self.tail.prev = self.tail, self.head
self.count = 0
def insert(self, val):
newNode = Node(val)
newNode.prev = self.tail.prev
newNode.next = self.tail
self.tail.prev.next = newNode
self.tail.prev = newNode
self.count += 1
return newNode
def remove(self, node):
prev, nxt = node.prev, node.next
node.prev.next = nxt
node.next.prev = prev
self.count -= 1
def isEmpty(self):
return (self.count == 0)
# The bookkeeping is the same as in an LRU cache:
# a doubly linked list stores only the numbers that are still unique (the first unique
# number sits at the front, right after the head), and a hashmap maps each number to its
# node in the list, or to -1 once the number has been seen more than once.
# Upon seeing a duplicate, delete its node from the DLL in O(1).
# A short self-check follows the class below.
class FirstUnique:
def __init__(self, nums: List[int]):
self.dll = DLL()
self.numDict = {}
for num in nums:
self.add(num)
def showFirstUnique(self) -> int:
if self.dll.isEmpty():
return -1
return self.dll.head.next.val
def add(self, value: int) -> None:
        if value not in self.numDict:
            self.numDict[value] = self.dll.insert(value)
        elif self.numDict[value] != -1:
            # value was unique until now: drop it from the DLL and mark it as repeated
            self.dll.remove(self.numDict[value])
            self.numDict[value] = -1
        # otherwise the value is already a known repeat; nothing to update
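# Editor-added illustration (not part of the original submission): a minimal
# self-check mirroring Example 1 from the problem statement above.
if __name__ == "__main__":
    fu = FirstUnique([2, 3, 5])
    assert fu.showFirstUnique() == 2
    fu.add(5)
    fu.add(2)
    assert fu.showFirstUnique() == 3
    fu.add(3)
    assert fu.showFirstUnique() == -1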
| gpl-3.0 | 9,048,359,477,316,779,000 | 30.827869 | 127 | 0.608035 | false | 3.086645 | false | false | false |
dmccloskey/SBaaS_isotopomer | template_scripts/template_analyzeMRMdata.py | 1 | 6931 | import sys
sys.path.append('C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_base')
#sys.path.append('C:/Users/dmccloskey/Google Drive/SBaaS_base')
from SBaaS_base.postgresql_settings import postgresql_settings
from SBaaS_base.postgresql_orm import postgresql_orm
# read in the settings file
filename = 'C:/Users/dmccloskey-sbrg/Google Drive/SBaaS_base/settings_1.ini';
#filename = 'C:/Users/dmccloskey/Google Drive/SBaaS_base/settings_2.ini';
pg_settings = postgresql_settings(filename);
# connect to the database from the settings file
pg_orm = postgresql_orm();
pg_orm.set_sessionFromSettings(pg_settings.database_settings);
session = pg_orm.get_session();
engine = pg_orm.get_engine();
# your app...
sys.path.append(pg_settings.datadir_settings['drive']+'/SBaaS_LIMS')
sys.path.append(pg_settings.datadir_settings['drive']+'/SBaaS_isotopomer')
sys.path.append(pg_settings.datadir_settings['github']+'/io_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/calculate_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/MDV_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/molmass')
sys.path.append(pg_settings.datadir_settings['github']+'/matplotlib_utilities')
sys.path.append(pg_settings.datadir_settings['github']+'/quantification_analysis')
##Analyze the MRM data
#make the results table to analyze the MRM data
from SBaaS_isotopomer.stage01_isotopomer_MQResultsTable_execute import stage01_isotopomer_MQResultsTable_execute
exmqrt01 = stage01_isotopomer_MQResultsTable_execute(session,engine,pg_settings.datadir_settings);
exmqrt01.drop_dataStage01_isotopomer_MQResultsTable();
exmqrt01.initialize_dataStage01_isotopomer_MQResultsTable();
exmqrt01.execute_deleteExperimentFromMQResultsTable('ALEsKOs01',sample_types_I = ['Quality Control','Unknown','Standard','Blank'])
exmqrt01.import_dataStage01IsotopomerMQResultsTable_add('data/tests/analysis_isotopomer/150911_Isotopomer_ALEsKOs01_tpiAEvo01-04_samples01.csv');
exmqrt01.export_dataStage01MQResultsTable_metricPlot_js('chemoCLim01',component_names_I = ['fdp.fdp_1.Light'],measurement_I='RT');
#make the normalized methods tables
from SBaaS_isotopomer.stage01_isotopomer_normalized_execute import stage01_isotopomer_normalized_execute
normalized01 = stage01_isotopomer_normalized_execute(session,engine,pg_settings.datadir_settings);
normalized01.drop_dataStage01_isotopomer_normalized();
normalized01.initialize_dataStage01_isotopomer_normalized();
normalized01.reset_dataStage01_isotopomer_normalized('ALEsKOs01');
# build the spectrums from MRM
normalized01.execute_buildSpectrumFromMRMs('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc',
],
met_ids_I=[
]
);
# export the data to .csv
normalized01.export_dataStage01IsotopomerNormalized_csv('ALEsKOs01',
filename_O = 'data/tests/analysis_isotopomer/normalized_MRM.csv',
sample_name_abbreviation_I='%',
time_point_I='%',
scan_type_I='%',
met_id_I='%')
#export spectrums to js
normalized01.export_dataStage01IsotopomerNormalized_js('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'
],
met_ids_I=[],
scan_types_I=['MRM'],
single_plot_I = False,
);
#export spectrums to matplotlib
normalized01.plot_normalizedSpectrum('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'
],
met_ids_I=[],
scan_types_I=['MRM'],
);
# update the DB from .csv
# NOTE: by_id = True should be used for most cases, but for this example the row ids will not match what is in the DB
normalized01.import_dataStage01IsotopomerNormalized_update('data/tests/analysis_isotopomer/150911_Isotopomer_ALEsKOs01_tpiAEvo01-04_normalizedUpdate01.csv',
by_id = False)
# update specific samples
normalized01.execute_updateNormalizedSpectrum('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'
],
met_ids_I=[],
scan_types_I=['MRM']
);
#make the averages methods tables
from SBaaS_isotopomer.stage01_isotopomer_averages_execute import stage01_isotopomer_averages_execute
ave01 = stage01_isotopomer_averages_execute(session,engine,pg_settings.datadir_settings);
ave01.drop_dataStage01_isotopomer_averages();
ave01.initialize_dataStage01_isotopomer_averages();
ave01.reset_dataStage01_isotopomer_averages('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'],
scan_types_I = ['MRM']);
# calculate the spectrum averages for specific met_ids and/or scan_types
ave01.execute_analyzeAverages('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'],
met_ids_I=[],
scan_types_I = ['MRM']);
# calculate averages by normalizing the spectrum to 1.0 for specific met_ids and/or scan_types
ave01.execute_analyzeAveragesNormSum('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'
],
met_ids_I=[],
scan_types_I=['MRM']
);
# review the spectrums in excel
ave01.export_dataStage01IsotopomerAveragesNormSum_csv('ALEsKOs01',
filename_O='data/tests/analysis_isotopomer/averagesNormSum.csv',
sample_name_abbreviation_I='%',
time_point_I='%',
sample_type_I='%',
scan_type_I='%',
met_id_I='%')
# export the spectrums to matplotlib
ave01.plot_averageSpectrumNormSum('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'
],
met_ids_I=[],
scan_types_I=['MRM']
);
# export the spectrums to .js
ave01.export_dataStage01IsotopomerAveragesNormSum_js('ALEsKOs01',
sample_name_abbreviations_I=[
'OxicEvo04tpiAEvo01EPEcoli13CGlc',
'OxicEvo04tpiAEvo02EPEcoli13CGlc',
'OxicEvo04tpiAEvo03EPEcoli13CGlc',
'OxicEvo04tpiAEvo04EPEcoli13CGlc'
],
met_ids_I=[],
scan_types_I=['MRM'],
single_plot_I = False,
); | mit | 6,447,267,082,143,363,000 | 39.776471 | 156 | 0.747367 | false | 2.679165 | false | false | false |
NewEconomyMovement/nem-py | Account.py | 2 | 1522 | import ed25519
from python_sha3.python_sha3 import *
import base64
import hashlib
from binascii import hexlify, unhexlify
class Account:
def __init__(self, hexPrivKey, network='mainnet'):
self.hexPrivKey = hexPrivKey
self.network = network
self._calculateKeyPair()
self._calculateAddress()
def _calculateKeyPair(self):
self.sk = unhexlify(self.hexPrivKey)[::-1]
self.pk = ed25519.publickey_hash_unsafe(self.sk, sha3_512)
self.hexPublicKey = hexlify(self.pk)
def _calculateAddress(self):
pubkey = self.pk
s = sha3_256()
s.update(pubkey)
sha3_pubkey = s.digest()
h = hashlib.new('ripemd160')
h.update(sha3_pubkey)
ripe = h.digest()
if self.network == 'testnet':
version = "\x98" + ripe
else:
version = "\x68" + ripe
s2 = sha3_256()
s2.update(version)
checksum = s2.digest()[0:4]
self.address = base64.b32encode(version + checksum)
def getHexPublicKey(self):
return self.hexPublicKey
def getHexPrivateKey(self):
return self.hexPrivKey
def getAddress(self):
return self.address
def sign(self, binMessage):
signature = ed25519.signature_hash_unsafe(binMessage, self.sk, self.pk, sha3_512)
# print ' sig:', hexlify(signature)
return signature
def verify(self, hexedMessage):
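        # Not implemented in this snippet; signature verification is left as a stub.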
pass
| mit | 7,082,993,817,338,588,000 | 23.79661 | 89 | 0.583443 | false | 3.703163 | false | false | false |