repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
bacaldwell/ironic | ironic/drivers/modules/agent_client.py | 1 | 10718 | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
import requests
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
agent_opts = [
cfg.StrOpt('agent_api_version',
default='v1',
help=_('API version to use for communicating with the ramdisk '
'agent.'))
]
CONF = cfg.CONF
CONF.register_opts(agent_opts, group='agent')
LOG = log.getLogger(__name__)
DEFAULT_IPA_PORTAL_PORT = 3260
class AgentClient(object):
"""Client for interacting with nodes via a REST API."""
def __init__(self):
self.session = requests.Session()
self.session.headers.update({'Content-Type': 'application/json'})
def _get_command_url(self, node):
agent_url = node.driver_internal_info.get('agent_url')
if not agent_url:
raise exception.IronicException(_('Agent driver requires '
'agent_url in '
'driver_internal_info'))
return ('%(agent_url)s/%(api_version)s/commands' %
{'agent_url': agent_url,
'api_version': CONF.agent.agent_api_version})
def _get_command_body(self, method, params):
return jsonutils.dumps({
'name': method,
'params': params,
})
def _command(self, node, method, params, wait=False):
url = self._get_command_url(node)
body = self._get_command_body(method, params)
request_params = {
'wait': str(wait).lower()
}
LOG.debug('Executing agent command %(method)s for node %(node)s',
{'node': node.uuid, 'method': method})
try:
response = self.session.post(url, params=request_params, data=body)
except requests.RequestException as e:
msg = (_('Error invoking agent command %(method)s for node '
'%(node)s. Error: %(error)s') %
{'method': method, 'node': node.uuid, 'error': e})
LOG.error(msg)
raise exception.IronicException(msg)
# TODO(russellhaering): real error handling
try:
result = response.json()
except ValueError:
msg = _(
'Unable to decode response as JSON.\n'
'Request URL: %(url)s\nRequest body: "%(body)s"\n'
'Response status code: %(code)s\n'
'Response: "%(response)s"'
) % ({'response': response.text, 'body': body, 'url': url,
'code': response.status_code})
LOG.error(msg)
raise exception.IronicException(msg)
LOG.debug('Agent command %(method)s for node %(node)s returned '
'result %(res)s, error %(error)s, HTTP status code %(code)d',
{'node': node.uuid, 'method': method,
'res': result.get('command_result'),
'error': result.get('command_error'),
'code': response.status_code})
return result
def get_commands_status(self, node):
url = self._get_command_url(node)
LOG.debug('Fetching status of agent commands for node %s', node.uuid)
resp = self.session.get(url)
result = resp.json()['commands']
status = '; '.join('%(cmd)s: result "%(res)s", error "%(err)s"' %
{'cmd': r.get('command_name'),
'res': r.get('command_result'),
'err': r.get('command_error')}
for r in result)
LOG.debug('Status of agent commands for node %(node)s: %(status)s',
{'node': node.uuid, 'status': status})
return result
def prepare_image(self, node, image_info, wait=False):
"""Call the `prepare_image` method on the node."""
LOG.debug('Preparing image %(image)s on node %(node)s.',
{'image': image_info.get('id'),
'node': node.uuid})
params = {'image_info': image_info}
# this should be an http(s) URL
configdrive = node.instance_info.get('configdrive')
if configdrive is not None:
params['configdrive'] = configdrive
return self._command(node=node,
method='standby.prepare_image',
params=params,
wait=wait)
def start_iscsi_target(self, node, iqn,
portal_port=DEFAULT_IPA_PORTAL_PORT,
wipe_disk_metadata=False):
"""Expose the node's disk as an ISCSI target.
:param node: an Ironic node object
:param iqn: iSCSI target IQN
:param portal_port: iSCSI portal port
:param wipe_disk_metadata: True if the agent should wipe first the
disk magic strings like the partition
table, RAID or filesystem signature.
"""
params = {'iqn': iqn}
# This is to workaround passing default values to an old ramdisk
# TODO(vdrok): remove this workaround in Ocata release
if portal_port != DEFAULT_IPA_PORTAL_PORT:
params['portal_port'] = portal_port
if wipe_disk_metadata:
params['wipe_disk_metadata'] = wipe_disk_metadata
while True:
result = self._command(node=node,
method='iscsi.start_iscsi_target',
params=params,
wait=True)
if (result['command_status'] == 'FAILED' and
result['command_error']['type'] == 'TypeError'):
message = result['command_error']['message']
if 'wipe_disk_metadata' in message:
# wipe_disk_metadata was introduced after portal_port, so
# portal_port might still work, retry
LOG.warning(_LW(
"The ironic python agent in the ramdisk on node "
"%(node)s failed to start the iSCSI target because "
"it doesn't support wipe_disk_metadata parameter, "
"retrying without passing it. If you need to have "
"node's root disk wiped before exposing it via iSCSI, "
"or because https://bugs.launchpad.net/bugs/1550604 "
"affects you, please update the ramdisk to use "
"version >= 1.3 (Newton, or higher) of ironic python "
"agent."), {'node': node.uuid})
# NOTE(vdrok): This is needed to make unit test's
# assert_has_calls work, otherwise it will report it was
# called without wipe_disk_metadata both times as "params"
# dictionary is stored by reference in mock
params = params.copy()
del params['wipe_disk_metadata']
continue
elif 'portal_port' in message:
# It means that ironic is configured in a way that the
# deploy driver has requested some things not available
# on the old ramdisk. Since the user specified a
# non-default portal_port, we do not try again with the
# default value. Instead, the user needs to take some
# explicit action.
LOG.error(_LE(
"The ironic python agent in the ramdisk on node "
"%(node)s failed to start the iSCSI target because "
"the agent doesn't support portal_port parameter. "
"Please update the ramdisk to use version >= 1.3 "
"(Newton, or higher) of ironic python agent, or use "
"the default value of [iscsi]portal_port config "
"option."), {'node': node.uuid})
# In all the other cases, it is a usual error, no additional action
# required, break from the loop returning the result
return result
def install_bootloader(self, node, root_uuid, efi_system_part_uuid=None):
"""Install a boot loader on the image."""
params = {'root_uuid': root_uuid,
'efi_system_part_uuid': efi_system_part_uuid}
return self._command(node=node,
method='image.install_bootloader',
params=params,
wait=True)
def get_clean_steps(self, node, ports):
params = {
'node': node.as_dict(),
'ports': [port.as_dict() for port in ports]
}
return self._command(node=node,
method='clean.get_clean_steps',
params=params,
wait=True)
def execute_clean_step(self, step, node, ports):
params = {
'step': step,
'node': node.as_dict(),
'ports': [port.as_dict() for port in ports],
'clean_version': node.driver_internal_info.get(
'hardware_manager_version')
}
return self._command(node=node,
method='clean.execute_clean_step',
params=params)
def power_off(self, node):
"""Soft powers off the bare metal node by shutting down ramdisk OS."""
return self._command(node=node,
method='standby.power_off',
params={})
def sync(self, node):
"""Flush file system buffers forcing changed blocks to disk."""
return self._command(node=node,
method='standby.sync',
params={},
wait=True)
| apache-2.0 | -6,578,982,039,283,899,000 | 43.473029 | 79 | 0.52743 | false |
rwatson/chromium-capsicum | o3d/tests/selenium/pdiff_test.py | 1 | 5162 | import os
import re
import subprocess
import unittest
import sys
import selenium_utilities
import selenium_constants
class PDiffTest(unittest.TestCase):
"""A perceptual diff test class, for running perceptual diffs on any
number of screenshots."""
def __init__(self, name, num_screenshots, screenshot_name, pdiff_path,
gen_dir, ref_dir, options):
unittest.TestCase.__init__(self, name)
self.name = name
self.num_screenshots = num_screenshots
self.screenshot_name = screenshot_name
self.pdiff_path = pdiff_path
self.gen_dir = gen_dir
self.ref_dir = ref_dir
self.options = options
def shortDescription(self):
"""override unittest.TestCase shortDescription for our own descriptions."""
return "Screenshot comparison for: " + self.name
def PDiffTest(self):
"""Runs a generic Perceptual Diff test."""
# Get arguments for perceptual diff.
pixel_threshold = "10"
alpha_threshold = "1.0"
use_colorfactor = False
use_downsample = False
use_edge = True
edge_threshold = "5"
for opt in self.options:
if opt.startswith("pdiff_threshold"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("pdiff_threshold_mac") and
sys.platform == "darwin"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("pdiff_threshold_win") and
sys.platform == 'win32' or sys.platform == "cygwin"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("pdiff_threshold_linux") and
sys.platform[:5] == "linux"):
pixel_threshold = selenium_utilities.GetArgument(opt)
elif (opt.startswith("colorfactor")):
colorfactor = selenium_utilities.GetArgument(opt)
use_colorfactor = True
elif (opt.startswith("downsample")):
downsample_factor = selenium_utilities.GetArgument(opt)
use_downsample = True
elif (opt.startswith("pdiff_edge_ignore_off")):
use_edge = False
elif (opt.startswith("pdiff_edge_threshold")):
edge_threshold = selenium_utilities.GetArgument(opt)
results = []
# Loop over number of screenshots.
for screenshot_no in range(self.num_screenshots):
# Find reference image.
shotname = self.screenshot_name + str(screenshot_no + 1)
J = os.path.join
platform_img_path = J(self.ref_dir,
selenium_constants.PLATFORM_SCREENSHOT_DIR,
shotname + '_reference.png')
reg_img_path = J(self.ref_dir,
selenium_constants.DEFAULT_SCREENSHOT_DIR,
shotname + '_reference.png')
if os.path.exists(platform_img_path):
ref_img_path = platform_img_path
elif os.path.exists(reg_img_path):
ref_img_path = reg_img_path
else:
self.fail('Reference image for ' + shotname + ' not found.')
# Find generated image.
gen_img_path = J(self.gen_dir, shotname + '.png')
diff_img_path = J(self.gen_dir, 'cmp_' + shotname + '.png')
self.assertTrue(os.path.exists(gen_img_path),
'Generated screenshot for ' + shotname + ' not found.\n')
# Run perceptual diff
arguments = [self.pdiff_path,
ref_img_path,
gen_img_path,
"-output", diff_img_path,
"-fov", "45",
"-alphaThreshold", alpha_threshold,
# Turn on verbose output for the perceptual diff so we
# can see how far off we are on the threshold.
"-verbose",
# Set the threshold to zero so we can get a count
# of the different pixels. This causes the program
# to return failure for most images, but we can compare
# the values ourselves below.
"-threshold", "0"]
if use_colorfactor:
arguments += ["-colorfactor", colorfactor]
if use_downsample:
arguments += ["-downsample", downsample_factor]
if use_edge:
arguments += ["-ignoreEdges", edge_threshold]
pdiff_pipe = subprocess.Popen(arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(pdiff_stdout, pdiff_stderr) = pdiff_pipe.communicate()
result = pdiff_pipe.returncode
# Find out how many pixels were different by looking at the output.
pixel_re = re.compile("(\d+) pixels are different", re.DOTALL)
pixel_match = pixel_re.search(pdiff_stdout)
different_pixels = "0"
if pixel_match:
different_pixels = pixel_match.group(1)
results += [(shotname, int(different_pixels))]
all_tests_passed = True
msg = "Pixel threshold is %s. Failing screenshots:\n" % pixel_threshold
for name, pixels in results:
if pixels >= int(pixel_threshold):
all_tests_passed = False
msg += " %s, differing by %s\n" % (name, str(pixels))
self.assertTrue(all_tests_passed, msg)
| bsd-3-clause | -601,721,010,795,747,200 | 38.106061 | 79 | 0.603061 | false |
WilJoey/tn_ckan | ckan/migration/versions/037_role_anon_editor.py | 1 | 1728 | from sqlalchemy import *
from sqlalchemy.sql import select, and_
from migrate import *
import logging
log = logging.getLogger(__name__)
def upgrade(migrate_engine):
'''#1066 Change Visitor role on System from "reader" to "anon_editor".'''
metadata = MetaData(migrate_engine)
# get visitor ID
user = Table('user', metadata, autoload=True)
s = select([user.c.id, user.c.name],
user.c.name == u'visitor')
results = migrate_engine.execute(s).fetchall()
if len(results) == 0:
log.debug('No visitor on the system - obviously init hasn\'t been run yet' \
' and that will init visitor to an anon_editor')
return
visitor_id, visitor_name = results[0]
# find visitor role as reader on system
uor = Table('user_object_role', metadata, autoload=True)
visitor_system_condition = and_(uor.c.context == u'System',
uor.c.user_id == visitor_id)
s = select([uor.c.context, uor.c.user_id, uor.c.role],
visitor_system_condition)
results = migrate_engine.execute(s).fetchall()
if len(results) != 1:
log.warn('Could not find a Right for a Visitor on the System')
return
context, user_id, role = results[0]
if role != 'reader':
log.info('Visitor right for the System is not "reader", so not upgrading it to anon_editor.')
return
# change visitor role to anon_editor
log.info('Visitor is a "reader" on the System, so upgrading it to "anon_editor".')
sql = uor.update().where(visitor_system_condition).\
values(role=u'anon_editor')
migrate_engine.execute(sql)
def downgrade(migrate_engine):
raise NotImplementedError()
| mit | -45,108,048,257,190,770 | 35 | 101 | 0.634259 | false |
TIGER-NET/Temporal_profile_tool | ui/dlgabout.py | 1 | 2609 | # -*- coding: utf-8 -*-
"""
***************************************************************************
temporalprofileplugin.py
-------------------------------------
Copyright (C) 2014 TIGER-NET (www.tiger-net.org)
Based on Profile tool plugin:
Copyright (C) 2012 Patrice Verchere
***************************************************************************
* This plugin is part of the Water Observation Information System (WOIS) *
* developed under the TIGER-NET project funded by the European Space *
* Agency as part of the long-term TIGER initiative aiming at promoting *
* the use of Earth Observation (EO) for improved Integrated Water *
* Resources Management (IWRM) in Africa. *
* *
* WOIS is a free software i.e. you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation, either version 3 of the License, *
* or (at your option) any later version. *
* *
* WOIS is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
* for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************
"""
from PyQt4 import uic
from PyQt4.QtCore import QSettings
from PyQt4.QtGui import QDialog
import platform
import os
uiFilePath = os.path.abspath(os.path.join(os.path.dirname(__file__), 'about.ui'))
FormClass = uic.loadUiType(uiFilePath)[0]
class DlgAbout(QDialog, FormClass):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setupUi(self)
fp = os.path.join( os.path.abspath(os.path.join(os.path.dirname(__file__),"..")) , "metadata.txt")
iniText = QSettings(fp, QSettings.IniFormat)
verno = iniText.value("version")
name = iniText.value("name")
description = iniText.value("description")
self.title.setText( name )
self.description.setText( description + " - " + verno)
| gpl-3.0 | -6,917,851,896,494,671,000 | 41.770492 | 106 | 0.520889 | false |
nikkomidoy/project_soa | tests/engine.py | 1 | 5462 | from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.settings['python_version']
)
python_package.build()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage()
postgres_package.build()
redis_package = hitchredis.RedisPackage()
redis_package.build()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=float(self.settings["shutdown_timeout"]),
)
postgres_user = hitchpostgres.PostgresUser("project_soa", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("project_soa", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
settings="project_soa.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("xvfb", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Docs : https://hitchtest.readthedocs.org/en/latest/plugins/hitchselenium.html
self.driver = self.services['Firefox'].driver
self.webapp = hitchselenium.SeleniumStepLibrary(
selenium_webdriver=self.driver,
wait_for_timeout=5,
)
# Add selenium steps
self.click = self.webapp.click
self.wait_to_appear = self.webapp.wait_to_appear
self.wait_to_contain = self.webapp.wait_to_contain
self.wait_for_any_to_contain = self.webapp.wait_for_any_to_contain
self.click_and_dont_wait_for_page_load = self.webapp.click_and_dont_wait_for_page_load
# Configure selenium driver
self.driver.set_window_size(self.settings['window_size']['width'], self.settings['window_size']['height'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
self.click("djHideToolBarButton")
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def click_on_link_in_last_email(self, which=1):
"""Click on the nth link in the last email sent."""
self.driver.get(
self.services['HitchSMTP'].logs.json()[-1]['links'][which - 1]
)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
| mit | -8,286,656,587,252,442,000 | 34.23871 | 114 | 0.617906 | false |
GeoMop/GeoMop | testing_integration/Analysis/store_restore.py | 1 | 3919 | import os
import shutil
import subprocess
from client_pipeline.mj_preparation import *
from pipeline.pipeline_processor import *
# setting testing directory
test_dir = "d:/test/store_restore"
# remove old files
workspace = os.path.join(test_dir, "workspace")
shutil.rmtree(workspace, ignore_errors=True)
# copy files to testing directory
shutil.copytree("store_restore_res/workspace", workspace)
# ------------
# analysis an1
# ------------
# prepare mj
analysis="an1"
mj="mj1"
python_script="s.py"
pipeline_name="Pipeline_5"
err, input_files = MjPreparation.prepare(workspace=workspace, analysis=analysis, mj=mj,
python_script=python_script, pipeline_name=pipeline_name)
if len(err) > 0:
for e in err:
print(e)
exit()
# mj_config_dir
mj_config_dir = os.path.join(workspace, analysis, "mj", mj, "mj_config")
# change cwd
cwd = os.getcwd()
os.chdir(mj_config_dir)
# run script
try:
with open(python_script, 'r') as fd:
script_text = fd.read()
except (RuntimeError, IOError) as e:
print("Can't open script file: {0}".format(e))
exit()
action_types.__action_counter__ = 0
exec(script_text)
pipeline = locals()[pipeline_name]
# pipeline processor
pp = Pipelineprocessor(pipeline)
# validation
err = pp.validate()
if len(err) > 0:
for e in err:
print(e)
exit()
# run pipeline
names = []
pp.run()
i = 0
while pp.is_run():
runner = pp.get_next_job()
if runner is None:
time.sleep(0.1)
else:
names.append(runner.name)
command = runner.command
if command[0] == "flow123d":
command[0] = "flow123d.bat"
process = subprocess.Popen(command, stderr=subprocess.PIPE)
return_code = process.wait(10)
if return_code is not None:
#print(process.stderr)
pass
pp.set_job_finished(runner.id)
i += 1
assert i < 1000, "Timeout"
print("\nrun flows\n---------")
for name in names:
print(name)
print("")
# return cwd
os.chdir(cwd)
# ------------
# analysis an2
# ------------
# prepare mj
analysis="an2"
mj="mj1"
python_script="s.py"
pipeline_name="Pipeline_7"
last_analysis="an1"
err, input_files = MjPreparation.prepare(workspace=workspace, analysis=analysis, mj=mj,
python_script=python_script, pipeline_name=pipeline_name,
last_analysis=last_analysis)
if len(err) > 0:
for e in err:
print(e)
exit()
# mj_config_dir
mj_config_dir = os.path.join(workspace, analysis, "mj", mj, "mj_config")
# change cwd
cwd = os.getcwd()
os.chdir(mj_config_dir)
# run script
try:
with open(python_script, 'r') as fd:
script_text = fd.read()
except (RuntimeError, IOError) as e:
print("Can't open script file: {0}".format(e))
exit()
action_types.__action_counter__ = 0
exec(script_text)
pipeline = locals()[pipeline_name]
# identical list
il_file = os.path.join(mj_config_dir, "identical_list.json")
if not os.path.isfile(il_file):
il_file = None
# pipeline processor
pp = Pipelineprocessor(pipeline, identical_list=il_file)
# validation
err = pp.validate()
if len(err) > 0:
for e in err:
print(e)
exit()
# run pipeline
names = []
pp.run()
i = 0
while pp.is_run():
runner = pp.get_next_job()
if runner is None:
time.sleep(0.1)
else:
names.append(runner.name)
command = runner.command
if command[0] == "flow123d":
command[0] = "flow123d.bat"
process = subprocess.Popen(command, stderr=subprocess.PIPE)
return_code = process.wait(10)
if return_code is not None:
#print(process.stderr)
pass
pp.set_job_finished(runner.id)
i += 1
assert i < 1000, "Timeout"
print("\nrun flows\n---------")
for name in names:
print(name)
print("")
# return cwd
os.chdir(cwd)
| gpl-3.0 | -8,578,005,189,924,669,000 | 21.267045 | 98 | 0.615718 | false |
Xreki/Xreki.github.io | fluid/inference/inference_transpiler.py | 1 | 1937 | import os
import sys
import argparse
import paddle.fluid as fluid
def Transpile(src_dir, dst_dir, model_filename, params_filename):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
print "Loading inference_program from ", src_dir
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(src_dir, exe, model_filename, params_filename)
inference_transpiler_program = inference_program.clone()
# NOTE: Applying the inference transpiler will change the inference_transpiler_program.
t = fluid.InferenceTranspiler()
# Under the with statement, inference_scope is the global scope.
t.transpile(inference_transpiler_program, place)
#print inference_transpiler_program
print "Saving the optimized inference_program to ", dst_dir
# There is a bug in fluid.io.save_inference_model, so we can use the following code instead.
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
model_path = os.path.join(dst_dir, model_filename)
with open(model_path, "wb") as f:
f.write(inference_transpiler_program.desc.serialize_to_string())
fluid.io.save_persistables(exe, dst_dir, inference_transpiler_program, params_filename)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir', help='Source directory of inference model')
parser.add_argument('--dst_dir', help='Destination directory of inference model')
parser.add_argument('--model_filename', default=None, help='The name of model file')
parser.add_argument('--params_filename', default=None, help='The name of params file')
args = parser.parse_args()
Transpile(args.src_dir, args.dst_dir, args.model_filename, args.params_filename)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,947,622,634,226,983,000 | 41.108696 | 102 | 0.690243 | false |
erudit/zenon | tests/unit/apps/public/auth/test_views.py | 1 | 3041 | import pytest
from unittest import mock
from django.urls import reverse
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory
from base.test.factories import UserFactory
from base.test.factories import get_authenticated_request
from erudit.test.factories import JournalFactory
from core.authorization.test.factories import AuthorizationFactory
from core.authorization.defaults import AuthorizationConfig as AC
from apps.public.auth.views import (
UserPersonalDataUpdateView,
UserParametersUpdateView,
UserLoginLandingRedirectView,
UserPasswordChangeView,
)
@pytest.fixture()
def test_view(monkeypatch):
monkeypatch.setattr('apps.public.auth.views.messages', mock.Mock())
return UserLoginLandingRedirectView()
@pytest.mark.django_db
class TestUserLoginLandingRedirectView:
def test_login_redirects_superuser_to_dashboard(self, test_view):
superuser = UserFactory(is_superuser=True)
request = get_authenticated_request(user=superuser)
test_view.request = request
assert test_view.get_redirect_url() == reverse('userspace:dashboard')
def test_login_redirects_individual_subscriber_to_next_if_next_is_specified(self, test_view):
normal_user = UserFactory()
request = RequestFactory().get('/', data={"next": "/fr/revues/"})
request.user = normal_user
test_view.request = request
assert test_view.get_redirect_url() == '/fr/revues/'
def test_login_redirects_individual_subscriber_to_homepage_if_no_referer(self, test_view):
normal_user = UserFactory()
request = get_authenticated_request(user=normal_user)
test_view.request = request
assert test_view.get_redirect_url() == reverse('public:home')
def test_login_redirects_journal_member_to_dashboard(self, test_view):
normal_user = UserFactory()
request = get_authenticated_request(user=normal_user)
journal = JournalFactory()
journal.members.add(normal_user)
journal.save()
AuthorizationFactory.create(
content_type=ContentType.objects.get_for_model(journal), object_id=journal.id,
user=normal_user, authorization_codename=AC.can_edit_journal_information.codename)
test_view.request = request
assert test_view.get_redirect_url() == reverse('userspace:dashboard')
@pytest.mark.django_db
class TestCanModifyAccountMixin:
@pytest.mark.parametrize('view', [
(UserPersonalDataUpdateView),
(UserParametersUpdateView),
(UserPasswordChangeView),
])
@pytest.mark.parametrize('user', [
(UserFactory),
(AnonymousUser),
])
def test_can_modify_account_does_not_crash(self, user, view):
view = view()
view.object = mock.MagicMock()
view.request = mock.MagicMock()
view.request.user = user()
context = view.get_context_data()
assert context['can_modify_account']
| gpl-3.0 | -4,681,696,006,401,395,000 | 37.0125 | 97 | 0.710622 | false |
minireference/noBSLAnotebooks | aspynb/Linear_algebra_chapters_overview.py | 1 | 23478 | def cells():
'''
# Linear algebra overview
'''
'''
'''
'''
Linear algebra is the study of **vectors** and **linear transformations**. This notebook introduces concepts form linear algebra in a birds-eye overview. The goal is not to get into the details, but to give the reader a taste of the different types of thinking: computational, geometrical, and theoretical, that are used in linear algebra.
'''
'''
'''
'''
## Chapters overview
- 1/ Math fundamentals
- 2/ Intro to linear algebra
- Vectors
- Matrices
- Matrix-vector product representation of linear transformations
- Linear property: $f(a\mathbf{x} + b\mathbf{y}) = af(\mathbf{x}) + bf(\mathbf{y})$
- 3/ Computational linear algebra
- Gauss-Jordan elimination procedure
- Augemnted matrix representaiton of systems of linear equations
- Reduced row echelon form
- Matrix equations
- Matrix operations
- Matrix product
- Determinant
- Matrix inverse
- 4/ Geometrical linear algebra
- Points, lines, and planes
- Projection operation
- Coordinates
- Vector spaces
- Vector space techniques
- 5/ Linear transformations
- Vector functions
- Input and output spaces
- Matrix representation of linear transformations
- Column space and row spaces of matrix representations
- Invertible matrix theorem
- 6/ Theoretical linear algebra
- Eigenvalues and eigenvectors
- Special types of matrices
- Abstract vector spaces
- Abstract inner product spaces
- Gram–Schmidt orthogonalization
- Matrix decompositions
- Linear algebra with complex numbers
- 7/ Applications
- 8/ Probability theory
- 9/ Quantum mechanics
- Notation appendix
'''
'''
'''
# helper code needed for running in colab
if 'google.colab' in str(get_ipython()):
print('Downloading plot_helpers.py to util/ (only needed for colab)')
!mkdir util; wget https://raw.githubusercontent.com/minireference/noBSLAnotebooks/master/util/plot_helpers.py -P util
'''
'''
# setup SymPy
from sympy import *
x, y, z, t = symbols('x y z t')
init_printing()
# a vector is a special type of matrix (an n-vector is either a nx1 or a 1xn matrix)
Vector = Matrix # define alias Vector so I don't have to explain this during video
Point = Vector # define alias Point for Vector since they're the same thing
# setup plotting
%matplotlib inline
import matplotlib.pyplot as mpl
from util.plot_helpers import plot_vec, plot_vecs, plot_line, plot_plane, autoscale_arrows
'''
'''
'''
# 1/ Math fundamentals
'''
'''
'''
'''
Linear algebra builds upon high school math concepts like:
- Numbers (integers, rationals, reals, complex numbers)
- Functions ($f(x)$ takes an input $x$ and produces an output $y$)
- Basic rules of algebra
- Geometry (lines, curves, areas, triangles)
- The cartesian plane
'''
'''
'''
'''
'''
'''
# 2/ Intro to linear algebra
Linear algebra is the study of vectors and matrices.
'''
'''
'''
'''
## Vectors
'''
'''
'''
# define two vectors
u = Vector([2,3])
v = Vector([3,0])
u
'''
'''
v
'''
'''
plot_vecs(u, v)
autoscale_arrows()
'''
'''
'''
## Vector operations
'''
'''
'''
'''
- Addition (denoted $\vec{u}+\vec{v}$)
- Subtraction, the inverse of addition (denoted $\vec{u}-\vec{v}$)
- Scaling (denoted $\alpha \vec{u}$)
- Dot product (denoted $\vec{u} \cdot \vec{v}$)
- Cross product (denoted $\vec{u} \times \vec{v}$)
'''
'''
'''
'''
### Vector addition
'''
'''
'''
# algebraic
u+v
'''
'''
# graphical
plot_vecs(u, v)
plot_vec(v, at=u, color='b')
plot_vec(u+v, color='r')
autoscale_arrows()
'''
'''
'''
### Basis
When we describe the vector as the coordinate pair $(4,6)$, we're implicitly using the *standard basis* $B_s = \{ \hat{\imath}, \hat{\jmath} \}$. The vector $\hat{\imath} \equiv (1,0)$ is a unit-length vector in the $x$-direction,
and $\hat{\jmath} \equiv (0,1)$ is a unit-length vector in the $y$-direction.
To be more precise when referring to vectors, we can indicate the basis as a subscript of every coordinate vector $\vec{v}=(4,6)_{B_s}$, which tells us that $\vec{v}= 4\hat{\imath}+6\hat{\jmath}=4(1,0) +6(0,1)$.
'''
'''
'''
# the standard basis
ihat = Vector([1,0])
jhat = Vector([0,1])
v = 4*ihat + 6*jhat
v
'''
'''
# geometrically...
plot_vecs(ihat, jhat, 4*ihat, 6*jhat, v)
autoscale_arrows()
'''
'''
'''
The same vector $\vec{v}$ will correspond to a different pair of coefficients if a different basis is used.
For example, if we use the basis $B^\prime = \{ (1,1), (1,-1) \}$, the same vector $\vec{v}$ must be expressed as $\vec{v} = 5\vec{b}_1 +(-1)\vec{b}_2=(5,-1)_{B^\prime}$.
'''
'''
'''
# another basis B' = { (1,1), (1,-1) }
b1 = Vector([ 1, 1])
b2 = Vector([ 1, -1])
v = 5*b1 + (-1)*b2
v
# How did I know 5 and -1 are the coefficients w.r.t basis {b1,b2}?
# Matrix([[1,1],[1,-1]]).inv()*Vector([4,6])
'''
'''
# geometrically...
plot_vecs(b1, b2, 5*b1, -1*b2, v)
autoscale_arrows()
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
## Matrix operations
'''
'''
'''
'''
- Addition (denoted $A+B$)
- Subtraction, the inverse of addition (denoted $A-B$)
- Scaling by a constant $\alpha$ (denoted $\alpha A$)
- Matrix-vector product (denoted $A\vec{x}$, related to linear transformations)
- Matrix product (denoted $AB$)
- Matrix inverse (denoted $A^{-1}$)
- Trace (denoted $\textrm{Tr}(A)$)
- Determinant (denoted $\textrm{det}(A)$ or $|A|$)
'''
'''
'''
'''
In linear algebra we'll extend the notion of function $f:\mathbb{R}\to \mathbb{R}$ to functions that act on vectors, called *linear transformations*. We can understand the properties of linear transformations $T$ in analogy with ordinary functions:
\begin{align*}
\textrm{function }
f:\mathbb{R}\to \mathbb{R}
& \ \Leftrightarrow \,
\begin{array}{l}
\textrm{linear transformation }
T:\mathbb{R}^{n}\! \to \mathbb{R}^{m}
\end{array} \\
\textrm{input } x\in \mathbb{R}
& \ \Leftrightarrow \
\textrm{input } \vec{x} \in \mathbb{R}^n \\
\textrm{output } f(x) \in \mathbb{R}
& \ \Leftrightarrow \
\textrm{output } T(\vec{x})\in \mathbb{R}^m \\
g\circ\! f \: (x) = g(f(x))
& \ \Leftrightarrow \
% \textrm{matrix product }
S(T(\vec{x})) \\
\textrm{function inverse } f^{-1}
& \ \Leftrightarrow \
\textrm{inverse transformation } T^{-1} \\
\textrm{zeros of } f
& \ \Leftrightarrow \
\textrm{kernel of } T \\
\textrm{image of } f
& \ \Leftrightarrow \
\begin{array}{l}
\textrm{image of } T
\end{array}
\end{align*}
'''
'''
'''
'''
## Linear property
$$
T(a\mathbf{x}_1 + b\mathbf{x}_2) = aT(\mathbf{x}_1) + bT(\mathbf{x}_2)
$$
'''
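# Quick check of the linear property for a matrix-vector product.
# This is an illustrative sketch added for clarity (the matrix and vectors are made up).
alpha, beta = symbols('alpha beta')
A = Matrix([[1, 2], [3, 4]])
x1 = Vector([1, 0])
x2 = Vector([2, 5])
(A*(alpha*x1 + beta*x2) - (alpha*(A*x1) + beta*(A*x2))).expand()   # == the zero vector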
'''
'''
'''
## Matrix-vector product representation of linear transformations
'''
'''
'''
'''
Equivalence between linear transformations $T$ and matrices $M_T$:
$$
T : \mathbb{R}^n \to \mathbb{R}^m
\qquad
\Leftrightarrow
\qquad
M_T \in \mathbb{R}^{m \times n}
$$
$$
\vec{y} = T(\vec{x})
\qquad
\Leftrightarrow
\qquad
\vec{y} = M_T\vec{x}
$$
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
'''
# 3/ Computational linear algebra
'''
'''
'''
'''
## Gauss-Jordan elimination procedure
Suppose you're asked to solve for $x_1$ and $x_2$ in the following system of equations
\begin{align*}
1x_1 + 2x_2 &= 5 \\
3x_1 + 9x_2 &= 21.
\end{align*}
'''
'''
'''
# represent as an augmented matrix
AUG = Matrix([
[1, 2, 5],
[3, 9, 21]])
AUG
'''
'''
# eliminate x_1 in second equation by subtracting 3x times the first equation
AUG[1,:] = AUG[1,:] - 3*AUG[0,:]
AUG
'''
'''
# simplify second equation by dividing by 3
AUG[1,:] = AUG[1,:]/3
AUG
'''
'''
# eliminate x_2 from first equation by subtracting 2x times the second equation
AUG[0,:] = AUG[0,:] - 2*AUG[1,:]
AUG
'''
'''
'''
This augmented matrix is in *reduced row echelon form* (RREF), and corresponds to the system of equations:
\begin{align*}
1x_1 \ \ \qquad &= 1 \\
1x_2 &= 2,
\end{align*}
so the solution is $x_1=1$ and $x_2=2$.
'''
'''
'''
'''
## Matrix equations
'''
'''
'''
'''
See **page 177** in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Matrix product
'''
'''
'''
a,b,c,d,e,f, g,h,i,j = symbols('a b c d e f g h i j')
A = Matrix([[a,b],
[c,d],
[e,f]])
B = Matrix([[g,h],
[i,j]])
A, B
'''
'''
A*B
'''
'''
def mat_prod(A, B):
"""Compute the matrix product of matrices A and B."""
assert A.cols == B.rows, "Error: matrix dimensions not compatible."
m, ell = A.shape # A is a m x ell matrix
ell, n = B.shape # B is a ell x n matrix
C = zeros(m,n)
for i in range(0,m):
for j in range(0,n):
C[i,j] = A[i,:].dot(B[:,j])
return C
mat_prod(A,B)
'''
'''
# mat_prod(B,A)
'''
'''
'''
## Determinant
'''
'''
'''
a, b, c, d = symbols('a b c d')
A = Matrix([[a,b],
[c,d]])
A.det()
'''
'''
# Consider the parallelogram with sides:
u1 = Vector([3,0])
u2 = Vector([2,2])
plot_vecs(u1,u2)
plot_vec(u1, at=u2, color='k')
plot_vec(u2, at=u1, color='b')
autoscale_arrows()
# What is the area of this parallelogram?
'''
'''
# base = 3, height = 2, so area is 6
'''
'''
# Compute the area of the parallelogram with sides u1 and u2 using the determinant
A = Matrix([[3,0],
[2,2]])
A.det()
'''
'''
'''
'''
'''
'''
'''
## Matrix inverse
For an invertible matrix $A$, the matrix inverse $A^{-1}$ acts to undo the effects of $A$:
$$
A^{-1} A \vec{v} = \vec{v}.
$$
The effect applying $A$ followed by $A^{-1}$ (or the other way around) is the identity transformation:
$$
A^{-1}A \ = \ \mathbb{1} \ = \ AA^{-1}.
$$
'''
'''
'''
A = Matrix([[1, 2],
[3, 9]])
A
'''
'''
# Compute the determinant to check if the inverse matrix exists
A.det()
'''
'''
'''
The determinant is non-zero, so the inverse exists.
'''
'''
'''
A.inv()
'''
'''
A.inv()*A
'''
'''
'''
### Adjugate-matrix formula
The *adjugate matrix* of the matrix $A$ is obtained by replacing each entry of the matrix with a partial determinant calculation (called a *minor*). The minor $M_{ij}$ is the determinant of $A$ with its $i$th row and $j$th column removed.
'''
'''
'''
A.adjugate() / A.det()
'''
'''
'''
### Augmented matrix approach
$$
\left[ \, A \, | \, \mathbb{1} \, \right]
\qquad
-\textrm{Gauss-Jordan elimination}\rightarrow
\qquad
\left[ \, \mathbb{1} \, | \, A^{-1} \, \right]
$$
'''
'''
'''
AUG = A.row_join(eye(2))
AUG
'''
'''
# perform row operations until left side of AUG is in RREF
AUG[1,:] = AUG[1,:] - 3*AUG[0,:]
AUG[1,:] = AUG[1,:]/3
AUG[0,:] = AUG[0,:] - 2*AUG[1,:]
AUG
'''
'''
# the inverse of A is in the right side of RREF(AUG)
AUG[:,2:5] # == A-inverse
'''
'''
# verify A times A-inverse gives the identity matrix...
A*AUG[:,2:5]
'''
'''
'''
### Using elementary matrices
Each row operation $\mathcal{R}_i$ can be represented as an elementary matrix $E_i$. The elementary matrix of a given row operation is obtained by performing the row operation on the identity matrix.
'''
'''
'''
E1 = eye(2)
E1[1,:] = E1[1,:] - 3*E1[0,:]
E2 = eye(2)
E2[1,:] = E2[1,:]/3
E3 = eye(2)
E3[0,:] = E3[0,:] - 2*E3[1,:]
E1, E2, E3
'''
'''
# the sequence of three row operations transforms the matrix A into RREF
E3*E2*E1*A
'''
'''
'''
Recall the definition $A^{-1}A=\mathbb{1}$; we just observed that $E_3E_2E_1 A =\mathbb{1}$, so it must be that $A^{-1}=E_3E_2E_1$.
'''
'''
'''
E3*E2*E1
'''
'''
'''
'''
'''
'''
'''
# 4/ Geometrical linear algebra
Points, lines, and planes are geometrical objects that are conveniently expressed using the language of vectors.
'''
'''
'''
'''
## Points
A point $p=(p_x,p_y,p_z)$ refers to a single location in $\mathbb{R}^3$.
'''
'''
'''
p = Point([2,4,5])
p
'''
'''
'''
## Lines
A line is a one dimensional infinite subset of $\mathbb{R}^3$ that can be described as
$$
\ell: \{ p_o + \alpha \vec{v} \ | \ \forall \alpha \in \mathbb{R} \}.
$$
'''
'''
'''
po = Point([1,1,1])
v = Vector([1,1,0])
plot_line(v, po)
'''
'''
'''
## Planes
A plane is a two-dimensional infinite subset of $\mathbb{R}^3$ that can be described in one of three ways:
The *general equation*:
$$
P: \left\{ \, Ax+By+Cz=D \, \right\}
$$
The *parametric equation*:
$$
P: \{ p_{\textrm{o}}+s\,\vec{v} + t\,\vec{w}, \ \forall s,t \in \mathbb{R} \},
$$
which defines a plane that that contains the point $p_{\textrm{o}}$ and the vectors $\vec{v}$ and $\vec{w}$.
Or the *geometric equation*:
$$
P: \left\{ \vec{n} \cdot [ (x,y,z) - p_{\textrm{o}} ] = 0 \,\right\},
$$
which defines a plane that contains point $p_{\textrm{o}}$ and has normal vector $\hat{n}$.
'''
'''
'''
# plot plane 2x + 1y + 1z = 5
normal = Vector([2, 1, 1])
D = 5
plot_plane(normal, D)
'''
'''
'''
'''
'''
'''
'''
## Projection operation
'''
'''
'''
'''
A projection of the vector $\vec{v}$ in the direction $\vec{d}$ is denoted $\Pi_{\vec{d}}(\vec{v})$. The formula for computing the projections uses the dot product operation:
$$
\Pi_{\vec{d}}(\vec{v})
\ \equiv \
(\vec{v} \cdot \hat{d}) \hat{d}
\ = \
\left(\vec{v} \cdot \frac{\vec{d}}{\|\vec{d}\|} \right) \frac{\vec{d}}{\|\vec{d}\|}.
$$
'''
'''
'''
def proj(v, d):
"""Computes the projection of vector `v` onto direction `d`."""
return v.dot( d/d.norm() )*( d/d.norm() )
'''
'''
v = Vector([2,2])
d = Vector([3,0])
proj_v_on_d = proj(v,d)
plot_vecs(d, v, proj_v_on_d)
autoscale_arrows()
'''
'''
'''
The basic projection operation can be used to compute projections onto planes and to compute distances between geometric objects (page 192).
'''
'''
'''
'''
## Bases and coordinate projections
'''
'''
'''
'''
See [page 225](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=68) in v2.2 of the book:
- Different types of bases
- Orthonormal
- Orthogonal
- Generic
- Change of basis operation (see the short code sketch below)
'''
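# Minimal change-of-basis sketch (illustrative; this orthonormal basis is made up):
# w.r.t. an orthonormal basis, the coordinates of v are obtained with dot products.
e1 = Vector([1, 1])/sqrt(2)
e2 = Vector([1, -1])/sqrt(2)
v = Vector([4, 6])
v_coords = Vector([v.dot(e1), v.dot(e2)])      # coordinates of v in the basis {e1, e2}
v_coords, v_coords[0]*e1 + v_coords[1]*e2      # the second expression reconstructs v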
'''
'''
'''
## Vector spaces
'''
'''
'''
'''
See **page 231** in v2.2 of the book.
'''
'''
'''
'''
## Vector space techniques
'''
'''
'''
'''
See **page 244** in the book.
'''
'''
'''
'''
'''
'''
'''
'''
# 5/ Linear transformations
'''
'''
'''
'''
See [page 257](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=70) in v2.2 of the book.
'''
'''
'''
'''
## Vector functions
'''
'''
'''
'''
Functions that take vectors as inputs and produce vectors as outputs:
$$
T:\mathbb{R}^{n}\! \to \mathbb{R}^{m}
$$
'''
'''
'''
'''
## Matrix representation of linear transformations
'''
'''
'''
'''
$$
T : \mathbb{R}^n \to \mathbb{R}^m
\qquad
\Leftrightarrow
\qquad
M_T \in \mathbb{R}^{m \times n}
$$
'''
'''
'''
'''
'''
'''
'''
'''
## Input and output spaces
'''
'''
'''
'''
We can understand the properties of linear transformations $T$, and their matrix representations $M_T$ in analogy with ordinary functions:
\begin{align*}
\textrm{function }
f:\mathbb{R}\to \mathbb{R}
& \ \Leftrightarrow \,
\begin{array}{l}
\textrm{linear transformation }
T:\mathbb{R}^{n}\! \to \mathbb{R}^{m} \\
\textrm{represented by the matrix } M_T \in \mathbb{R}^{m \times n}
\end{array} \\
%
\textrm{input } x\in \mathbb{R}
& \ \Leftrightarrow \
\textrm{input } \vec{x} \in \mathbb{R}^n \\
%\textrm{compute }
\textrm{output } f(x) \in \mathbb{R}
& \ \Leftrightarrow \
% \textrm{compute matrix-vector product }
\textrm{output } T(\vec{x}) \equiv M_T\vec{x} \in \mathbb{R}^m \\
%\textrm{function composition }
g\circ\! f \: (x) = g(f(x))
& \ \Leftrightarrow \
% \textrm{matrix product }
S(T(\vec{x})) \equiv M_SM_T \vec{x} \\
\textrm{function inverse } f^{-1}
& \ \Leftrightarrow \
\textrm{matrix inverse } M_T^{-1} \\
\textrm{zeros of } f
& \ \Leftrightarrow \
\textrm{kernel of } T \equiv \textrm{null space of } M_T \equiv \mathcal{N}(A) \\
\textrm{image of } f
& \ \Leftrightarrow \
\begin{array}{l}
\textrm{image of } T \equiv \textrm{column space of } M_T \equiv \mathcal{C}(A)
\end{array}
\end{align*}
Observe we refer to the linear transformation $T$ and its matrix representation $M_T$ interchangeably.
'''
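# Illustrative example (not from the book): the kernel (null space) and image
# (column space) of a matrix representation can be computed directly with SymPy.
A = Matrix([[1, 2, 3], [4, 5, 6]])
A.nullspace(), A.columnspace()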
'''
'''
'''
## Finding matrix representations
'''
'''
'''
'''
See [page 269](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=74) in v2.2 of the book.
'''
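# Minimal sketch (T below is a made-up example transformation): the columns of the
# matrix representation M_T are the outputs of T on the standard basis vectors.
T = lambda u: Vector([u[0] + u[1], 3*u[0]])    # hypothetical T(x,y) = (x+y, 3x)
M_T = T(Vector([1, 0])).row_join(T(Vector([0, 1])))
M_T, M_T*Vector([2, 3]), T(Vector([2, 3]))     # the matrix-vector product agrees with T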
'''
'''
'''
'''
'''
'''
'''
## Invertible matrix theorem
See [page 288](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=78) in the book.
'''
'''
'''
'''
'''
'''
'''
'''
# 6/ Theoretical linear algebra
'''
'''
'''
'''
## Eigenvalues and eigenvectors
An eigenvector of the matrix $A$ is a special input vector, for which the matrix $A$ acts as a scaling:
$$
A\vec{e}_\lambda = \lambda\vec{e}_\lambda,
$$
where $\lambda$ is called the *eigenvalue* and $\vec{e}_\lambda$ is the corresponding eigenvector.
'''
'''
'''
A = Matrix([[1, 5],
[5, 1]])
A
'''
'''
A*Vector([1,0])
'''
'''
A*Vector([1,1])
'''
'''
'''
The *characteristic polynomial* of the matrix $A$ is defined as
$$
p(\lambda) \equiv \det(A-\lambda \mathbb{1}).
$$
'''
'''
'''
l = symbols('lambda')
(A-l*eye(2)).det()
'''
'''
# the roots of the characteristic polynomial are the eigenvalues of A
solve( (A-l*eye(2)).det(), l)
'''
'''
# or call `eigenvals` method
A.eigenvals()
'''
'''
A.eigenvects()
# can also find eigenvects using (A-6*eye(2)).nullspace() and (A+4*eye(2)).nullspace()
'''
'''
Q, Lambda = A.diagonalize()
Q, Lambda
'''
'''
Q*Lambda*Q.inv() # == eigendecomposition of A
'''
'''
'''
'''
'''
'''
'''
## Special types of matrices
'''
'''
'''
'''
See [page 312](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=83) in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Abstract vector spaces
'''
'''
'''
'''
Generalize vector techniques to other vector-like quantities. This allows us to talk about bases, dimension, etc.
See [page 318](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=84) in the book.
'''
'''
'''
'''
'''
'''
'''
'''
## Abstract inner product spaces
'''
'''
'''
'''
Use geometrical notions like length and orthogonality for abstract vectors.
See **page 322** in the book.
'''
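# Illustrative sketch (the example functions are made up): polynomials can be treated
# as abstract vectors with the inner product <f,g> = integral of f*g over [-1,1].
f1 = 1 + x
f2 = 1 - x
inner_f1f2 = integrate(f1*f2, (x, -1, 1))
norm_f1 = sqrt(integrate(f1*f1, (x, -1, 1)))
inner_f1f2, norm_f1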
'''
'''
'''
'''
'''
'''
'''
## Gram–Schmidt orthogonalization
'''
'''
'''
'''
See **page 328**.
'''
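# Short sketch using SymPy's built-in Gram-Schmidt routine (the vectors are made up):
from sympy import GramSchmidt
vs = [Vector([1, 1, 0]), Vector([1, 0, 1]), Vector([0, 1, 1])]
GramSchmidt(vs, True)   # True => return an orthonormal set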
'''
'''
'''
'''
'''
'''
'''
## Matrix decompositions
'''
'''
'''
'''
See **page 332**.
'''
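# Illustrative sketch of two common decompositions available in SymPy
# (the matrix A is just an example):
A = Matrix([[1, 2], [3, 9]])
Q, R = A.QRdecomposition()        # A = Q*R, Q has orthonormal columns, R is upper triangular
L, U, perm = A.LUdecomposition()  # lower/upper triangular factors (row swaps listed in perm)
Q*R, L*U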
'''
'''
'''
'''
'''
'''
'''
## Linear algebra with complex numbers
'''
'''
'''
'''
See [page 339](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=88) in v2.2 of the book.
'''
'''
'''
'''
'''
'''
'''
'''
# Applications chapters
'''
'''
'''
'''
- Chapter 7: Applications
- Chapter 8: Probability theory
- Chapter 9: Quantum mechanics
'''
'''
'''
'''
'''
'''
'''
'''
# Notation appendix
'''
'''
'''
'''
Check out [page 571](https://minireference.com/static/excerpts/noBSLA_v2_preview.pdf#page=142) in the book.
'''
'''
'''
'''
'''
'''
'''
| mit | -7,188,144,948,188,234,000 | 16.864536 | 343 | 0.468561 | false |
OpenVolunteeringPlatform/django-ovp-projects | ovp_projects/migrations/0014_apply.py | 1 | 1497 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-15 23:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ovp_projects', '0013_project_roles'),
]
operations = [
migrations.CreateModel(
name='Apply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=30, verbose_name='name')),
('date', models.DateTimeField(auto_now_add=True)),
('canceled', models.BooleanField(default=False, verbose_name='Canceled')),
('canceled_date', models.DateTimeField(blank=True, null=True, verbose_name='Canceled date')),
('email', models.CharField(blank=True, max_length=200, null=True, verbose_name='Email')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ovp_projects.Project')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'applies',
'verbose_name': 'apply',
},
),
]
| agpl-3.0 | 8,839,063,111,466,270,000 | 41.771429 | 141 | 0.606546 | false |
heromod/migrid | user-projects/EpistasisOnGrid/gridepistasisgui.py | 1 | 20836 | import sys
sys.path.append("GUI/")
import epistasisviewer as viewer
import gridepistasis as epistasisControl
import wx
#import time
import os
sys.path.append("RfilesAndscripts/")
import readdata
from threading import Thread
import Configuration.epistasisconfiguration as config
exec_state = "executing"
pending_state = "pending"
finished_state = "finished"
cancelled_state = "cancelled"
class gridepistasisgui:
def __init__(self):
self.gene_selection = set()
self.gene_selection_dict = {}
self.trait_selection = set()
self.trait_selection_dict = {}
self.class_selection = list()
self.all_genes_and_traits = list()
self.data_sheet = []
self.jobs = []
self.epistasis_status = pending_state
self.epistasis_thread = Thread()
def popup_box(self,comment, caption_title=" "):
dlg = wx.MessageDialog(frame_1,
message=comment,
caption=caption_title,
style=wx.OK|wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
def yesno_box(self,comment, caption_title=" "):
dlg = wx.MessageDialog(frame_1,
message=comment,
caption=caption_title,
style=wx.YES_DEFAULT|wx.ICON_INFORMATION
)
choice = dlg.ShowModal()
print choice
dlg.Destroy()
#def load_selection_var_list(self, selection_variables):
#sel_var = frame_1.selection_variable_list.GetSelection()
#value_list = self.data_sheet[sel_var]
#values = list(set(value_list))
## all values are either float or string
#if type(values[0]) == float:
#values = filter(lambda x : str(x) not in ["nan","-99.0"], values)
#elif type(values[0]) == str:
#values = filter(lambda x : x.strip() not in ["?"], values)
#values.sort()
#def clean(v):
#if type(v) == type(1.0) and v % 1 == 0.0:
#v = int(v)
#str_v = str(v).strip()
#return str_v
#values = map(clean,values)
#frame_1.start_class.SetItems(values)
#frame_1.start_class.SetSelection(0)
#frame_1.end_class.SetItems(values)
#frame_1.end_class.SetSelection(len(values)-1)
#while(self.class_selection != []):
#self.class_selection.pop(0)
#self.class_selection.extend(values)
############################
######## GENE SELECTOR TAB###########
#############################
def read_data_sheet(self):
datafile = frame_1.datafile.GetValue()
if not os.path.exists(datafile):
self.popup_box("Can't find "+datafile, "Can't find "+datafile)
return
#print all_genes_and_traits
#data_sheet.update(readdata.read_data(datafile))
data_list, column_labels = readdata.read_data(datafile)
self.data_sheet.extend(data_list)
#column_labels = data_sheet.keys()
self.all_genes_and_traits.extend(column_labels)
#print "all", all_genes_and_traits
#all_genes_and_traits.sort()
frame_1.gene_list.Set(self.all_genes_and_traits)
frame_1.trait_list.Set(self.all_genes_and_traits)
# assume that the selection variable is in first columns
frame_1.selection_variable_list.SetItems(self.all_genes_and_traits[0:20])
#frame_1.selection_variable_list.Select(1)
##### BUTTONS ############
def update_selected_genes(self):
frame_1.selected_genes.Set(list(self.gene_selection))
def on_add_genes(self,event=None):
indexes = frame_1.gene_list.GetSelections()
#indexes = list(index)
#print indexes, all_genes_and_traits
#frame_1.selected_genes.InsertItems(index, 0)
for i in indexes:
# gene_name = all_genes_and_traits[i]
#if not gene_name in gene_selection:
#gene_selection.append(all_genes_and_traits[i])
gene_name = self.all_genes_and_traits[i]
self.gene_selection.add(gene_name)
if not self.gene_selection_dict.has_key(gene_name):
self.gene_selection_dict[gene_name] = i
self.update_selected_genes()
def on_remove_genes(self,event=None):
indexes = list(frame_1.selected_genes.GetSelections())
indexes.reverse()
#print indexes
for i in indexes:
gene_name = list(self.gene_selection)[i] # list converts from set to list
self.gene_selection.remove(gene_name)
del(self.gene_selection_dict[gene_name])
#gene_selection.remove(genes)
self.update_selected_genes()
###########################
######## TRAIT SELECTOR TAB###########
##########################
##### BUTTONS ############
def update_selected_traits(self):
frame_1.selected_traits.Set(list(self.trait_selection))
def on_add_traits(self,event=None):
indexes = frame_1.trait_list.GetSelections()
#indexes = list(index)
#frame_1.selected_genes.InsertItems(index, 0)
for i in indexes:
trait_name = self.all_genes_and_traits[i]
#if not gene_name in gene_selection:
#gene_selection.append(all_genes_and_traits[i])
self.trait_selection.add(trait_name)
if not self.trait_selection_dict.has_key(trait_name):
self.trait_selection_dict[trait_name] = i
self.update_selected_traits()
def on_remove_traits(self,event=None):
indexes = list(frame_1.selected_traits.GetSelections())
indexes.reverse()
for i in indexes:
trait_name = list(self.trait_selection)[i] # list converts set
self.trait_selection.remove(trait_name)
del(self.trait_selection_dict[trait_name])
#gene_selection.remove(genes)
self.update_selected_traits()
##########################
#### GENERAL TAB ############
#########################
def validateInput(self):
try:
g1 = int(frame_1.g1.GetValue())
g2 = int(frame_1.g2.GetValue())
t1 = int(frame_1.t1.GetValue())
t2 = int(frame_1.t2.GetValue())
# sv = int(frame_1.sv.GetValue())
#c1 = int(frame_1.c1.GetValue())
#c2 = int(frame_1.c2.GetValue())
except ValueError:
return False, "Index values must be integers"
#if type(g1) != type(1) and type(g2) != type(1):
# return False, "Genes indexes must be integers"
#if type(t1) != type(1) and type(t2) != type(1):
# return False, "Trait indexes must be integers"
#if type(sv) != type(1) :
# return False, "Selection variable index must be an integer"
datafile = frame_1.datafile.GetValue()
outputdir = frame_1.outputdir.GetValue()
#type(sv) != type(1)
if not os.path.exists(datafile):
return False, "Can't find data file : "+datafile
if not os.path.exists(outputdir):
return False, "Can't find output directory : "+outputdir
if frame_1.selection_variable_list.GetSelection() == -1:
return False, "Choose a selection variable."
return True, "OK"
##### START/ STOP #############
def start(self):
#collect values
datafile = frame_1.datafile.GetValue()
outputdir = frame_1.outputdir.GetValue()
local_mode = frame_1.runlocal.GetValue()
print local_mode
#selected_genes = list(frame_1.selected_genes.GetSelections())
if outputdir[-1] != "/":
outputdir += "/"
if frame_1.use_indexes.GetValue():
g1 = int(frame_1.g1.GetValue())
g2 = int(frame_1.g2.GetValue())
t1 = int(frame_1.t1.GetValue())
t2 = int(frame_1.t2.GetValue())
#sv = frame_1.sv.GetValue()
#c1 = frame_1.c1.GetValue()
#c2 = frame_1.c2.GetValue()
genes = range(g1,g2+1)
traits = range(t1,t2+1)
#, traits = readdata.get_by_index(datafile,g1,g2,t1,t2)
else:
genes = self.gene_selection_dict.values()
traits = self.trait_selection_dict.values()
list_pos = frame_1.selection_variable_list.GetSelection()+1 # indexes start at 1 in R
selection_variable = list_pos
i = frame_1.start_class.GetSelection()
j = frame_1.end_class.GetSelection()
selection_variable_values= self.class_selection[i:j+1]
self.epistasis_thread = My_epistasis_thread(genelist=genes, traitlist=traits, selection_variable=selection_variable, selection_variable_values=selection_variable_values, data=datafile, output_dir=outputdir, local_mode=local_mode)
# genelist,traitlist,selection_variable, selection_variable_values,local_mode,data,output_dir
frame_1.statusfeed.write("Creating %i jobs..." % len(selection_variable_values))
self.epistasis_thread.start()
#jobs = model.start_epistasis(c1,c2,g1,g2,t1,t2,sv,datafile,outputdir,local_mode)
#self.jobs = self.epistasis_thread.start_epistasis(genelist=genes,traitlist=traits,selection_variable=selection_variable, selection_variable_values=selection_variable_values,local_mode=local_mode,data=datafile,output_dir=outputdir)
#model.epistasis_status=exec_state
self.epistasis_status = exec_state
frame_1.timer.Start(milliseconds=2000) # start the timer for 2 sec
def stop(self):
self.epistasis_thread.stop()
#model.clean_up_epistasis()
#model.__init__()
self.EnableControls(True)
#frame_1.timer.Stop()
#self.update_gui()
#self.epistasis_thread.join()
def finish(self):
self.epistasis_thread.join()
def post_commands(self):
post_exec_str = frame_1.post_exec_cmds.GetValue()
post_exec_commands = post_exec_str.split(";\n")
for cmd in post_exec_commands:
try:
proc = os.popen(cmd, "w")
proc.close()
except OSError:
print "Unable to execute command :"+cmd
def final(self):
#model.clean_up_epistasis()
self.post_commands()
def EnableControls(self,enable):
frame_1.datafile.Enable(enable)
#frame_1.g1.Enable(enable)
#frame_1.g2.Enable(enable)
#frame_1.t1.Enable(enable)
#frame_1.t2.Enable(enable)
#frame_1.sv.Enable(enable)
frame_1.datafile.Enable(enable)
frame_1.outputdir.Enable(enable)
frame_1.button_1.Enable(enable)
frame_1.button_2.Enable(enable)
frame_1.Start.Enable(enable)
frame_1.Stop.Enable(enable)
frame_1.runlocal.Enable(enable)
frame_1.use_indexes.Enable(enable)
#frame_1.c1.Enable(enable)
#frame_1.c2.Enable(enable)
def update_gui(self):
  if self.epistasis_status == pending_state: # if the grid jobs haven't been started, do nothing
return
running_jobs = self.epistasis_thread.jobs
finished_jobs = self.epistasis_thread.finished_jobs
all_jobs = []
all_jobs.extend(running_jobs)
all_jobs.extend(finished_jobs)
if all_jobs == []: # jobs not ready yet
return
if len(all_jobs) > 0 and len(all_jobs) == len(finished_jobs) :
self.epistasis_status = finished_state
progress_str = str(len(finished_jobs)) + '/'\
+ str(len(all_jobs))
status_lines = self.create_gui_job_text(all_jobs)
status = ""
for line in status_lines:
status += line + '\n'
frame_1.statusfeed.Clear()
frame_1.statusfeed.write(status)
frame_1.progress.Clear()
frame_1.progress.write(progress_str)
def create_gui_job_text(self,jobs):
"""Return a status string for each job"""
lines = []
for j in jobs:
line = 'Grid Epistasis Job \t %(class)s \t %(status)s \t %(started)s' % j
lines.append(line)
return lines
##### BUTTONS ############
# event handlers
def OnBtnStart(self,event=None):
valid, comment = self.validateInput()
if not valid:
   self.popup_box(comment, "Incorrect input")
return
#model.epistasis_status = pending_state
self.epistasis_status = pending_state
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("Starting epistasis...")
self.EnableControls(False)
frame_1.Stop.Enable(True)
TIMER_ID = 100 # pick a number
shorttime= 100
frame_1.timer = wx.Timer(frame_1, TIMER_ID) # message will be sent to the panel
frame_1.timer.Start(shorttime)
def OnBtnStop(self,event=None):
if self.epistasis_status == exec_state:
self.epistasis_status = cancelled_state
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("Stopping epistasis...")
def OnBtnBrowseFile(self,event=None):
path = os.curdir
fd = wx.FileDialog(frame_1, message="Choose file")
fd.ShowModal()
fd.Destroy()
frame_1.datafile.Clear()
frame_1.datafile.write(fd.GetPath())
#self.read_data_sheet()
def on_load_button(self, event=None):
self.read_data_sheet()
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("File loaded.")
epi_gui.EnableControls(True)
def OnBtnBrowseDir(self,event=None):
path = frame_1.outputdir.GetValue()
if path == "":
path = os.curdir
dd = wx.DirDialog(frame_1, message="Choose dir", defaultPath=path)
dd.ShowModal()
dd.Destroy()
frame_1.outputdir.Clear()
frame_1.outputdir.write(dd.GetPath())
def OnMenuQuit(self,event=None):
if self.epistasis_thread.is_alive():
self.epistasis_thread.stop()
self.epistasis_thread.join()
frame_1.Destroy()
def on_use_indexes(self,event=None):
value = frame_1.use_indexes.GetValue()
frame_1.gene_index1_label.Enable(value)
frame_1.gene_index2_label.Enable(value)
frame_1.trait_index1_label.Enable(value)
frame_1.trait_index2_label.Enable(value)
frame_1.g1.Enable(value)
frame_1.g2.Enable(value)
frame_1.t1.Enable(value)
frame_1.t2.Enable(value)
def on_choice(self,event=None):
sel_var = frame_1.selection_variable_list.GetSelection()
value_list = self.data_sheet[sel_var]
values = list(set(value_list))
# all values are either float or string
if type(values[0]) == float:
values = filter(lambda x : str(x) not in ["nan","-99.0"], values)
elif type(values[0]) == str:
values = filter(lambda x : x.strip() not in ["?"], values)
values.sort()
def clean(v):
if type(v) == type(1.0) and v % 1 == 0.0:
v = int(v)
str_v = str(v).strip()
return str_v
values = map(clean,values)
frame_1.start_class.SetItems(values)
frame_1.start_class.SetSelection(0)
frame_1.end_class.SetItems(values)
frame_1.end_class.SetSelection(len(values)-1)
while(self.class_selection != []):
self.class_selection.pop(0)
self.class_selection.extend(values)
def bindViewerEvents(self):
frame_1.button_1.Bind(wx.EVT_BUTTON, self.OnBtnBrowseFile)
frame_1.load_data_button.Bind(wx.EVT_BUTTON, self.on_load_button)
frame_1.button_2.Bind(wx.EVT_BUTTON, self.OnBtnBrowseDir)
frame_1.Start.Bind(wx.EVT_BUTTON, self.OnBtnStart)
frame_1.Stop.Bind(wx.EVT_BUTTON, self.OnBtnStop)
frame_1.add_genes.Bind(wx.EVT_BUTTON,self.on_add_genes)
frame_1.remove_genes.Bind(wx.EVT_BUTTON, self.on_remove_genes)
frame_1.add_traits.Bind(wx.EVT_BUTTON,self.on_add_traits)
frame_1.remove_traits.Bind(wx.EVT_BUTTON, self.on_remove_traits)
#frame_1.notebook_1_pane_2.Bind(wx.EVT_BUTTON,on_gene_selector_tab)
#frame_1.notebook_1_pane_2.Bind(wx.EVT_KEY_DOWN,on_gene_selector_tab)
frame_1.use_indexes.Bind(wx.EVT_CHECKBOX, self.on_use_indexes)
frame_1.selection_variable_list.Bind(wx.EVT_CHOICE,self.on_choice)
frame_1.Bind(wx.EVT_MENU, self.OnMenuQuit)
frame_1.Bind(wx.EVT_CLOSE, self.OnMenuQuit)
frame_1.Bind(wx.EVT_TIMER, self.OnTimer)
def OnTimer(self,event=None):
#print "timer event. status "+self.epistasis_status, exec_state, self.epistasis_status == exec_state
self.update_gui()
#print "restarting timer"
frame_1.timer.Start(milliseconds=config.gui_update_timer)
if self.epistasis_status == pending_state:
self.start()
#elif self.epistasis_status == exec_state:
#print "restarting timer"
#frame_1.timer.Start(milliseconds=config.gui_update_timer)
elif self.epistasis_status == finished_state:
frame_1.timer.Stop()
self.finish()
self.popup_box('Result files are in your output directory.', 'Epistasis complete')
self.final()
self.EnableControls(True)
elif self.epistasis_status == cancelled_state:
self.stop()
frame_1.timer.Stop()
self.final()
self.update_gui()
class My_epistasis_thread(Thread):
def __init__(self, genelist, traitlist, selection_variable, selection_variable_values, data, output_dir, local_mode):
Thread.__init__(self)
self.genelist = genelist
self.traitlist = traitlist
self.selection_variable = selection_variable
self.selection_variable_values = selection_variable_values
self.data = data
self.output_dir = output_dir
self.status = ""
self.progress = ""
self.cancel_jobs = False
self.jobs = []
self.finished_jobs = []
self.local_mode = local_mode
def run(self):
import time
self.jobs = epistasisControl.start_epistasis(self.selection_variable_values, self.genelist,self.traitlist, self.selection_variable, self.data, self.output_dir, local_mode=self.local_mode)
total_jobs = len(self.jobs)
time.sleep(5)
while True:
print "Updating"
self.jobs = epistasisControl.update_epistasis(self.jobs)
for j in self.jobs:
if j["status"] == "FINISHED":
epistasisControl.download_output(j)
self.jobs.remove(j)
self.finished_jobs.append(j)
if self.cancel_jobs: # Stop
epistasisControl.stop_epistasis(self.jobs)
self.jobs = epistasisControl.update_epistasis(self.jobs)
#self.update_epistasis()
break
if total_jobs == len(self.finished_jobs): # we're finished
break
time.sleep(config.polling_update_timer)
print "Thread exiting"
def stop(self):
self.cancel_jobs = True
# frame_1.runlocal.Bind(wx.EVT_CHECKBOX, OnLocal)
if __name__ == '__main__':
epi_gui = gridepistasisgui()
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = viewer.MyEpiFrame(None, -1, "")
 # disable all controls except file browser
epi_gui.EnableControls(False)
frame_1.datafile.Enable(True)
frame_1.button_1.Enable(True)
frame_1.statusfeed.Clear()
frame_1.statusfeed.write("Load a data file to get started.")
#model = EpiModel.GridEpistasis()
#read_genes()
#epi_gui.read_data_sheet()
app.SetTopWindow(frame_1)
epi_gui.bindViewerEvents()
frame_1.Show()
app.MainLoop()
| gpl-2.0 | 93,554,748,704,978,140 | 35.618629 | 239 | 0.566376 | false |
gandalf221553/CodeSection | compilare/compilatore.py | 1 | 6485 | def RowChanger(row,textToSearch,textToReplace,fileToSearch):
a=1
import fileinput
tempFile = open( fileToSearch, 'r+' )
for line in fileinput.input( fileToSearch ):
if row in line :
print('done yet')
a=0
if a:
if textToReplace=="0":
textToReplace = textToSearch+"\n"+row
#fileToSearch = 'D:\dummy1.txt'
tempFile = open( fileToSearch, 'r+' )
for line in fileinput.input( fileToSearch ):
if textToSearch in line :
print('done now')
tempFile.write(line.replace(textToSearch,textToReplace))
tempFile.close()
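def _example_rowchanger_usage(spec_path="kivy_matplotlib.spec"):
    # Hedged usage sketch (not part of the original script; the spec_path
    # default is an assumption for illustration only). RowChanger is intended
    # to append `row` after the first line containing `textToSearch`, unless
    # `row` is already present in the file; ModSpec() below calls it in the
    # same way to patch a PyInstaller .spec file.
    RowChanger("from kivy.deps import sdl2, glew",
               "# -*- mode: python -*-",
               "0",
               spec_path)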
#http://pythoncentral.io/pyinstaller-package-python-applications-windows-mac-linux/
def ModSpec():
print("modddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
import os
print(os.path.basename(os.path.dirname(os.path.realpath(__file__))))
#nome=os.path.basename(os.path.dirname(os.path.realpath(__file__)))
nome="kivy_matplotlib"
icon=1
onefile=0
executive=0
vuoimettereunimmagine=0
altrecartelle=0
versionfile=0
nomepy=nome+".py"
nomespec=nome+".spec"
nomecart="\\"+nome+"\\"
nomeIcon="icon.ico"
import platform
#windowsonly="" if StringCnf(platform.system(),"Windows") else windowsonly=" -m "
from calcolatrice.misuras import StringCnf
if StringCnf(platform.system(),"Windows"):
windowsonly=" -m "
else:
windowsonly=""
if onefile:
vuoifareunfile=" --onefile"
else:
vuoifareunfile=""
if vuoimettereunimmagine:
nomeimmagine="logo.png"
else:
nomeimmagine=""
if icon:
iconStr=" --icon "+nomeIcon+" "
else:
iconStr=""
    #compiler
a=""#"\\"+os.getcwd()
posizione=a+nomepy
if versionfile:
versionfile=" --version-file=version.txt "
else:
versionfile=""
pythonpath="!python "#"C:\\Users\\Von Braun\\Downloads\\WinPython-64bit-3.5.2.3Qt5\\python-3.5.2.amd64\\Scripts\\pyinstaller.exe "
#pythonpath="path='"+a+"'"
#pythonpath= "C:\Users\Von Braun\Downloads\WinPython-64bit-3.5.2.3Qt5\python-3.5.2.amd64\python.exe "
pyinstallerpath="PyInstaller "
#pyinstallerpath="C:\Users\Von Braun\Downloads\WinPython-64bit-3.5.2.3Qt5\python-3.5.2.amd64\Lib\site-packages\PyInstaller\building\makespec.py "
#http://stackoverflow.com/questions/8663046/how-to-install-a-python-package-from-within-ipython
#%%!python -m PyInstaller --onefile --name nome --icon icon.ico kivy_matplotlib.py
print("\n\n ATTENDI.....POTRESTI DOVER ASPETTARE MOLTO TEMPO\n\n")
creaspecfile=pythonpath+windowsonly+pyinstallerpath+posizione+vuoifareunfile+" --windowed "+" --name "+nome+iconStr+versionfile
print(creaspecfile)
print("\n\n")
if executive and 0:
#from IPython import get_ipython
#ipython = get_ipython()
#ipython.magic(exec(creaspecfile))
#run(creaspecfile)
#exec(input("inserisci la frase di sopra\n\n"))
import PyInstaller.__main__
specpath="--specpath " +os.getcwd() #/opt/bk/spec
distpath="--distpath " +os.getcwd()+"\\dist" #/opt/bk/dist
workpath="--workpath " +os.getcwd()+"\\build" #/opt/bk/build
print(specpath)
print(distpath)
print(workpath)
#import PyInstaller.utils.cliutils.makespec
#'C:\\Users\\Von Braun\\Google Drive\\mat2pylab\\ProgettoTesi3.86\\hello'
#'C:\\Users\\Von Braun\\Downloads\\WinPython-64bit-3.5.2.3Qt5\\settings'
#pathex=['C:\\Users\\Von Braun\\Downloads\\WinPython-64bit-3.5.2.3Qt5\\python-3.5.2.amd64\\Lib\\site-packages\\PyInstaller']
#PyInstaller.__main__.run_makespec([nomepy,pathex])
PyInstaller.__main__.run(["-y", "-w",nomepy])
#exec(creaspecfile)
if 1:
import os.path
esistelospec=os.path.isfile(nomespec)
if esistelospec==0:
from sys import exit
exit()
print("\ncreazione dello spec completata")
        #add these lines to the spec file
#http://stackoverflow.com/questions/17140886/how-to-search-and-replace-text-in-a-file-using-python
print("modifica dello spec in corso\n\n")
import fileinput
riga="from kivy.deps import sdl2, glew"
textToSearch = "# -*- mode: python -*-"
NomeFile = nome+".spec"
#fileToSearch = 'D:\dummy1.txt'
RowChanger(riga,textToSearch,"0",NomeFile)
if altrecartelle:
nuova="Tree('.."+nomecart+"'),"
textToSearch="coll = COLLECT(exe,"
textSub=textToSearch+nuova
RowChanger(nuova,textToSearch,textSub,NomeFile)
#if icona:
# modIcon=" "+"icon='icon.ico',"
# cerca="exe = EXE(pyz,"
# Modificatore(modIcon,cerca,"0",NomeFile)
cerca2="a.datas,"
modText2=" "+"*[Tree(p) for p in (sdl2.dep_bins + glew.dep_bins)],"
RowChanger(modText2,cerca2,"0",NomeFile)
print("spec file completed")
print("modddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd")
#coll = COLLECT(exe, Tree('examples-path\\demo\\touchtracer\\'),
#--onefile
print("\n\nsta per iniziare la compilazione, attendi fino a che non avrà finito, troverai il file exe nella cartella DIST\n")
compilaspecfile=pythonpath+windowsonly+pyinstallerpath+nomespec
print(compilaspecfile)
if executive:
#ipython = get_ipython()
#exec(input("inserisci la frase di sopra\n\n"))
import PyInstaller.__main__
PyInstaller.__main__.run(["-y", "-w","kivy_matplotlib.py"])
#run(exec(creaspecfile))
print("\ncompilation complete")
"""
if args.filenames[0].endswith('.spec'):
spec_file = args.filenames[0]
else:
spec_file = run_makespec(**vars(args))
##############################################################################################
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
a=os.getcwd()
print(a)
#os.chdir("C:\\Users\\Von Braun\\Google Drive\\mat2pylab\\ProgettoTesi4.00")
print(spec_file)
from compilatore import ModSpec
ModSpec()
os.chdir(a)
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
##############################################################################################
run_build(pyi_config, spec_file, **vars(args))
""" | mit | 6,083,077,644,254,006,000 | 38.066265 | 149 | 0.6095 | false |
broadinstitute/herc | herc/async.py | 1 | 2127 | from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from tornado import ioloop
from tornado.concurrent import run_on_executor
from tornado import gen
from functools import wraps
import threading
executors = {
'short': ThreadPoolExecutor(max_workers=8), # For little things to avoid blocking the main thread
'long': ThreadPoolExecutor(max_workers=4), # For longer work, like file I/O
'aurora': ThreadPoolExecutor(max_workers=4), # Exclusively for communicating with Aurora
'docker': ThreadPoolExecutor(max_workers=4) # Exclusively for communicating with Docker
}
def usepool(executor):
"""
Decorator that runs the decorated function asynchronously in the given executor pool whenever it's run.
Anything calling a function decorated with this decorator must be a gen.coroutine.
"""
def dec(func):
@wraps(func)
@gen.coroutine
def inner(*args, **kwargs):
t = Task(executor)
ret = yield t.run(func, *args, **kwargs)
raise gen.Return(ret)
return inner
return dec
class Task:
"""
Class that turns any function into an asynchronous call.
Usage: t = Task( 'executorname' )
result = yield t.run( fn, *args, **kwargs )
Caller must be a gen.coroutine.
"""
def __init__(self, executor):
self.executor = executors[executor]
self.io_loop = ioloop.IOLoop.instance()
@run_on_executor
def run(self, fn, *args, **kwargs):
return fn(*args, **kwargs)
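# --- Hedged usage sketch (assumed, not part of the original herc module) ---
# The decorator and Task class above push blocking work onto the named
# executor pools; the function names below are hypothetical examples.

@usepool('short')
def _example_read_file(path):
    # Runs in the 'short' thread pool; callers must be gen.coroutines and
    # yield on the result, as noted in the usepool docstring.
    with open(path) as f:
        return f.read()

@gen.coroutine
def _example_run_in_pool(fn, *args, **kwargs):
    # Equivalent explicit form using Task directly, mirroring the usage
    # described in the Task docstring above.
    t = Task('long')
    result = yield t.run(fn, *args, **kwargs)
    raise gen.Return(result)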
class ThreadedDict(object):
"""A dict of values keyed by thread id."""
def __init__(self, ctor_func):
self.ctor_func = ctor_func
self._thrdict = dict()
def get(self):
"""Get the ID of the current thread. If there exists a value in the dict for that thread, return it.
Otherwise, construct one and return that."""
thrid = threading.get_ident()
try:
return self._thrdict[thrid]
except KeyError:
#No value! Create it.
self._thrdict[thrid] = self.ctor_func()
return self._thrdict[thrid]
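# Hedged illustration (assumed, not part of the original module): ThreadedDict
# lazily builds one value per calling thread, e.g. a per-thread scratch dict.
_example_per_thread_cache = ThreadedDict(dict)

def _example_thread_cache():
    """Return the dict owned by the calling thread, creating it on first use."""
    return _example_per_thread_cache.get()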
| bsd-3-clause | 4,405,503,052,446,012,000 | 31.723077 | 108 | 0.649741 | false |
planetarymike/IDL-Colorbars | IDL_py_test/107_Multihue_Blue3.py | 1 | 8419 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0.0000208612, 0.0000200049, 0.0000198463],
[0.000378464, 0.000334228, 0.000406941],
[0.00109526, 0.000946811, 0.00122278],
[0.00213279, 0.00181849, 0.00245175],
[0.00347066, 0.002929, 0.00409539],
[0.0050945, 0.00426533, 0.00616089],
[0.00699278, 0.00581839, 0.00865833],
[0.00915552, 0.00758153, 0.0115996],
[0.0115736, 0.0095498, 0.0149977],
[0.0142383, 0.0117195, 0.0188666],
[0.0171413, 0.014088, 0.023221],
[0.0202742, 0.0166534, 0.0280761],
[0.0236288, 0.0194145, 0.0334475],
[0.0271967, 0.0223705, 0.0393512],
[0.0309693, 0.0255214, 0.0455235],
[0.0349381, 0.0288675, 0.0516749],
[0.0390942, 0.0324094, 0.05782],
[0.0433178, 0.0361482, 0.0639625],
[0.0474437, 0.0400853, 0.0701057],
[0.0514836, 0.044063, 0.0762523],
[0.0554396, 0.048003, 0.0824045],
[0.059313, 0.0519144, 0.0885641],
[0.0631051, 0.0558004, 0.0947325],
[0.0668168, 0.0596644, 0.100911],
[0.0704486, 0.0635092, 0.107101],
[0.0740011, 0.0673375, 0.113302],
[0.0774745, 0.0711518, 0.119515],
[0.0808688, 0.0749544, 0.125741],
[0.0841838, 0.0787476, 0.131979],
[0.0874195, 0.0825333, 0.138229],
[0.0905753, 0.0863135, 0.144492],
[0.0936507, 0.09009, 0.150766],
[0.0966453, 0.0938646, 0.157052],
[0.0995582, 0.0976388, 0.163349],
[0.102389, 0.101414, 0.169655],
[0.105136, 0.105192, 0.175971],
[0.107799, 0.108975, 0.182295],
[0.110376, 0.112762, 0.188626],
[0.112867, 0.116557, 0.194963],
[0.11527, 0.120359, 0.201305],
[0.117584, 0.124171, 0.207651],
[0.119807, 0.127992, 0.213999],
[0.121939, 0.131825, 0.220347],
[0.123977, 0.13567, 0.226695],
[0.12592, 0.139529, 0.23304],
[0.127766, 0.143401, 0.239381],
[0.129514, 0.147289, 0.245717],
[0.131161, 0.151192, 0.252045],
[0.132707, 0.155112, 0.258364],
[0.134148, 0.15905, 0.264671],
[0.135483, 0.163005, 0.270966],
[0.13671, 0.166979, 0.277245],
[0.137826, 0.170972, 0.283507],
[0.138829, 0.174986, 0.28975],
[0.139717, 0.179019, 0.295971],
[0.140486, 0.183073, 0.30217],
[0.141136, 0.187149, 0.308342],
[0.141661, 0.191246, 0.314487],
[0.142061, 0.195364, 0.320603],
[0.142331, 0.199506, 0.326686],
[0.142469, 0.203669, 0.332734],
[0.142471, 0.207856, 0.338747],
[0.142333, 0.212065, 0.34472],
[0.142052, 0.216297, 0.350653],
[0.141623, 0.220552, 0.356542],
[0.141043, 0.22483, 0.362386],
[0.140306, 0.229131, 0.368182],
[0.139408, 0.233456, 0.373929],
[0.138343, 0.237803, 0.379623],
[0.137105, 0.242173, 0.385263],
[0.135688, 0.246565, 0.390847],
[0.134084, 0.25098, 0.396373],
[0.132287, 0.255416, 0.401838],
[0.130288, 0.259875, 0.407241],
[0.128076, 0.264355, 0.41258],
[0.125642, 0.268855, 0.417852],
[0.122973, 0.273377, 0.423056],
[0.120055, 0.277918, 0.428189],
[0.116871, 0.282479, 0.433251],
[0.113404, 0.287059, 0.43824],
[0.109631, 0.291658, 0.443153],
[0.105526, 0.296274, 0.447989],
[0.101057, 0.300907, 0.452747],
[0.0961837, 0.305557, 0.457425],
[0.0908579, 0.310223, 0.462022],
[0.0850158, 0.314904, 0.466536],
[0.0785739, 0.319599, 0.470967],
[0.0714179, 0.324307, 0.475313],
[0.0633846, 0.329029, 0.479573],
[0.0542272, 0.333762, 0.483746],
[0.0435402, 0.338506, 0.487831],
[0.0311184, 0.34326, 0.491827],
[0.0182863, 0.348024, 0.495735],
[0.00518918, 0.352795, 0.499552],
[0., 0.357574, 0.503279],
[0., 0.36236, 0.506915],
[0., 0.367151, 0.51046],
[0., 0.371947, 0.513914],
[0., 0.376746, 0.517276],
[0., 0.381548, 0.520546],
[0., 0.386352, 0.523725],
[0., 0.391156, 0.526812],
[0., 0.39596, 0.529808],
[0., 0.400763, 0.532713],
[0., 0.405563, 0.535528],
[0., 0.410361, 0.538253],
[0., 0.415154, 0.540889],
[0., 0.419943, 0.543436],
[0., 0.424725, 0.545895],
[0., 0.4295, 0.548267],
[0., 0.434268, 0.550553],
[0., 0.439027, 0.552755],
[0., 0.443776, 0.554872],
[0., 0.448515, 0.556907],
[0., 0.453242, 0.558861],
[0., 0.457957, 0.560735],
[0., 0.46266, 0.562531],
[0., 0.467349, 0.56425],
[0., 0.472023, 0.565894],
[0., 0.476682, 0.567464],
[0., 0.481325, 0.568962],
[0., 0.485951, 0.57039],
[0., 0.49056, 0.571751],
[0., 0.495151, 0.573044],
[0., 0.499724, 0.574274],
[0., 0.504277, 0.575442],
[0., 0.50881, 0.576549],
[0., 0.513324, 0.577599],
[0., 0.517816, 0.578593],
[0., 0.522288, 0.579534],
[0., 0.526737, 0.580423],
[0., 0.531165, 0.581263],
[0., 0.535569, 0.582057],
[0., 0.539952, 0.582807],
[0., 0.54431, 0.583516],
[0., 0.548646, 0.584185],
[0., 0.552957, 0.584817],
[0., 0.557244, 0.585415],
[0., 0.561507, 0.585982],
[0., 0.565746, 0.586519],
[0., 0.56996, 0.58703],
[0., 0.574149, 0.587517],
[0., 0.578314, 0.587982],
[0., 0.582453, 0.588429],
[0., 0.586567, 0.58886],
[0., 0.590656, 0.589277],
[0., 0.59472, 0.589684],
[0., 0.59876, 0.590082],
[0., 0.602774, 0.590475],
[0., 0.606763, 0.590865],
[0., 0.610727, 0.591254],
[0., 0.614667, 0.591647],
[0., 0.618582, 0.592044],
[0.00946509, 0.622473, 0.592449],
[0.046316, 0.626339, 0.592864],
[0.0755715, 0.630182, 0.593292],
[0.0986812, 0.634001, 0.593736],
[0.118566, 0.637796, 0.594198],
[0.136422, 0.641568, 0.59468],
[0.152868, 0.645318, 0.595186],
[0.168275, 0.649044, 0.595717],
[0.182879, 0.652749, 0.596277],
[0.196845, 0.656431, 0.596868],
[0.210289, 0.660093, 0.597492],
[0.223301, 0.663733, 0.598151],
[0.235945, 0.667352, 0.598849],
[0.248276, 0.670951, 0.599587],
[0.260333, 0.67453, 0.600369],
[0.272151, 0.67809, 0.601195],
[0.283758, 0.681632, 0.602069],
[0.295175, 0.685154, 0.602993],
[0.306423, 0.688659, 0.603969],
[0.317517, 0.692147, 0.605],
[0.328472, 0.695618, 0.606087],
[0.339298, 0.699073, 0.607232],
[0.350006, 0.702512, 0.608439],
[0.360605, 0.705935, 0.609708],
[0.371102, 0.709345, 0.611042],
[0.381504, 0.712741, 0.612444],
[0.391817, 0.716123, 0.613913],
[0.402046, 0.719493, 0.615454],
[0.412196, 0.722851, 0.617067],
[0.422269, 0.726197, 0.618755],
[0.432271, 0.729533, 0.620519],
[0.442203, 0.732859, 0.622361],
[0.452069, 0.736176, 0.624282],
[0.461871, 0.739484, 0.626285],
[0.471612, 0.742784, 0.62837],
[0.481292, 0.746077, 0.63054],
[0.490915, 0.749364, 0.632795],
[0.500481, 0.752645, 0.635138],
[0.509992, 0.755921, 0.63757],
[0.519448, 0.759193, 0.640091],
[0.528852, 0.762461, 0.642703],
[0.538203, 0.765727, 0.645408],
[0.547503, 0.76899, 0.648207],
[0.556753, 0.772253, 0.6511],
[0.565952, 0.775515, 0.654088],
[0.575102, 0.778778, 0.657173],
[0.584203, 0.782042, 0.660356],
[0.593255, 0.785309, 0.663638],
[0.602258, 0.788578, 0.667018],
[0.611213, 0.791851, 0.670498],
[0.62012, 0.795128, 0.67408],
[0.62898, 0.798411, 0.677762],
[0.637791, 0.8017, 0.681546],
[0.646555, 0.804996, 0.685432],
[0.65527, 0.8083, 0.689421],
[0.663938, 0.811613, 0.693514],
[0.672558, 0.814935, 0.697709],
[0.68113, 0.818268, 0.702009],
[0.689654, 0.821612, 0.706412],
[0.698129, 0.824969, 0.710919],
[0.706557, 0.828338, 0.715529],
[0.714935, 0.831722, 0.720244],
[0.723265, 0.835121, 0.725063],
[0.731545, 0.838535, 0.729985],
[0.739776, 0.841966, 0.735011],
[0.747957, 0.845415, 0.74014],
[0.756089, 0.848882, 0.745371],
[0.76417, 0.852368, 0.750705],
[0.772201, 0.855875, 0.756141],
[0.780181, 0.859403, 0.761678],
[0.788109, 0.862952, 0.767316],
[0.795987, 0.866525, 0.773054],
[0.803812, 0.870122, 0.77889],
[0.811585, 0.873744, 0.784825],
[0.819306, 0.877391, 0.790856],
[0.826974, 0.881065, 0.796984],
[0.834588, 0.884766, 0.803207],
[0.842149, 0.888496, 0.809524],
[0.849656, 0.892256, 0.815933],
[0.857108, 0.896045, 0.822433],
[0.864506, 0.899866, 0.829022],
[0.871849, 0.903719, 0.835699],
[0.879136, 0.907605, 0.842462],
[0.886368, 0.911526, 0.849309],
[0.893543, 0.915481, 0.856238],
[0.900662, 0.919472, 0.863248],
[0.907724, 0.9235, 0.870334],
[0.914728, 0.927566, 0.877496],
[0.921675, 0.931671, 0.88473],
[0.928565, 0.935816, 0.892034],
[0.935395, 0.940002, 0.899403],
[0.942167, 0.944229, 0.906835],
[0.94888, 0.9485, 0.914325],
[0.955533, 0.952815, 0.92187],
[0.962126, 0.957176, 0.929463],
[0.968658, 0.961584, 0.937099],
[0.975129, 0.96604, 0.944771],
[0.981537, 0.970546, 0.952469],
[0.98788, 0.975106, 0.960183],
[0.994157, 0.979721, 0.967894],
[1., 0.984397, 0.975579],
[1., 0.989144, 0.983189],
[1., 0.994001, 0.990529]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 | 2,496,637,532,381,972,500 | 29.726277 | 69 | 0.64295 | false |
huggingface/pytorch-transformers | src/transformers/models/funnel/__init__.py | 1 | 3752 | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _BaseLazyModule, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
if is_torch_available():
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"load_tf_weights_in_funnel",
]
if is_tf_available():
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
if is_tokenizers_available():
from .tokenization_funnel_fast import FunnelTokenizerFast
if is_torch_available():
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
load_tf_weights_in_funnel,
)
if is_tf_available():
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
else:
import importlib
import os
import sys
class _LazyModule(_BaseLazyModule):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def _get_module(self, module_name: str):
return importlib.import_module("." + module_name, self.__name__)
sys.modules[__name__] = _LazyModule(__name__, _import_structure)
| apache-2.0 | -220,078,560,576,268,740 | 32.801802 | 115 | 0.668443 | false |
ch1huizong/dj | bookmarks/bookmarks/settings.py | 1 | 4531 | """
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
from django.urls import reverse_lazy
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-u2yf301#o_1%qs#fytmr$d1$*dzpxo7x-k^is!lyfyix4czs1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'dj.com',
'localhost',
'127.0.0.1',
'e5bd050d.ngrok.io',
]
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'images.apps.ImagesConfig',
'actions.apps.ActionsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'sorl.thumbnail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'bookmarks',
'USER': 'postgres',
'PASSWORD': 'quiet',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
# email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# auth
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.google.GoogleOAuth2',
]
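# Hedged sketch (assumed, not the project's actual code): the custom
# 'account.authentication.EmailAuthBackend' referenced above typically
# authenticates against User.email instead of username, roughly like this.
class _ExampleEmailAuthBackend(object):
    def authenticate(self, request=None, username=None, password=None):
        from django.contrib.auth.models import User  # deferred: settings load early
        try:
            user = User.objects.get(email=username)
            return user if user.check_password(password) else None
        except User.DoesNotExist:
            return None

    def get_user(self, user_id):
        from django.contrib.auth.models import User
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None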
SOCIAL_AUTH_FACEBOOK_KEY = '1888178554562550'
SOCIAL_AUTH_FACEBOOK_SECRET = '9d6b83ebce4890e0890fd1269b7d68a8'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email',]
SOCIAL_AUTH_TWITTER_KEY = ''
SOCIAL_AUTH_TWITTER_SECRET = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '329800332506-nm2hfiuan4usgpit6dohf6u7djeugoqh.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'Th6sThj4Xp-NWQ4t0ANncAcS'
# ssl
#SECURE_SSL_REDIRECT = True
# thumbnail
#THUMBNAIL_DEBUG = True
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: reverse_lazy('user_detail', args=[u.username]),
}
# redis
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
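# Hedged illustration (assumed, not part of the original settings module): the
# REDIS_* values above are typically consumed elsewhere in the project roughly
# like this (redis-py assumed to be installed).
def _example_redis_connection():
    import redis
    return redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)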
| unlicense | -7,262,526,610,636,818,000 | 24.59887 | 106 | 0.696314 | false |
mitocw/edx-platform | lms/djangoapps/course_api/views.py | 1 | 14554 | """
Course API Views
"""
from django.core.exceptions import ValidationError
from django.core.paginator import InvalidPage
from edx_rest_framework_extensions.paginators import NamespacedPageNumberPagination
from django.http import HttpResponseRedirect
from django.urls import reverse
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.throttling import UserRateThrottle
from rest_framework.exceptions import NotFound
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from . import USE_RATE_LIMIT_2_FOR_COURSE_LIST_API, USE_RATE_LIMIT_10_FOR_COURSE_LIST_API
from .api import course_detail, list_course_keys, list_courses
from .forms import CourseDetailGetForm, CourseIdListGetForm, CourseListGetForm
from .serializers import CourseDetailSerializer, CourseKeySerializer, CourseSerializer
# MIT-OLL : course Id for temporary redirection of a course
BIOLOGY_COURSE_ID = 'course-v1:OCW+Pre-7.01+1T2020'
@view_auth_classes(is_authenticated=False)
class CourseDetailView(DeveloperErrorViewMixin, RetrieveAPIView):
"""
**Use Cases**
Request details for a course
**Example Requests**
GET /api/courses/v1/courses/{course_key}/
**Response Values**
Body consists of the following fields:
* effort: A textual description of the weekly hours of effort expected
in the course.
* end: Date the course ends, in ISO 8601 notation
* enrollment_end: Date enrollment ends, in ISO 8601 notation
* enrollment_start: Date enrollment begins, in ISO 8601 notation
* id: A unique identifier of the course; a serialized representation
of the opaque key identifying the course.
* media: An object that contains named media items. Included here:
* course_image: An image to show for the course. Represented
as an object with the following fields:
* uri: The location of the image
* name: Name of the course
* number: Catalog number of the course
* org: Name of the organization that owns the course
* overview: A possibly verbose HTML textual description of the course.
Note: this field is only included in the Course Detail view, not
the Course List view.
* short_description: A textual description of the course
* start: Date the course begins, in ISO 8601 notation
* start_display: Readably formatted start of the course
* start_type: Hint describing how `start_display` is set. One of:
* `"string"`: manually set by the course author
* `"timestamp"`: generated from the `start` timestamp
* `"empty"`: no start date is specified
* pacing: Course pacing. Possible values: instructor, self
Deprecated fields:
* blocks_url: Used to fetch the course blocks
* course_id: Course key (use 'id' instead)
**Parameters:**
username (optional):
The username of the specified user for whom the course data
is being accessed. The username is not only required if the API is
requested by an Anonymous user.
**Returns**
* 200 on success with above fields.
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the course is not available or cannot be seen.
Example response:
{
"blocks_url": "/api/courses/v1/blocks/?course_id=edX%2Fexample%2F2012_Fall",
"media": {
"course_image": {
"uri": "/c4x/edX/example/asset/just_a_test.jpg",
"name": "Course Image"
}
},
"description": "An example course.",
"end": "2015-09-19T18:00:00Z",
"enrollment_end": "2015-07-15T00:00:00Z",
"enrollment_start": "2015-06-15T00:00:00Z",
"course_id": "edX/example/2012_Fall",
"name": "Example Course",
"number": "example",
"org": "edX",
"overview: "<p>A verbose description of the course.</p>"
"start": "2015-07-17T12:00:00Z",
"start_display": "July 17, 2015",
"start_type": "timestamp",
"pacing": "instructor"
}
"""
serializer_class = CourseDetailSerializer
def get_object(self):
"""
Return the requested course object, if the user has appropriate
permissions.
"""
requested_params = self.request.query_params.copy()
requested_params.update({'course_key': self.kwargs['course_key_string']})
form = CourseDetailGetForm(requested_params, initial={'requesting_user': self.request.user})
if not form.is_valid():
raise ValidationError(form.errors)
return course_detail(
self.request,
form.cleaned_data['username'],
form.cleaned_data['course_key'],
)
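def _example_fetch_course_detail(host="https://lms.example.com",
                                 course_key="course-v1:edX+DemoX+Demo_Course",
                                 username="staff"):
    """Hedged client-side sketch (not part of the original module) of calling
    the course detail endpoint documented above; the host, course key and
    username are placeholders assumed only for illustration."""
    import requests  # assumed available in the calling environment
    url = "{}/api/courses/v1/courses/{}/".format(host, course_key)
    return requests.get(url, params={"username": username}).json()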
class CourseListUserThrottle(UserRateThrottle):
"""Limit the number of requests users can make to the course list API."""
    # The course list endpoint queries various parts of the code inefficiently
    # and can take courseware down, so it needs to be rate limited until it is
    # optimized. LEARNER-5527
THROTTLE_RATES = {
'user': '20/minute',
'staff': '40/minute',
}
def check_for_switches(self):
if USE_RATE_LIMIT_2_FOR_COURSE_LIST_API.is_enabled():
self.THROTTLE_RATES = {
'user': '2/minute',
'staff': '10/minute',
}
elif USE_RATE_LIMIT_10_FOR_COURSE_LIST_API.is_enabled():
self.THROTTLE_RATES = {
'user': '10/minute',
'staff': '20/minute',
}
def allow_request(self, request, view):
self.check_for_switches()
# Use a special scope for staff to allow for a separate throttle rate
user = request.user
if user.is_authenticated and (user.is_staff or user.is_superuser):
self.scope = 'staff'
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
return super(CourseListUserThrottle, self).allow_request(request, view)
class LazyPageNumberPagination(NamespacedPageNumberPagination):
"""
NamespacedPageNumberPagination that works with a LazySequence queryset.
The paginator cache uses ``@cached_property`` to cache the property values for
count and num_pages. It assumes these won't change, but in the case of a
    LazySequence, its count gets updated as we move through it. This class clears
the cached property values before reporting results so they will be recalculated.
"""
def get_paginated_response(self, data):
# Clear the cached property values to recalculate the estimated count from the LazySequence
del self.page.paginator.__dict__['count']
del self.page.paginator.__dict__['num_pages']
        # paginate_queryset uses the cached number of pages; after we clear the cache,
        # the recalculated number of pages can differ, which raises an EmptyPage error
        # when accessing the previous page link. So we catch that exception and raise
        # a 404 instead. For more detail, see PROD-1222.
page_number = self.request.query_params.get(self.page_query_param, 1)
try:
self.page.paginator.validate_number(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=str(exc)
)
self.page.number = self.page.paginator.num_pages
raise NotFound(msg)
return super(LazyPageNumberPagination, self).get_paginated_response(data)
@view_auth_classes(is_authenticated=False)
class CourseListView(DeveloperErrorViewMixin, ListAPIView):
"""
**Use Cases**
Request information on all courses visible to the specified user.
**Example Requests**
GET /api/courses/v1/courses/
**Response Values**
Body comprises a list of objects as returned by `CourseDetailView`.
**Parameters**
search_term (optional):
Search term to filter courses (used by ElasticSearch).
username (optional):
The username of the specified user whose visible courses we
want to see. The username is not required only if the API is
requested by an Anonymous user.
org (optional):
If specified, visible `CourseOverview` objects are filtered
such that only those belonging to the organization with the
provided org code (e.g., "HarvardX") are returned.
Case-insensitive.
**Returns**
* 200 on success, with a list of course discovery objects as returned
by `CourseDetailView`.
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
another user specifies a username other than their own.
* 404 if the specified user does not exist, or the requesting user does
not have permission to view their courses.
Example response:
[
{
"blocks_url": "/api/courses/v1/blocks/?course_id=edX%2Fexample%2F2012_Fall",
"media": {
"course_image": {
"uri": "/c4x/edX/example/asset/just_a_test.jpg",
"name": "Course Image"
}
},
"description": "An example course.",
"end": "2015-09-19T18:00:00Z",
"enrollment_end": "2015-07-15T00:00:00Z",
"enrollment_start": "2015-06-15T00:00:00Z",
"course_id": "edX/example/2012_Fall",
"name": "Example Course",
"number": "example",
"org": "edX",
"start": "2015-07-17T12:00:00Z",
"start_display": "July 17, 2015",
"start_type": "timestamp"
}
]
"""
class CourseListPageNumberPagination(LazyPageNumberPagination):
max_page_size = 100
pagination_class = CourseListPageNumberPagination
serializer_class = CourseSerializer
throttle_classes = (CourseListUserThrottle,)
def get_queryset(self):
"""
Yield courses visible to the user.
"""
form = CourseListGetForm(self.request.query_params, initial={'requesting_user': self.request.user})
if not form.is_valid():
raise ValidationError(form.errors)
return list_courses(
self.request,
form.cleaned_data['username'],
org=form.cleaned_data['org'],
filter_=form.cleaned_data['filter_'],
search_term=form.cleaned_data['search_term']
)
class CourseIdListUserThrottle(UserRateThrottle):
"""Limit the number of requests users can make to the course list id API."""
THROTTLE_RATES = {
'user': '20/minute',
'staff': '40/minute',
}
def allow_request(self, request, view):
# Use a special scope for staff to allow for a separate throttle rate
user = request.user
if user.is_authenticated and (user.is_staff or user.is_superuser):
self.scope = 'staff'
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
return super(CourseIdListUserThrottle, self).allow_request(request, view)
@view_auth_classes()
class CourseIdListView(DeveloperErrorViewMixin, ListAPIView):
"""
**Use Cases**
Request a list of course IDs for all courses the specified user can
access based on the provided parameters.
**Example Requests**
GET /api/courses/v1/courses_ids/
**Response Values**
Body comprises a list of course ids and pagination details.
**Parameters**
username (optional):
The username of the specified user whose visible courses we
want to see.
role (required):
Course ids are filtered such that only those for which the
user has the specified role are returned. Role can be "staff"
or "instructor".
Case-insensitive.
**Returns**
* 200 on success, with a list of course ids and pagination details
* 400 if an invalid parameter was sent or the username was not provided
for an authenticated request.
* 403 if a user who does not have permission to masquerade as
          another user specifies a username other than their own.
* 404 if the specified user does not exist, or the requesting user does
not have permission to view their courses.
Example response:
{
"results":
[
"course-v1:edX+DemoX+Demo_Course"
],
"pagination": {
"previous": null,
"num_pages": 1,
"next": null,
"count": 1
}
}
"""
class CourseIdListPageNumberPagination(LazyPageNumberPagination):
max_page_size = 1000
pagination_class = CourseIdListPageNumberPagination
serializer_class = CourseKeySerializer
throttle_classes = (CourseIdListUserThrottle,)
def get_queryset(self):
"""
Returns CourseKeys for courses which the user has the provided role.
"""
form = CourseIdListGetForm(self.request.query_params, initial={'requesting_user': self.request.user})
if not form.is_valid():
raise ValidationError(form.errors)
return list_course_keys(
self.request,
form.cleaned_data['username'],
role=form.cleaned_data['role'],
)
def redirect_courses(request):
return HttpResponseRedirect((reverse('about_course', kwargs={'course_id': BIOLOGY_COURSE_ID})))
| agpl-3.0 | -6,341,263,580,877,471,000 | 36.413882 | 109 | 0.616669 | false |
cwolferh/heat-scratch | contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py | 1 | 27413 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests.openstack.nova import fakes
from heat.tests import utils
from ..resources import cloud_server # noqa
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"key_name" : {
"Description" : "key_name",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "OS::Nova::Server",
"Properties": {
"image" : "CentOS 5.2",
"flavor" : "256 MB Server",
"key_name" : "test",
"user_data" : "wordpress"
}
}
}
}
'''
cfg.CONF.import_opt('region_name_for_services', 'heat.common.config')
class CloudServersTest(common.HeatTestCase):
def setUp(self):
super(CloudServersTest, self).setUp()
cfg.CONF.set_override('region_name_for_services', 'RegionOne',
enforce_type=True)
self.ctx = utils.dummy_context()
self.fc = fakes.FakeClient()
mock_nova_create = mock.Mock()
self.ctx.clients.client_plugin(
'nova')._create = mock_nova_create
mock_nova_create.return_value = self.fc
        # The test environment may not have the pyrax client library installed,
        # and if pyrax is not installed the resource class would not be registered.
        # So register the resource provider class explicitly for unit testing.
resource._register_class("OS::Nova::Server",
cloud_server.CloudServer)
def _setup_test_stack(self, stack_name):
t = template_format.parse(wp_template)
templ = template.Template(
t, env=environment.Environment({'key_name': 'test'}))
self.stack = parser.Stack(self.ctx, stack_name, templ,
stack_id=uuidutils.generate_uuid())
return (templ, self.stack)
def _setup_test_server(self, return_server, name, image_id=None,
override_name=False, stub_create=True):
stack_name = '%s_s' % name
(tmpl, stack) = self._setup_test_stack(stack_name)
tmpl.t['Resources']['WebServer']['Properties'][
'image'] = image_id or 'CentOS 5.2'
tmpl.t['Resources']['WebServer']['Properties'][
'flavor'] = '256 MB Server'
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value='aaaaaa')
self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
return_value=1)
self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
return_value=1)
server_name = '%s' % name
if override_name:
tmpl.t['Resources']['WebServer']['Properties'][
'name'] = server_name
resource_defns = tmpl.resource_definitions(stack)
server = cloud_server.CloudServer(server_name,
resource_defns['WebServer'],
stack)
self.patchobject(nova.NovaClientPlugin, '_create',
return_value=self.fc)
self.patchobject(server, 'store_external_ports')
if stub_create:
self.patchobject(self.fc.servers, 'create',
return_value=return_server)
# mock check_create_complete innards
self.patchobject(self.fc.servers, 'get',
return_value=return_server)
return server
def _create_test_server(self, return_server, name, override_name=False,
stub_create=True):
server = self._setup_test_server(return_server, name,
stub_create=stub_create)
scheduler.TaskRunner(server.create)()
return server
def _mock_metadata_os_distro(self):
image_data = mock.Mock(metadata={'os_distro': 'centos'})
self.fc.images.get = mock.Mock(return_value=image_data)
def test_rackconnect_deployed(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {
'rackconnect_automation_status': 'DEPLOYED',
'rax_service_level_automation': 'Complete',
}
server = self._setup_test_server(return_server,
'test_rackconnect_deployed')
server.context.roles = ['rack_connect']
scheduler.TaskRunner(server.create)()
self.assertEqual('CREATE', server.action)
self.assertEqual('COMPLETE', server.status)
def test_rackconnect_failed(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {
'rackconnect_automation_status': 'FAILED',
'rax_service_level_automation': 'Complete',
}
server = self._setup_test_server(return_server,
'test_rackconnect_failed')
server.context.roles = ['rack_connect']
create = scheduler.TaskRunner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_rackconnect_failed: '
'RackConnect automation FAILED',
six.text_type(exc))
def test_rackconnect_unprocessable(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {
'rackconnect_automation_status': 'UNPROCESSABLE',
'rackconnect_unprocessable_reason': 'Fake reason',
'rax_service_level_automation': 'Complete',
}
server = self._setup_test_server(return_server,
'test_rackconnect_unprocessable')
server.context.roles = ['rack_connect']
scheduler.TaskRunner(server.create)()
self.assertEqual('CREATE', server.action)
self.assertEqual('COMPLETE', server.status)
def test_rackconnect_unknown(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {
'rackconnect_automation_status': 'FOO',
'rax_service_level_automation': 'Complete',
}
server = self._setup_test_server(return_server,
'test_rackconnect_unknown')
server.context.roles = ['rack_connect']
create = scheduler.TaskRunner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_rackconnect_unknown: '
'Unknown RackConnect automation status: FOO',
six.text_type(exc))
def test_rackconnect_deploying(self):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'srv_sts_bld')
server.resource_id = 1234
server.context.roles = ['rack_connect']
check_iterations = [0]
# Bind fake get method which check_create_complete will call
def activate_status(server):
check_iterations[0] += 1
if check_iterations[0] == 1:
return_server.metadata.update({
'rackconnect_automation_status': 'DEPLOYING',
'rax_service_level_automation': 'Complete',
})
if check_iterations[0] == 2:
return_server.status = 'ACTIVE'
if check_iterations[0] > 3:
return_server.metadata.update({
'rackconnect_automation_status': 'DEPLOYED',
})
return return_server
self.patchobject(self.fc.servers, 'get',
side_effect=activate_status)
scheduler.TaskRunner(server.create)()
self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_rackconnect_no_status(self):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'srv_sts_bld')
server.resource_id = 1234
server.context.roles = ['rack_connect']
check_iterations = [0]
# Bind fake get method which check_create_complete will call
def activate_status(server):
check_iterations[0] += 1
if check_iterations[0] == 1:
return_server.status = 'ACTIVE'
if check_iterations[0] > 2:
return_server.metadata.update({
'rackconnect_automation_status': 'DEPLOYED',
'rax_service_level_automation': 'Complete'})
return return_server
self.patchobject(self.fc.servers, 'get',
side_effect=activate_status)
scheduler.TaskRunner(server.create)()
self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_rax_automation_lifecycle(self):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'srv_sts_bld')
server.resource_id = 1234
server.context.roles = ['rack_connect']
server.metadata = {}
check_iterations = [0]
# Bind fake get method which check_create_complete will call
def activate_status(server):
check_iterations[0] += 1
if check_iterations[0] == 1:
return_server.status = 'ACTIVE'
if check_iterations[0] == 2:
return_server.metadata = {
'rackconnect_automation_status': 'DEPLOYED'}
if check_iterations[0] == 3:
return_server.metadata = {
'rackconnect_automation_status': 'DEPLOYED',
'rax_service_level_automation': 'In Progress'}
if check_iterations[0] > 3:
return_server.metadata = {
'rackconnect_automation_status': 'DEPLOYED',
'rax_service_level_automation': 'Complete'}
return return_server
self.patchobject(self.fc.servers, 'get',
side_effect=activate_status)
scheduler.TaskRunner(server.create)()
self.assertEqual((server.CREATE, server.COMPLETE), server.state)
def test_add_port_for_addresses(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {'rax_service_level_automation': 'Complete'}
stack_name = 'test_stack'
(tmpl, stack) = self._setup_test_stack(stack_name)
resource_defns = tmpl.resource_definitions(stack)
self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
return_value=1)
self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
return_value=1)
server = cloud_server.CloudServer('WebServer',
resource_defns['WebServer'], stack)
self.patchobject(server, 'store_external_ports')
class Interface(object):
def __init__(self, id, addresses):
self.identifier = id
self.addresses = addresses
@property
def id(self):
return self.identifier
@property
def ip_addresses(self):
return self.addresses
interfaces = [
{
"id": "port-uuid-1",
"ip_addresses": [
{
"address": "4.5.6.7",
"network_id": "00xx000-0xx0-0xx0-0xx0-00xxx000",
"network_label": "public"
},
{
"address": "2001:4802:7805:104:be76:4eff:fe20:2063",
"network_id": "00xx000-0xx0-0xx0-0xx0-00xxx000",
"network_label": "public"
}
],
"mac_address": "fa:16:3e:8c:22:aa"
},
{
"id": "port-uuid-2",
"ip_addresses": [
{
"address": "5.6.9.8",
"network_id": "11xx1-1xx1-xx11-1xx1-11xxxx11",
"network_label": "public"
}
],
"mac_address": "fa:16:3e:8c:44:cc"
},
{
"id": "port-uuid-3",
"ip_addresses": [
{
"address": "10.13.12.13",
"network_id": "1xx1-1xx1-xx11-1xx1-11xxxx11",
"network_label": "private"
}
],
"mac_address": "fa:16:3e:8c:44:dd"
}
]
ifaces = [Interface(i['id'], i['ip_addresses']) for i in interfaces]
expected = {
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa':
[{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:22:aa',
'addr': '4.5.6.7',
'port': 'port-uuid-1',
'version': 4},
{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:33:bb',
'addr': '5.6.9.8',
'port': 'port-uuid-2',
'version': 4}],
'private': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:44:cc',
'addr': '10.13.12.13',
'port': 'port-uuid-3',
'version': 4}],
'public': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:22:aa',
'addr': '4.5.6.7',
'port': 'port-uuid-1',
'version': 4},
{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:33:bb',
'addr': '5.6.9.8',
'port': 'port-uuid-2',
'version': 4}]}
server.client = mock.Mock()
mock_client = mock.Mock()
server.client.return_value = mock_client
mock_ext = mock_client.os_virtual_interfacesv2_python_novaclient_ext
mock_ext.list.return_value = ifaces
resp = server._add_port_for_address(return_server)
self.assertEqual(expected, resp)
def test_rax_automation_build_error(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {'rax_service_level_automation':
'Build Error'}
server = self._setup_test_server(return_server,
'test_managed_cloud_build_error')
create = scheduler.TaskRunner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_managed_cloud_build_error: '
'Rackspace Cloud automation failed',
six.text_type(exc))
def test_rax_automation_unknown(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {'rax_service_level_automation': 'FOO'}
server = self._setup_test_server(return_server,
'test_managed_cloud_unknown')
create = scheduler.TaskRunner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_managed_cloud_unknown: '
'Unknown Rackspace Cloud automation status: FOO',
six.text_type(exc))
def _test_server_config_drive(self, user_data, config_drive, result,
ud_format='RAW'):
return_server = self.fc.servers.list()[1]
return_server.metadata = {'rax_service_level_automation': 'Complete'}
stack_name = 'no_user_data'
self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
return_value=1)
self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
return_value=1)
(tmpl, stack) = self._setup_test_stack(stack_name)
properties = tmpl.t['Resources']['WebServer']['Properties']
properties['user_data'] = user_data
properties['config_drive'] = config_drive
properties['user_data_format'] = ud_format
properties['software_config_transport'] = "POLL_TEMP_URL"
resource_defns = tmpl.resource_definitions(stack)
server = cloud_server.CloudServer('WebServer',
resource_defns['WebServer'], stack)
server.metadata = {'rax_service_level_automation': 'Complete'}
self.patchobject(server, 'store_external_ports')
self.patchobject(server, "_populate_deployments_metadata")
mock_servers_create = mock.Mock(return_value=return_server)
self.fc.servers.create = mock_servers_create
self.patchobject(self.fc.servers, 'get',
return_value=return_server)
scheduler.TaskRunner(server.create)()
mock_servers_create.assert_called_with(
image=mock.ANY,
flavor=mock.ANY,
key_name=mock.ANY,
name=mock.ANY,
security_groups=mock.ANY,
userdata=mock.ANY,
scheduler_hints=mock.ANY,
meta=mock.ANY,
nics=mock.ANY,
availability_zone=mock.ANY,
block_device_mapping=mock.ANY,
block_device_mapping_v2=mock.ANY,
config_drive=result,
disk_config=mock.ANY,
reservation_id=mock.ANY,
files=mock.ANY,
admin_pass=mock.ANY)
def test_server_user_data_no_config_drive(self):
self._test_server_config_drive("my script", False, True)
def test_server_user_data_config_drive(self):
self._test_server_config_drive("my script", True, True)
def test_server_no_user_data_config_drive(self):
self._test_server_config_drive(None, True, True)
def test_server_no_user_data_no_config_drive(self):
self._test_server_config_drive(None, False, False)
def test_server_no_user_data_software_config(self):
self._test_server_config_drive(None, False, True,
ud_format="SOFTWARE_CONFIG")
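    # The cases above expect config_drive=True whenever user_data is supplied,
    # config_drive is explicitly requested, or the SOFTWARE_CONFIG user_data
    # format is used; only the bare no-user-data case expects False.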
@mock.patch.object(resource.Resource, "client_plugin")
@mock.patch.object(resource.Resource, "client")
class CloudServersValidationTests(common.HeatTestCase):
def setUp(self):
super(CloudServersValidationTests, self).setUp()
resource._register_class("OS::Nova::Server", cloud_server.CloudServer)
properties_server = {
"image": "CentOS 5.2",
"flavor": "256 MB Server",
"key_name": "test",
"user_data": "wordpress",
}
self.mockstack = mock.Mock()
self.mockstack.has_cache_data.return_value = False
self.mockstack.db_resource_get.return_value = None
self.rsrcdef = rsrc_defn.ResourceDefinition(
"test", cloud_server.CloudServer, properties=properties_server)
def test_validate_no_image(self, mock_client, mock_plugin):
properties_server = {
"flavor": "256 MB Server",
"key_name": "test",
"user_data": "wordpress",
}
rsrcdef = rsrc_defn.ResourceDefinition(
"test", cloud_server.CloudServer, properties=properties_server)
mock_plugin().find_flavor_by_name_or_id.return_value = 1
server = cloud_server.CloudServer("test", rsrcdef, self.mockstack)
mock_boot_vol = self.patchobject(
server, '_validate_block_device_mapping')
mock_boot_vol.return_value = True
self.assertIsNone(server.validate())
def test_validate_no_image_bfv(self, mock_client, mock_plugin):
properties_server = {
"flavor": "256 MB Server",
"key_name": "test",
"user_data": "wordpress",
}
rsrcdef = rsrc_defn.ResourceDefinition(
"test", cloud_server.CloudServer, properties=properties_server)
mock_plugin().find_flavor_by_name_or_id.return_value = 1
server = cloud_server.CloudServer("test", rsrcdef, self.mockstack)
mock_boot_vol = self.patchobject(
server, '_validate_block_device_mapping')
mock_boot_vol.return_value = True
mock_flavor = mock.Mock(ram=4)
mock_flavor.to_dict.return_value = {
'OS-FLV-WITH-EXT-SPECS:extra_specs': {
'class': 'standard1',
},
}
mock_plugin().get_flavor.return_value = mock_flavor
error = self.assertRaises(
exception.StackValidationFailed, server.validate)
self.assertEqual(
'Flavor 256 MB Server cannot be booted from volume.',
six.text_type(error))
def test_validate_bfv_volume_only(self, mock_client, mock_plugin):
mock_plugin().find_flavor_by_name_or_id.return_value = 1
mock_plugin().find_image_by_name_or_id.return_value = 1
server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
mock_flavor = mock.Mock(ram=4, disk=4)
mock_flavor.to_dict.return_value = {
'OS-FLV-WITH-EXT-SPECS:extra_specs': {
'class': 'memory1',
},
}
mock_image = mock.Mock(status='ACTIVE', min_ram=2, min_disk=1)
mock_image.get.return_value = "memory1"
mock_image.__iter__ = mock.Mock(return_value=iter([]))
mock_plugin().get_flavor.return_value = mock_flavor
mock_plugin().get_image.return_value = mock_image
error = self.assertRaises(
exception.StackValidationFailed, server.validate)
self.assertEqual(
'Flavor 256 MB Server must be booted from volume, '
'but image CentOS 5.2 was also specified.',
six.text_type(error))
def test_validate_image_flavor_excluded_class(self, mock_client,
mock_plugin):
mock_plugin().find_flavor_by_name_or_id.return_value = 1
mock_plugin().find_image_by_name_or_id.return_value = 1
server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
mock_image = mock.Mock(status='ACTIVE', min_ram=2, min_disk=1)
mock_image.get.return_value = "!standard1, *"
mock_image.__iter__ = mock.Mock(return_value=iter([]))
mock_flavor = mock.Mock(ram=4, disk=4)
mock_flavor.to_dict.return_value = {
'OS-FLV-WITH-EXT-SPECS:extra_specs': {
'class': 'standard1',
},
}
mock_plugin().get_flavor.return_value = mock_flavor
mock_plugin().get_image.return_value = mock_image
error = self.assertRaises(
exception.StackValidationFailed, server.validate)
self.assertEqual(
'Flavor 256 MB Server cannot be used with image CentOS 5.2.',
six.text_type(error))
def test_validate_image_flavor_ok(self, mock_client, mock_plugin):
mock_plugin().find_flavor_by_name_or_id.return_value = 1
mock_plugin().find_image_by_name_or_id.return_value = 1
server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
mock_image.get.return_value = "standard1"
mock_image.__iter__ = mock.Mock(return_value=iter([]))
mock_flavor = mock.Mock(ram=4, disk=4)
mock_flavor.to_dict.return_value = {
'OS-FLV-WITH-EXT-SPECS:extra_specs': {
'class': 'standard1',
'disk_io_index': 1,
},
}
mock_plugin().get_flavor.return_value = mock_flavor
mock_plugin().get_image.return_value = mock_image
self.assertIsNone(server.validate())
def test_validate_image_flavor_empty_metadata(self, mock_client,
mock_plugin):
server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
mock_image.get.return_value = ""
mock_image.__iter__ = mock.Mock(return_value=iter([]))
mock_flavor = mock.Mock(ram=4, disk=4)
mock_flavor.to_dict.return_value = {
'OS-FLV-WITH-EXT-SPECS:extra_specs': {
'flavor_classes': '',
},
}
mock_plugin().get_flavor.return_value = mock_flavor
mock_plugin().get_image.return_value = mock_image
self.assertIsNone(server.validate())
def test_validate_image_flavor_no_metadata(self, mock_client, mock_plugin):
server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
mock_image.get.return_value = None
mock_image.__iter__ = mock.Mock(return_value=iter([]))
mock_flavor = mock.Mock(ram=4, disk=4)
mock_flavor.to_dict.return_value = {}
mock_plugin().get_flavor.return_value = mock_flavor
mock_plugin().get_image.return_value = mock_image
self.assertIsNone(server.validate())
def test_validate_image_flavor_not_base(self, mock_client, mock_plugin):
server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
mock_image.get.return_value = None
mock_image.__iter__ = mock.Mock(return_value=iter(
['base_image_ref']))
mock_image.__getitem__ = mock.Mock(return_value='1234')
mock_base_image = mock.Mock(size=1, status='ACTIVE', min_ram=2,
min_disk=2)
mock_base_image.get.return_value = None
mock_base_image.__iter__ = mock.Mock(return_value=iter([]))
mock_flavor = mock.Mock(ram=4, disk=4)
mock_flavor.to_dict.return_value = {}
mock_plugin().get_flavor.return_value = mock_flavor
mock_plugin().get_image.side_effect = [mock_image, mock_base_image]
self.assertIsNone(server.validate())
| apache-2.0 | -159,389,097,521,806,660 | 40.346908 | 79 | 0.563163 | false |
tomwright01/AO_Registration | example.py | 1 | 1605 | import AoRegistration.AoRecording as AoRecording
import timeit
import logging
import argparse
def main():
"""
"""
logging.info('Reading file:%s','data/sample.avi')
vid = AoRecording.AoRecording(filepath='data/sample.avi')
vid.load_video()
logging.info('Starting parallel processing')
tic=timeit.default_timer()
vid.filter_frames()
vid.fixed_align_frames()
vid.complete_align_parallel()
vid.create_average_frame()
vid.create_stdev_frame()
toc = timeit.default_timer()
print 'Parallel Process took {}:'.format(toc-tic)
vid.create_stdev_frame()
logging.info('writing output')
vid.write_video('output/output_parallel.avi')
vid.write_average_frame('output/lucky_average_parallel.png')
vid.write_frame('output/lucky_stdev.png','stdev')
logging.info('Starting serial processing')
tic=timeit.default_timer()
vid.filter_frames()
vid.fixed_align_frames()
vid.complete_align()
vid.create_average_frame()
toc = timeit.default_timer()
print 'Serial Process took {}:'.format(toc-tic)
logging.info('writing output')
vid.write_video('output/output_serial.avi')
vid.write_frame('output/lucky_average_serial.png','average')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Register frames from an AO video')
parser.add_argument('-v','--verbose',help='Increase the amount of output', action='store_true')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.info('started')
main() | mit | 5,573,137,991,126,652,000 | 29.301887 | 99 | 0.674766 | false |
rmadapur/networking-brocade | networking_brocade/mlx/ml2/fi_ni/driver_factory.py | 1 | 3345 | # Copyright 2015 Brocade Communications Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Returns the driver based on the firmware version of the device
"""
from neutron.i18n import _LE
from neutron.i18n import _LI
from oslo_log import log as logging
from oslo_utils import importutils
LOG = logging.getLogger(__name__)
CONNECTION_FACTORY = ("networking_brocade.mlx.ml2.connector_factory."
"ConnectorFactory")
FI_DRIVER = "networking_brocade.mlx.ml2.fi_ni.fi_driver.FastIronDriver"
NI_DRIVER = "networking_brocade.mlx.ml2.fi_ni.ni_driver.NetIronDriver"
NETIRON = "NetIron"
FASTIRON = "ICX"
FI = "FI"
NI = "NI"
class BrocadeDriverFactory(object):
"""
Factory class that decides which driver to use based on the
device type. It uses FastIron driver for ICX devices and
NetIron driver for MLX devices
"""
def get_driver(self, device):
"""
Returns the driver based on the firmware.
:param:device: A dictionary which has the device details
:returns: Appropriate driver for the device based on the firmware
version, None otherwise
:raises: Exception
"""
driver = None
address = device.get('address')
os_type = device.get('ostype')
if os_type == FI:
driver = importutils.import_object(FI_DRIVER, device)
elif os_type == NI:
driver = importutils.import_object(NI_DRIVER, device)
else:
connector = importutils.import_object(CONNECTION_FACTORY
).get_connector(device)
connector.connect()
version = connector.get_version()
connector.close_session()
if NETIRON in version:
LOG.info(
_LI("OS Type of the device %(host)s is as NetIron"),
{'host': address})
driver = importutils.import_object(NI_DRIVER, device)
device.update({'ostype': NI})
elif FASTIRON in version:
LOG.info(
_LI("OS Type of the device %(host)s is as FastIron"),
{'host': device.get('address')})
driver = importutils.import_object(FI_DRIVER, device)
device.update({'ostype': FI})
else:
LOG.exception(_LE("Brocade Driver Factory: failed to "
"identify device type for device="
"%(device)s"), {'device': address})
raise Exception("Unsupported firmware %(firmware)s for device "
"%(host)s", {'firmware': version,
'host': address})
return driver
| apache-2.0 | -6,562,487,558,882,334,000 | 37.448276 | 79 | 0.592526 | false |
carragom/modoboa | modoboa/admin/models/domain.py | 1 | 9835 | """Models related to domains management."""
import datetime
from django.db import models
from django.db.models.manager import Manager
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _, ugettext_lazy
from django.contrib.contenttypes.fields import GenericRelation
from reversion import revisions as reversion
from modoboa.core import signals as core_signals
from modoboa.core.models import User, ObjectAccess
from modoboa.lib.exceptions import BadRequest, Conflict
from modoboa.parameters import tools as param_tools
from .. import signals
from .base import AdminObject
from .. import constants
class DomainManager(Manager):
def get_for_admin(self, admin):
"""Return the domains belonging to this admin
The result is a ``QuerySet`` object, so this function can be used
to fill ``ModelChoiceField`` objects.
"""
if admin.is_superuser:
return self.get_queryset()
return self.get_queryset().filter(owners__user=admin)
@python_2_unicode_compatible
class Domain(AdminObject):
"""Mail domain."""
name = models.CharField(ugettext_lazy('name'), max_length=100, unique=True,
help_text=ugettext_lazy("The domain name"))
quota = models.IntegerField()
enabled = models.BooleanField(
ugettext_lazy('enabled'),
help_text=ugettext_lazy("Check to activate this domain"),
default=True
)
owners = GenericRelation(ObjectAccess)
type = models.CharField(default="domain", max_length=20)
enable_dns_checks = models.BooleanField(
ugettext_lazy("Enable DNS checks"), default=True,
help_text=ugettext_lazy("Check to enable DNS checks for this domain")
)
objects = DomainManager()
class Meta:
permissions = (
("view_domain", "View domain"),
("view_domains", "View domains"),
)
ordering = ["name"]
app_label = "admin"
def __init__(self, *args, **kwargs):
"""Save name for further use."""
super(Domain, self).__init__(*args, **kwargs)
self.old_mail_homes = None
self.oldname = self.name
@property
def domainalias_count(self):
return self.domainalias_set.count()
@property
def mailbox_count(self):
return self.mailbox_set.count()
@property
def mbalias_count(self):
return self.alias_set.filter(internal=False).count()
@property
def identities_count(self):
"""Total number of identities in this domain."""
return (
self.mailbox_set.count() +
self.alias_set.filter(internal=False).count())
@property
def tags(self):
if self.type == "domain":
return [{"name": "domain", "label": _("Domain"), "type": "dom"}]
results = signals.get_domain_tags.send(
sender=self.__class__, domain=self)
return reduce(lambda a, b: a + b, [result[1] for result in results])
@property
def admins(self):
"""Return the domain administrators of this domain.
:return: a list of User objects
"""
return User.objects.filter(
is_superuser=False,
objectaccess__content_type__model="domain",
objectaccess__object_id=self.pk)
@property
def aliases(self):
return self.domainalias_set
@property
def uses_a_reserved_tld(self):
"""Does this domain use a reserved TLD."""
tld = self.name.split(".", 1)[-1]
return tld in constants.RESERVED_TLD
@property
def just_created(self):
"""Return true if the domain was created in the latest 24h."""
now = timezone.now()
delta = datetime.timedelta(days=1)
return self.creation + delta > now
def awaiting_checks(self):
"""Return true if the domain has no valid MX record and was created
in the latest 24h."""
if (not self.mxrecord_set.has_valids()) and self.just_created:
return True
return False
@cached_property
def dnsbl_status_color(self):
"""Shortcut to DNSBL results."""
if not self.dnsblresult_set.exists():
return "warning"
elif self.dnsblresult_set.blacklisted().exists():
return "danger"
else:
return "success"
def add_admin(self, account):
"""Add a new administrator to this domain.
:param User account: the administrator
"""
from modoboa.lib.permissions import grant_access_to_object
core_signals.can_create_object.send(
sender=self.__class__, context=self, object_type="domain_admins")
grant_access_to_object(account, self)
for mb in self.mailbox_set.all():
if mb.user.has_perm("admin.add_domain"):
continue
grant_access_to_object(account, mb)
grant_access_to_object(account, mb.user)
for al in self.alias_set.all():
grant_access_to_object(account, al)
def remove_admin(self, account):
"""Remove an administrator of this domain.
:param User account: administrator to remove
"""
from modoboa.lib.permissions import ungrant_access_to_object
ungrant_access_to_object(self, account)
for mb in self.mailbox_set.all():
if mb.user.has_perm("admin.add_domain"):
continue
ungrant_access_to_object(mb, account)
ungrant_access_to_object(mb.user, account)
for al in self.alias_set.all():
ungrant_access_to_object(al, account)
def save(self, *args, **kwargs):
"""Store current data if domain is renamed."""
if self.oldname != self.name:
self.old_mail_homes = (
dict((mb.id, mb.mail_home) for mb in self.mailbox_set.all())
)
super(Domain, self).save(*args, **kwargs)
def delete(self, fromuser, keepdir=False):
"""Custom delete method."""
from modoboa.lib.permissions import ungrant_access_to_objects
from .mailbox import Quota
if self.domainalias_set.count():
ungrant_access_to_objects(self.domainalias_set.all())
if self.alias_set.count():
ungrant_access_to_objects(self.alias_set.all())
if param_tools.get_global_parameter("auto_account_removal"):
for account in User.objects.filter(mailbox__domain=self):
account.delete(fromuser, keepdir)
elif self.mailbox_set.count():
Quota.objects.filter(username__contains="@%s" % self.name).delete()
ungrant_access_to_objects(self.mailbox_set.all())
super(Domain, self).delete()
def __str__(self):
return smart_text(self.name)
def from_csv(self, user, row):
"""Create a new domain from a CSV entry.
The expected fields order is the following::
"domain", name, quota, enabled
:param ``core.User`` user: user creating the domain
:param str row: a list containing domain's definition
"""
if len(row) < 4:
raise BadRequest(_("Invalid line"))
self.name = row[1].strip()
if Domain.objects.filter(name=self.name).exists():
raise Conflict
try:
self.quota = int(row[2].strip())
except ValueError:
raise BadRequest(
_("Invalid quota value for domain '%s'") % self.name)
self.enabled = (row[3].strip() in ["True", "1", "yes", "y"])
self.save(creator=user)
def to_csv(self, csvwriter):
csvwriter.writerow(["domain", self.name, self.quota, self.enabled])
for dalias in self.domainalias_set.all():
dalias.to_csv(csvwriter)
def post_create(self, creator):
"""Post creation actions.
        :param ``User`` creator: user who created this domain
"""
super(Domain, self).post_create(creator)
for domalias in self.domainalias_set.all():
domalias.post_create(creator)
reversion.register(Domain)
class MXQuerySet(models.QuerySet):
"""Custom manager for MXRecord."""
def has_valids(self):
"""Return managed results."""
if param_tools.get_global_parameter("valid_mxs").strip():
return self.filter(managed=True).exists()
return self.exists()
class MXRecord(models.Model):
"""A model used to store MX records for Domain."""
domain = models.ForeignKey(Domain)
name = models.CharField(max_length=254)
address = models.GenericIPAddressField()
managed = models.BooleanField(default=False)
updated = models.DateTimeField()
objects = models.Manager.from_queryset(MXQuerySet)()
def is_managed(self):
if not param_tools.get_global_parameter("enable_mx_checks"):
return False
return bool(param_tools.get_global_parameter("valid_mxs").strip())
def __unicode__(self):
return u"{0.name} ({0.address}) for {0.domain} ".format(self)
class DNSBLQuerySet(models.QuerySet):
"""Custom manager for DNSBLResultManager."""
def blacklisted(self):
"""Return blacklisted results."""
return self.exclude(status="")
class DNSBLResult(models.Model):
"""Store a DNSBL query result."""
domain = models.ForeignKey(Domain)
provider = models.CharField(max_length=254, db_index=True)
mx = models.ForeignKey(MXRecord)
status = models.CharField(max_length=45, blank=True, db_index=True)
objects = models.Manager.from_queryset(DNSBLQuerySet)()
class Meta:
app_label = "admin"
unique_together = [("domain", "provider", "mx")]
| isc | -5,286,976,501,532,225,000 | 32.003356 | 79 | 0.621556 | false |
nubakery/smith3 | python/relcaspt2/queue_split.py | 1 | 2189 | #!/opt/local/bin/python
import string
import os
import re
def header(n) :
return "//\n\
// BAGEL - Brilliantly Advanced General Electronic Structure Library\n\
// Filename: RelCASPT2" + n + ".cc\n\
// Copyright (C) 2014 Toru Shiozaki\n\
//\n\
// Author: Toru Shiozaki <[email protected]>\n\
// Maintainer: Shiozaki group\n\
//\n\
// This file is part of the BAGEL package.\n\
//\n\
// This program is free software: you can redistribute it and/or modify\n\
// it under the terms of the GNU General Public License as published by\n\
// the Free Software Foundation, either version 3 of the License, or\n\
// (at your option) any later version.\n\
//\n\
// This program is distributed in the hope that it will be useful,\n\
// but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
// GNU General Public License for more details.\n\
//\n\
// You should have received a copy of the GNU General Public License\n\
// along with this program. If not, see <http://www.gnu.org/licenses/>.\n\
//\n\
\n\
#include <bagel_config.h>\n\
#ifdef COMPILE_SMITH\n\
\n\
\n\
#include <src/smith/relcaspt2/RelCASPT2.h>\n"
def insert():
return "#include <src/smith/relcaspt2/RelCASPT2_tasks.h>\n"
def header2():
return "\n\
using namespace std;\n\
using namespace bagel;\n\
using namespace bagel::SMITH;\n\
\n\
"
footer = "#endif\n"
f = open('RelCASPT2.cc', 'r')
lines = f.read().split("\n")[34:]
tasks = []
tmp = ""
for line in lines:
if (len(line) >= 17 and (line[0:17] == "shared_ptr<Queue>" or line[0:17] == "RelCASPT2::RelCAS")):
if (tmp != ""):
tasks.append(tmp)
tmp = ""
tmp += line + "\n"
if (line == "}"):
tmp += "\n"
tasks.append(tmp)
p = re.compile('make_[a-z0-9]+q')
for task in tasks[0:-1]:
tag = p.search(task).group()[5:]
fout = open("RelCASPT2_" + tag + ".cc", "w")
out = header("_" + tag + "q") + insert() + header2() + task + footer
fout.write(out)
fout.close()
os.remove("RelCASPT2.cc")
fout = open("RelCASPT2.cc", "w")
out = header("") + header2() + tasks[len(tasks)-1] + footer
fout.write(out)
fout.close()
| gpl-2.0 | 3,854,441,453,575,640,600 | 26.3625 | 102 | 0.636364 | false |
hb9kns/PyBitmessage | src/bitmessagecurses/__init__.py | 1 | 54314 | # Copyright (c) 2014 Luke Montalvo <[email protected]>
# This file adds an alternative commandline interface, feel free to critique and fork
#
# This has only been tested on Arch Linux and Linux Mint
# Dependencies:
# * from python2-pip
# * python2-pythondialog
# * dialog
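# A minimal install sketch matching the notes above (package names are
# distribution-specific assumptions):
#   pacman -S dialog              # the dialog(1) utility itself
#   pip2 install pythondialog     # Python 2 bindings imported below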
import os
import sys
import StringIO
from textwrap import *
import time
from time import strftime, localtime
from threading import Timer
import curses
import dialog
from dialog import Dialog
from helper_sql import *
from helper_ackPayload import genAckPayload
from addresses import *
import ConfigParser
from bmconfigparser import BMConfigParser
from inventory import Inventory
import l10n
from pyelliptic.openssl import OpenSSL
import queues
import shared
import shutdown
quit = False
menutab = 1
menu = ["Inbox", "Send", "Sent", "Your Identities", "Subscriptions", "Address Book", "Blacklist", "Network Status"]
naptime = 100
log = ""
logpad = None
inventorydata = 0
startuptime = time.time()
inbox = []
inboxcur = 0
sentbox = []
sentcur = 0
addresses = []
addrcur = 0
addrcopy = 0
subscriptions = []
subcur = 0
addrbook = []
abookcur = 0
blacklist = []
blackcur = 0
bwtype = "black"
BROADCAST_STR = "[Broadcast subscribers]"
class printLog:
def write(self, output):
global log
log += output
def flush(self):
pass
class errLog:
def write(self, output):
global log
log += "!"+output
def flush(self):
pass
printlog = printLog()
errlog = errLog()
def cpair(a):
r = curses.color_pair(a)
if r not in range(1, curses.COLOR_PAIRS-1):
r = curses.color_pair(0)
return r
def ascii(s):
r = ""
for c in s:
if ord(c) in range(128):
r += c
return r
def drawmenu(stdscr):
menustr = " "
for i in range(0, len(menu)):
if menutab == i+1:
menustr = menustr[:-1]
menustr += "["
menustr += str(i+1)+menu[i]
if menutab == i+1:
menustr += "] "
elif i != len(menu)-1:
menustr += " "
stdscr.addstr(2, 5, menustr, curses.A_UNDERLINE)
def set_background_title(d, title):
try:
d.set_background_title(title)
except:
d.add_persistent_args(("--backtitle", title))
def scrollbox(d, text, height=None, width=None):
try:
d.scrollbox(text, height, width, exit_label = "Continue")
except:
d.msgbox(text, height or 0, width or 0, ok_label = "Continue")
def resetlookups():
global inventorydata
inventorydata = Inventory().numberOfInventoryLookupsPerformed
Inventory().numberOfInventoryLookupsPerformed = 0
Timer(1, resetlookups, ()).start()
def drawtab(stdscr):
if menutab in range(1, len(menu)+1):
if menutab == 1: # Inbox
stdscr.addstr(3, 5, "To", curses.A_BOLD)
stdscr.addstr(3, 40, "From", curses.A_BOLD)
stdscr.addstr(3, 80, "Subject", curses.A_BOLD)
stdscr.addstr(3, 120, "Time Received", curses.A_BOLD)
stdscr.hline(4, 5, '-', 121)
for i, item in enumerate(inbox[max(min(len(inbox)-curses.LINES+6, inboxcur-5), 0):]):
if 6+i < curses.LINES:
a = 0
if i == inboxcur - max(min(len(inbox)-curses.LINES+6, inboxcur-5), 0): # Highlight current address
a = a | curses.A_REVERSE
if item[7] == False: # If not read, highlight
a = a | curses.A_BOLD
stdscr.addstr(5+i, 5, item[1][:34], a)
stdscr.addstr(5+i, 40, item[3][:39], a)
stdscr.addstr(5+i, 80, item[5][:39], a)
stdscr.addstr(5+i, 120, item[6][:39], a)
elif menutab == 3: # Sent
stdscr.addstr(3, 5, "To", curses.A_BOLD)
stdscr.addstr(3, 40, "From", curses.A_BOLD)
stdscr.addstr(3, 80, "Subject", curses.A_BOLD)
stdscr.addstr(3, 120, "Status", curses.A_BOLD)
stdscr.hline(4, 5, '-', 121)
for i, item in enumerate(sentbox[max(min(len(sentbox)-curses.LINES+6, sentcur-5), 0):]):
if 6+i < curses.LINES:
a = 0
if i == sentcur - max(min(len(sentbox)-curses.LINES+6, sentcur-5), 0): # Highlight current address
a = a | curses.A_REVERSE
stdscr.addstr(5+i, 5, item[0][:34], a)
stdscr.addstr(5+i, 40, item[2][:39], a)
stdscr.addstr(5+i, 80, item[4][:39], a)
stdscr.addstr(5+i, 120, item[5][:39], a)
elif menutab == 2 or menutab == 4: # Send or Identities
stdscr.addstr(3, 5, "Label", curses.A_BOLD)
stdscr.addstr(3, 40, "Address", curses.A_BOLD)
stdscr.addstr(3, 80, "Stream", curses.A_BOLD)
stdscr.hline(4, 5, '-', 81)
for i, item in enumerate(addresses[max(min(len(addresses)-curses.LINES+6, addrcur-5), 0):]):
if 6+i < curses.LINES:
a = 0
if i == addrcur - max(min(len(addresses)-curses.LINES+6, addrcur-5), 0): # Highlight current address
a = a | curses.A_REVERSE
if item[1] == True and item[3] not in [8,9]: # Embolden enabled, non-special addresses
a = a | curses.A_BOLD
stdscr.addstr(5+i, 5, item[0][:34], a)
stdscr.addstr(5+i, 40, item[2][:39], cpair(item[3]) | a)
stdscr.addstr(5+i, 80, str(1)[:39], a)
elif menutab == 5: # Subscriptions
stdscr.addstr(3, 5, "Label", curses.A_BOLD)
stdscr.addstr(3, 80, "Address", curses.A_BOLD)
stdscr.addstr(3, 120, "Enabled", curses.A_BOLD)
stdscr.hline(4, 5, '-', 121)
for i, item in enumerate(subscriptions[max(min(len(subscriptions)-curses.LINES+6, subcur-5), 0):]):
if 6+i < curses.LINES:
a = 0
if i == subcur - max(min(len(subscriptions)-curses.LINES+6, subcur-5), 0): # Highlight current address
a = a | curses.A_REVERSE
if item[2] == True: # Embolden enabled subscriptions
a = a | curses.A_BOLD
stdscr.addstr(5+i, 5, item[0][:74], a)
stdscr.addstr(5+i, 80, item[1][:39], a)
stdscr.addstr(5+i, 120, str(item[2]), a)
elif menutab == 6: # Address book
stdscr.addstr(3, 5, "Label", curses.A_BOLD)
stdscr.addstr(3, 40, "Address", curses.A_BOLD)
stdscr.hline(4, 5, '-', 41)
for i, item in enumerate(addrbook[max(min(len(addrbook)-curses.LINES+6, abookcur-5), 0):]):
if 6+i < curses.LINES:
a = 0
if i == abookcur - max(min(len(addrbook)-curses.LINES+6, abookcur-5), 0): # Highlight current address
a = a | curses.A_REVERSE
stdscr.addstr(5+i, 5, item[0][:34], a)
stdscr.addstr(5+i, 40, item[1][:39], a)
elif menutab == 7: # Blacklist
stdscr.addstr(3, 5, "Type: "+bwtype)
stdscr.addstr(4, 5, "Label", curses.A_BOLD)
stdscr.addstr(4, 80, "Address", curses.A_BOLD)
stdscr.addstr(4, 120, "Enabled", curses.A_BOLD)
stdscr.hline(5, 5, '-', 121)
for i, item in enumerate(blacklist[max(min(len(blacklist)-curses.LINES+6, blackcur-5), 0):]):
if 7+i < curses.LINES:
a = 0
if i == blackcur - max(min(len(blacklist)-curses.LINES+6, blackcur-5), 0): # Highlight current address
a = a | curses.A_REVERSE
if item[2] == True: # Embolden enabled subscriptions
a = a | curses.A_BOLD
stdscr.addstr(6+i, 5, item[0][:74], a)
stdscr.addstr(6+i, 80, item[1][:39], a)
stdscr.addstr(6+i, 120, str(item[2]), a)
elif menutab == 8: # Network status
# Connection data
stdscr.addstr(4, 5, "Total Connections: "+str(len(shared.connectedHostsList)).ljust(2))
stdscr.addstr(6, 6, "Stream #", curses.A_BOLD)
stdscr.addstr(6, 18, "Connections", curses.A_BOLD)
stdscr.hline(7, 6, '-', 23)
streamcount = []
for host, stream in shared.connectedHostsList.items():
if stream >= len(streamcount):
streamcount.append(1)
else:
streamcount[stream] += 1
for i, item in enumerate(streamcount):
if i < 4:
if i == 0:
stdscr.addstr(8+i, 6, "?")
else:
stdscr.addstr(8+i, 6, str(i))
stdscr.addstr(8+i, 18, str(item).ljust(2))
# Uptime and processing data
stdscr.addstr(6, 35, "Since startup on "+l10n.formatTimestamp(startuptime, False))
stdscr.addstr(7, 40, "Processed "+str(shared.numberOfMessagesProcessed).ljust(4)+" person-to-person messages.")
stdscr.addstr(8, 40, "Processed "+str(shared.numberOfBroadcastsProcessed).ljust(4)+" broadcast messages.")
stdscr.addstr(9, 40, "Processed "+str(shared.numberOfPubkeysProcessed).ljust(4)+" public keys.")
# Inventory data
stdscr.addstr(11, 35, "Inventory lookups per second: "+str(inventorydata).ljust(3))
# Log
stdscr.addstr(13, 6, "Log", curses.A_BOLD)
n = log.count('\n')
if n > 0:
l = log.split('\n')
if n > 512:
del l[:(n-256)]
logpad.erase()
n = len(l)
for i, item in enumerate(l):
a = 0
if len(item) > 0 and item[0] == '!':
a = curses.color_pair(1)
item = item[1:]
logpad.addstr(i, 0, item, a)
logpad.refresh(n-curses.LINES+2, 0, 14, 6, curses.LINES-2, curses.COLS-7)
stdscr.refresh()
def redraw(stdscr):
stdscr.erase()
stdscr.border()
drawmenu(stdscr)
stdscr.refresh()
def dialogreset(stdscr):
stdscr.clear()
stdscr.keypad(1)
curses.curs_set(0)
def handlech(c, stdscr):
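    # Dispatch a single keypress: digits 1-8 switch tabs, 'q' quits, Enter
    # opens a dialog menu for the selected row of the current tab, and the
    # arrow/Home/End keys move the per-tab cursors handled further below.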
if c != curses.ERR:
global inboxcur, addrcur, sentcur, subcur, abookcur, blackcur
if c in range(256):
if chr(c) in '12345678':
global menutab
menutab = int(chr(c))
elif chr(c) == 'q':
global quit
quit = True
elif chr(c) == '\n':
curses.curs_set(1)
d = Dialog(dialog="dialog")
if menutab == 1:
set_background_title(d, "Inbox Message Dialog Box")
r, t = d.menu("Do what with \""+inbox[inboxcur][5]+"\" from \""+inbox[inboxcur][3]+"\"?",
choices=[("1", "View message"),
("2", "Mark message as unread"),
("3", "Reply"),
("4", "Add sender to Address Book"),
("5", "Save message as text file"),
("6", "Move to trash")])
if r == d.DIALOG_OK:
if t == "1": # View
set_background_title(d, "\""+inbox[inboxcur][5]+"\" from \""+inbox[inboxcur][3]+"\" to \""+inbox[inboxcur][1]+"\"")
data = ""
ret = sqlQuery("SELECT message FROM inbox WHERE msgid=?", inbox[inboxcur][0])
if ret != []:
for row in ret:
data, = row
data = shared.fixPotentiallyInvalidUTF8Data(data)
msg = ""
for i, item in enumerate(data.split("\n")):
msg += fill(item, replace_whitespace=False)+"\n"
scrollbox(d, unicode(ascii(msg)), 30, 80)
sqlExecute("UPDATE inbox SET read=1 WHERE msgid=?", inbox[inboxcur][0])
inbox[inboxcur][7] = 1
else:
scrollbox(d, unicode("Could not fetch message."))
elif t == "2": # Mark unread
sqlExecute("UPDATE inbox SET read=0 WHERE msgid=?", inbox[inboxcur][0])
inbox[inboxcur][7] = 0
elif t == "3": # Reply
curses.curs_set(1)
m = inbox[inboxcur]
fromaddr = m[4]
ischan = False
for i, item in enumerate(addresses):
if fromaddr == item[2] and item[3] != 0:
ischan = True
break
if not addresses[i][1]:
scrollbox(d, unicode("Sending address disabled, please either enable it or choose a different address."))
return
toaddr = m[2]
if ischan:
toaddr = fromaddr
subject = m[5]
if not m[5][:4] == "Re: ":
subject = "Re: "+m[5]
body = ""
ret = sqlQuery("SELECT message FROM inbox WHERE msgid=?", m[0])
if ret != []:
body = "\n\n------------------------------------------------------\n"
for row in ret:
                                    body += row[0]
sendMessage(fromaddr, toaddr, ischan, subject, body, True)
dialogreset(stdscr)
elif t == "4": # Add to Address Book
addr = inbox[inboxcur][4]
if addr not in [item[1] for i,item in enumerate(addrbook)]:
r, t = d.inputbox("Label for address \""+addr+"\"")
if r == d.DIALOG_OK:
label = t
sqlExecute("INSERT INTO addressbook VALUES (?,?)", label, addr)
# Prepend entry
addrbook.reverse()
addrbook.append([label, addr])
addrbook.reverse()
else:
scrollbox(d, unicode("The selected address is already in the Address Book."))
elif t == "5": # Save message
set_background_title(d, "Save \""+inbox[inboxcur][5]+"\" as text file")
r, t = d.inputbox("Filename", init=inbox[inboxcur][5]+".txt")
if r == d.DIALOG_OK:
msg = ""
ret = sqlQuery("SELECT message FROM inbox WHERE msgid=?", inbox[inboxcur][0])
if ret != []:
for row in ret:
msg, = row
fh = open(t, "a") # Open in append mode just in case
fh.write(msg)
fh.close()
else:
scrollbox(d, unicode("Could not fetch message."))
elif t == "6": # Move to trash
sqlExecute("UPDATE inbox SET folder='trash' WHERE msgid=?", inbox[inboxcur][0])
del inbox[inboxcur]
scrollbox(d, unicode("Message moved to trash. There is no interface to view your trash, \nbut the message is still on disk if you are desperate to recover it."))
elif menutab == 2:
a = ""
if addresses[addrcur][3] != 0: # if current address is a chan
a = addresses[addrcur][2]
sendMessage(addresses[addrcur][2], a)
elif menutab == 3:
set_background_title(d, "Sent Messages Dialog Box")
r, t = d.menu("Do what with \""+sentbox[sentcur][4]+"\" to \""+sentbox[sentcur][0]+"\"?",
choices=[("1", "View message"),
("2", "Move to trash")])
if r == d.DIALOG_OK:
if t == "1": # View
set_background_title(d, "\""+sentbox[sentcur][4]+"\" from \""+sentbox[sentcur][3]+"\" to \""+sentbox[sentcur][1]+"\"")
data = ""
ret = sqlQuery("SELECT message FROM sent WHERE subject=? AND ackdata=?", sentbox[sentcur][4], sentbox[sentcur][6])
if ret != []:
for row in ret:
data, = row
data = shared.fixPotentiallyInvalidUTF8Data(data)
msg = ""
for i, item in enumerate(data.split("\n")):
msg += fill(item, replace_whitespace=False)+"\n"
scrollbox(d, unicode(ascii(msg)), 30, 80)
else:
scrollbox(d, unicode("Could not fetch message."))
elif t == "2": # Move to trash
sqlExecute("UPDATE sent SET folder='trash' WHERE subject=? AND ackdata=?", sentbox[sentcur][4], sentbox[sentcur][6])
del sentbox[sentcur]
scrollbox(d, unicode("Message moved to trash. There is no interface to view your trash, \nbut the message is still on disk if you are desperate to recover it."))
elif menutab == 4:
set_background_title(d, "Your Identities Dialog Box")
if len(addresses) <= addrcur:
r, t = d.menu("Do what with addresses?",
choices=[("1", "Create new address")])
else:
r, t = d.menu("Do what with \""+addresses[addrcur][0]+"\" : \""+addresses[addrcur][2]+"\"?",
choices=[("1", "Create new address"),
("2", "Send a message from this address"),
("3", "Rename"),
("4", "Enable"),
("5", "Disable"),
("6", "Delete"),
("7", "Special address behavior")])
if r == d.DIALOG_OK:
if t == "1": # Create new address
set_background_title(d, "Create new address")
scrollbox(d, unicode("Here you may generate as many addresses as you like.\n"
"Indeed, creating and abandoning addresses is encouraged.\n"
"Deterministic addresses have several pros and cons:\n"
"\nPros:\n"
" * You can recreate your addresses on any computer from memory\n"
" * You need not worry about backing up your keys.dat file as long as you \n can remember your passphrase\n"
"Cons:\n"
" * You must remember (or write down) your passphrase in order to recreate \n your keys if they are lost\n"
" * You must also remember the address version and stream numbers\n"
" * If you choose a weak passphrase someone may be able to brute-force it \n and then send and receive messages as you"))
r, t = d.menu("Choose an address generation technique",
choices=[("1", "Use a random number generator"),
("2", "Use a passphrase")])
if r == d.DIALOG_OK:
if t == "1":
set_background_title(d, "Randomly generate address")
r, t = d.inputbox("Label (not shown to anyone except you)")
label = ""
if r == d.DIALOG_OK and len(t) > 0:
label = t
r, t = d.menu("Choose a stream",
choices=[("1", "Use the most available stream"),("", "(Best if this is the first of many addresses you will create)"),
("2", "Use the same stream as an existing address"),("", "(Saves you some bandwidth and processing power)")])
if r == d.DIALOG_OK:
if t == "1":
stream = 1
elif t == "2":
addrs = []
for i, item in enumerate(addresses):
addrs.append([str(i), item[2]])
r, t = d.menu("Choose an existing address's stream", choices=addrs)
if r == d.DIALOG_OK:
stream = decodeAddress(addrs[int(t)][1])[2]
shorten = False
r, t = d.checklist("Miscellaneous options",
choices=[("1", "Spend time shortening the address", 1 if shorten else 0)])
if r == d.DIALOG_OK and "1" in t:
shorten = True
queues.addressGeneratorQueue.put(("createRandomAddress", 4, stream, label, 1, "", shorten))
elif t == "2":
set_background_title(d, "Make deterministic addresses")
r, t = d.passwordform("Enter passphrase",
[("Passphrase", 1, 1, "", 2, 1, 64, 128),
("Confirm passphrase", 3, 1, "", 4, 1, 64, 128)],
form_height=4, insecure=True)
if r == d.DIALOG_OK:
if t[0] == t[1]:
passphrase = t[0]
r, t = d.rangebox("Number of addresses to generate",
width=48, min=1, max=99, init=8)
if r == d.DIALOG_OK:
number = t
stream = 1
shorten = False
r, t = d.checklist("Miscellaneous options",
choices=[("1", "Spend time shortening the address", 1 if shorten else 0)])
if r == d.DIALOG_OK and "1" in t:
shorten = True
scrollbox(d, unicode("In addition to your passphrase, be sure to remember the following numbers:\n"
"\n * Address version number: "+str(4)+"\n"
" * Stream number: "+str(stream)))
queues.addressGeneratorQueue.put(('createDeterministicAddresses', 4, stream, "unused deterministic address", number, str(passphrase), shorten))
else:
scrollbox(d, unicode("Passphrases do not match"))
elif t == "2": # Send a message
a = ""
if addresses[addrcur][3] != 0: # if current address is a chan
a = addresses[addrcur][2]
sendMessage(addresses[addrcur][2], a)
elif t == "3": # Rename address label
a = addresses[addrcur][2]
label = addresses[addrcur][0]
r, t = d.inputbox("New address label", init=label)
if r == d.DIALOG_OK:
label = t
BMConfigParser().set(a, "label", label)
# Write config
BMConfigParser().save()
addresses[addrcur][0] = label
elif t == "4": # Enable address
a = addresses[addrcur][2]
BMConfigParser().set(a, "enabled", "true") # Set config
# Write config
BMConfigParser().save()
# Change color
if BMConfigParser().safeGetBoolean(a, 'chan'):
addresses[addrcur][3] = 9 # orange
elif BMConfigParser().safeGetBoolean(a, 'mailinglist'):
addresses[addrcur][3] = 5 # magenta
else:
addresses[addrcur][3] = 0 # black
addresses[addrcur][1] = True
shared.reloadMyAddressHashes() # Reload address hashes
elif t == "5": # Disable address
a = addresses[addrcur][2]
BMConfigParser().set(a, "enabled", "false") # Set config
addresses[addrcur][3] = 8 # Set color to gray
# Write config
BMConfigParser().save()
addresses[addrcur][1] = False
shared.reloadMyAddressHashes() # Reload address hashes
elif t == "6": # Delete address
r, t = d.inputbox("Type in \"I want to delete this address\"", width=50)
if r == d.DIALOG_OK and t == "I want to delete this address":
BMConfigParser().remove_section(addresses[addrcur][2])
BMConfigParser().save()
del addresses[addrcur]
elif t == "7": # Special address behavior
a = addresses[addrcur][2]
set_background_title(d, "Special address behavior")
if BMConfigParser().safeGetBoolean(a, "chan"):
scrollbox(d, unicode("This is a chan address. You cannot use it as a pseudo-mailing list."))
else:
m = BMConfigParser().safeGetBoolean(a, "mailinglist")
r, t = d.radiolist("Select address behavior",
choices=[("1", "Behave as a normal address", not m),
("2", "Behave as a pseudo-mailing-list address", m)])
if r == d.DIALOG_OK:
if t == "1" and m == True:
BMConfigParser().set(a, "mailinglist", "false")
if addresses[addrcur][1]:
addresses[addrcur][3] = 0 # Set color to black
else:
addresses[addrcur][3] = 8 # Set color to gray
elif t == "2" and m == False:
try:
mn = BMConfigParser().get(a, "mailinglistname")
except ConfigParser.NoOptionError:
mn = ""
r, t = d.inputbox("Mailing list name", init=mn)
if r == d.DIALOG_OK:
mn = t
BMConfigParser().set(a, "mailinglist", "true")
BMConfigParser().set(a, "mailinglistname", mn)
addresses[addrcur][3] = 6 # Set color to magenta
# Write config
BMConfigParser().save()
elif menutab == 5:
set_background_title(d, "Subscriptions Dialog Box")
if len(subscriptions) <= subcur:
r, t = d.menu("Do what with subscription to \""+subscriptions[subcur][0]+"\"?",
choices=[("1", "Add new subscription")])
else:
r, t = d.menu("Do what with subscription to \""+subscriptions[subcur][0]+"\"?",
choices=[("1", "Add new subscription"),
("2", "Delete this subscription"),
("3", "Enable"),
("4", "Disable")])
if r == d.DIALOG_OK:
if t == "1":
r, t = d.inputbox("New subscription address")
if r == d.DIALOG_OK:
addr = addBMIfNotPresent(t)
if not shared.isAddressInMySubscriptionsList(addr):
r, t = d.inputbox("New subscription label")
if r == d.DIALOG_OK:
label = t
# Prepend entry
subscriptions.reverse()
subscriptions.append([label, addr, True])
subscriptions.reverse()
sqlExecute("INSERT INTO subscriptions VALUES (?,?,?)", label, addr, True)
shared.reloadBroadcastSendersForWhichImWatching()
elif t == "2":
                            r, t = d.inputbox("Type in \"I want to delete this subscription\"")
if r == d.DIALOG_OK and t == "I want to delete this subscription":
sqlExecute("DELETE FROM subscriptions WHERE label=? AND address=?", subscriptions[subcur][0], subscriptions[subcur][1])
shared.reloadBroadcastSendersForWhichImWatching()
del subscriptions[subcur]
elif t == "3":
sqlExecute("UPDATE subscriptions SET enabled=1 WHERE label=? AND address=?", subscriptions[subcur][0], subscriptions[subcur][1])
shared.reloadBroadcastSendersForWhichImWatching()
subscriptions[subcur][2] = True
elif t == "4":
sqlExecute("UPDATE subscriptions SET enabled=0 WHERE label=? AND address=?", subscriptions[subcur][0], subscriptions[subcur][1])
shared.reloadBroadcastSendersForWhichImWatching()
subscriptions[subcur][2] = False
elif menutab == 6:
set_background_title(d, "Address Book Dialog Box")
if len(addrbook) <= abookcur:
r, t = d.menu("Do what with addressbook?",
choices=[("3", "Add new address to Address Book")])
else:
r, t = d.menu("Do what with \""+addrbook[abookcur][0]+"\" : \""+addrbook[abookcur][1]+"\"",
choices=[("1", "Send a message to this address"),
("2", "Subscribe to this address"),
("3", "Add new address to Address Book"),
("4", "Delete this address")])
if r == d.DIALOG_OK:
if t == "1":
sendMessage(recv=addrbook[abookcur][1])
elif t == "2":
r, t = d.inputbox("New subscription label")
if r == d.DIALOG_OK:
                                label = t
                                addr = addrbook[abookcur][1]
# Prepend entry
subscriptions.reverse()
subscriptions.append([label, addr, True])
subscriptions.reverse()
sqlExecute("INSERT INTO subscriptions VALUES (?,?,?)", label, addr, True)
shared.reloadBroadcastSendersForWhichImWatching()
elif t == "3":
r, t = d.inputbox("Input new address")
if r == d.DIALOG_OK:
addr = t
if addr not in [item[1] for i,item in enumerate(addrbook)]:
r, t = d.inputbox("Label for address \""+addr+"\"")
if r == d.DIALOG_OK:
sqlExecute("INSERT INTO addressbook VALUES (?,?)", t, addr)
# Prepend entry
addrbook.reverse()
addrbook.append([t, addr])
addrbook.reverse()
else:
scrollbox(d, unicode("The selected address is already in the Address Book."))
elif t == "4":
r, t = d.inputbox("Type in \"I want to delete this Address Book entry\"")
if r == d.DIALOG_OK and t == "I want to delete this Address Book entry":
sqlExecute("DELETE FROM addressbook WHERE label=? AND address=?", addrbook[abookcur][0], addrbook[abookcur][1])
del addrbook[abookcur]
elif menutab == 7:
set_background_title(d, "Blacklist Dialog Box")
r, t = d.menu("Do what with \""+blacklist[blackcur][0]+"\" : \""+blacklist[blackcur][1]+"\"?",
choices=[("1", "Delete"),
("2", "Enable"),
("3", "Disable")])
if r == d.DIALOG_OK:
if t == "1":
r, t = d.inputbox("Type in \"I want to delete this Blacklist entry\"")
if r == d.DIALOG_OK and t == "I want to delete this Blacklist entry":
sqlExecute("DELETE FROM blacklist WHERE label=? AND address=?", blacklist[blackcur][0], blacklist[blackcur][1])
del blacklist[blackcur]
elif t == "2":
sqlExecute("UPDATE blacklist SET enabled=1 WHERE label=? AND address=?", blacklist[blackcur][0], blacklist[blackcur][1])
blacklist[blackcur][2] = True
elif t== "3":
sqlExecute("UPDATE blacklist SET enabled=0 WHERE label=? AND address=?", blacklist[blackcur][0], blacklist[blackcur][1])
blacklist[blackcur][2] = False
dialogreset(stdscr)
else:
if c == curses.KEY_UP:
if menutab == 1 and inboxcur > 0:
inboxcur -= 1
if (menutab == 2 or menutab == 4) and addrcur > 0:
addrcur -= 1
if menutab == 3 and sentcur > 0:
sentcur -= 1
if menutab == 5 and subcur > 0:
subcur -= 1
if menutab == 6 and abookcur > 0:
abookcur -= 1
if menutab == 7 and blackcur > 0:
blackcur -= 1
elif c == curses.KEY_DOWN:
if menutab == 1 and inboxcur < len(inbox)-1:
inboxcur += 1
if (menutab == 2 or menutab == 4) and addrcur < len(addresses)-1:
addrcur += 1
if menutab == 3 and sentcur < len(sentbox)-1:
sentcur += 1
if menutab == 5 and subcur < len(subscriptions)-1:
subcur += 1
if menutab == 6 and abookcur < len(addrbook)-1:
abookcur += 1
if menutab == 7 and blackcur < len(blacklist)-1:
blackcur += 1
elif c == curses.KEY_HOME:
if menutab == 1:
inboxcur = 0
if menutab == 2 or menutab == 4:
addrcur = 0
if menutab == 3:
sentcur = 0
if menutab == 5:
subcur = 0
if menutab == 6:
abookcur = 0
if menutab == 7:
blackcur = 0
elif c == curses.KEY_END:
if menutab == 1:
inboxcur = len(inbox)-1
if menutab == 2 or menutab == 4:
addrcur = len(addresses)-1
if menutab == 3:
sentcur = len(sentbox)-1
if menutab == 5:
subcur = len(subscriptions)-1
if menutab == 6:
abookcur = len(addrbook)-1
if menutab == 7:
                    blackcur = len(blacklist)-1
redraw(stdscr)
def sendMessage(sender="", recv="", broadcast=None, subject="", body="", reply=False):
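    # Interactive sender: a sending address is required (the function returns
    # immediately without one); recipient, subject and body are prompted for
    # with dialog boxes when empty, and 'broadcast' picks between the
    # person-to-person and broadcast queuing paths at the bottom.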
if sender == "":
return
d = Dialog(dialog="dialog")
set_background_title(d, "Send a message")
if recv == "":
r, t = d.inputbox("Recipient address (Cancel to load from the Address Book or leave blank to broadcast)", 10, 60)
if r != d.DIALOG_OK:
global menutab
menutab = 6
return
recv = t
if broadcast == None and sender != recv:
r, t = d.radiolist("How to send the message?",
choices=[("1", "Send to one or more specific people", 1),
("2", "Broadcast to everyone who is subscribed to your address", 0)])
if r != d.DIALOG_OK:
return
broadcast = False
if t == "2": # Broadcast
broadcast = True
if subject == "" or reply:
r, t = d.inputbox("Message subject", width=60, init=subject)
if r != d.DIALOG_OK:
return
subject = t
if body == "" or reply:
r, t = d.inputbox("Message body", 10, 80, init=body)
if r != d.DIALOG_OK:
return
body = t
body = body.replace("\\n", "\n").replace("\\t", "\t")
if not broadcast:
recvlist = []
for i, item in enumerate(recv.replace(",", ";").split(";")):
recvlist.append(item.strip())
        recvlist = list(set(recvlist)) # Remove exact duplicates
for addr in recvlist:
if addr != "":
status, version, stream, ripe = decodeAddress(addr)
if status != "success":
set_background_title(d, "Recipient address error")
err = "Could not decode" + addr + " : " + status + "\n\n"
if status == "missingbm":
err += "Bitmessage addresses should start with \"BM-\"."
elif status == "checksumfailed":
err += "The address was not typed or copied correctly."
elif status == "invalidcharacters":
err += "The address contains invalid characters."
elif status == "versiontoohigh":
err += "The address version is too high. Either you need to upgrade your Bitmessage software or your acquaintance is doing something clever."
elif status == "ripetooshort":
err += "Some data encoded in the address is too short. There might be something wrong with the software of your acquaintance."
elif status == "ripetoolong":
err += "Some data encoded in the address is too long. There might be something wrong with the software of your acquaintance."
elif status == "varintmalformed":
err += "Some data encoded in the address is malformed. There might be something wrong with the software of your acquaintance."
else:
err += "It is unknown what is wrong with the address."
scrollbox(d, unicode(err))
else:
addr = addBMIfNotPresent(addr)
if version > 4 or version <= 1:
set_background_title(d, "Recipient address error")
scrollbox(d, unicode("Could not understand version number " + version + "of address" + addr + "."))
continue
if stream > 1 or stream == 0:
set_background_title(d, "Recipient address error")
scrollbox(d, unicode("Bitmessage currently only supports stream numbers of 1, unlike as requested for address " + addr + "."))
continue
if len(shared.connectedHostsList) == 0:
set_background_title(d, "Not connected warning")
scrollbox(d, unicode("Because you are not currently connected to the network, "))
stealthLevel = BMConfigParser().safeGetInt('bitmessagesettings', 'ackstealthlevel')
                ackdata = genAckPayload(stream, stealthLevel)
sqlExecute(
"INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
"",
addr,
ripe,
sender,
subject,
body,
ackdata,
int(time.time()), # sentTime (this will never change)
int(time.time()), # lastActionTime
0, # sleepTill time. This will get set when the POW gets done.
"msgqueued",
0, # retryNumber
"sent",
2, # encodingType
BMConfigParser().getint('bitmessagesettings', 'ttl'))
queues.workerQueue.put(("sendmessage", addr))
else: # Broadcast
if recv == "":
set_background_title(d, "Empty sender error")
scrollbox(d, unicode("You must specify an address to send the message from."))
else:
# dummy ackdata, no need for stealth
            ackdata = genAckPayload(decodeAddress(sender)[2], 0)
recv = BROADCAST_STR
ripe = ""
sqlExecute(
"INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
"",
recv,
ripe,
sender,
subject,
body,
ackdata,
int(time.time()), # sentTime (this will never change)
int(time.time()), # lastActionTime
0, # sleepTill time. This will get set when the POW gets done.
"broadcastqueued",
0, # retryNumber
"sent", # folder
2, # encodingType
BMConfigParser().getint('bitmessagesettings', 'ttl'))
queues.workerQueue.put(('sendbroadcast', ''))
def loadInbox():
sys.stdout = sys.__stdout__
print("Loading inbox messages...")
sys.stdout = printlog
where = "toaddress || fromaddress || subject || message"
what = "%%"
ret = sqlQuery("""SELECT msgid, toaddress, fromaddress, subject, received, read
FROM inbox WHERE folder='inbox' AND %s LIKE ?
ORDER BY received
""" % (where,), what)
for row in ret:
msgid, toaddr, fromaddr, subject, received, read = row
subject = ascii(shared.fixPotentiallyInvalidUTF8Data(subject))
# Set label for to address
try:
if toaddr == BROADCAST_STR:
tolabel = BROADCAST_STR
else:
tolabel = BMConfigParser().get(toaddr, "label")
except:
tolabel = ""
if tolabel == "":
tolabel = toaddr
tolabel = shared.fixPotentiallyInvalidUTF8Data(tolabel)
# Set label for from address
fromlabel = ""
if BMConfigParser().has_section(fromaddr):
fromlabel = BMConfigParser().get(fromaddr, "label")
if fromlabel == "": # Check Address Book
qr = sqlQuery("SELECT label FROM addressbook WHERE address=?", fromaddr)
if qr != []:
for r in qr:
fromlabel, = r
if fromlabel == "": # Check Subscriptions
qr = sqlQuery("SELECT label FROM subscriptions WHERE address=?", fromaddr)
if qr != []:
for r in qr:
fromlabel, = r
if fromlabel == "":
fromlabel = fromaddr
fromlabel = shared.fixPotentiallyInvalidUTF8Data(fromlabel)
# Load into array
inbox.append([msgid, tolabel, toaddr, fromlabel, fromaddr, subject,
l10n.formatTimestamp(received, False), read])
inbox.reverse()
def loadSent():
sys.stdout = sys.__stdout__
print("Loading sent messages...")
sys.stdout = printlog
where = "toaddress || fromaddress || subject || message"
what = "%%"
ret = sqlQuery("""SELECT toaddress, fromaddress, subject, status, ackdata, lastactiontime
FROM sent WHERE folder='sent' AND %s LIKE ?
ORDER BY lastactiontime
""" % (where,), what)
for row in ret:
toaddr, fromaddr, subject, status, ackdata, lastactiontime = row
subject = ascii(shared.fixPotentiallyInvalidUTF8Data(subject))
# Set label for to address
tolabel = ""
qr = sqlQuery("SELECT label FROM addressbook WHERE address=?", toaddr)
if qr != []:
for r in qr:
tolabel, = r
if tolabel == "":
qr = sqlQuery("SELECT label FROM subscriptions WHERE address=?", toaddr)
if qr != []:
for r in qr:
tolabel, = r
if tolabel == "":
if BMConfigParser().has_section(toaddr):
tolabel = BMConfigParser().get(toaddr, "label")
if tolabel == "":
tolabel = toaddr
# Set label for from address
fromlabel = ""
if BMConfigParser().has_section(fromaddr):
fromlabel = BMConfigParser().get(fromaddr, "label")
if fromlabel == "":
fromlabel = fromaddr
# Set status string
if status == "awaitingpubkey":
statstr = "Waiting for their public key. Will request it again soon"
elif status == "doingpowforpubkey":
statstr = "Encryption key request queued"
elif status == "msgqueued":
statstr = "Message queued"
elif status == "msgsent":
t = l10n.formatTimestamp(lastactiontime, False)
statstr = "Message sent at "+t+".Waiting for acknowledgement."
elif status == "msgsentnoackexpected":
t = l10n.formatTimestamp(lastactiontime, False)
statstr = "Message sent at "+t+"."
elif status == "doingmsgpow":
statstr = "The proof of work required to send the message has been queued."
elif status == "ackreceived":
t = l10n.formatTimestamp(lastactiontime, False)
statstr = "Acknowledgment of the message received at "+t+"."
elif status == "broadcastqueued":
statstr = "Broadcast queued."
elif status == "broadcastsent":
t = l10n.formatTimestamp(lastactiontime, False)
statstr = "Broadcast sent at "+t+"."
elif status == "forcepow":
statstr = "Forced difficulty override. Message will start sending soon."
elif status == "badkey":
statstr = "Warning: Could not encrypt message because the recipient's encryption key is no good."
elif status == "toodifficult":
statstr = "Error: The work demanded by the recipient is more difficult than you are willing to do."
else:
t = l10n.formatTimestamp(lastactiontime, False)
statstr = "Unknown status "+status+" at "+t+"."
# Load into array
sentbox.append([tolabel, toaddr, fromlabel, fromaddr, subject, statstr, ackdata,
l10n.formatTimestamp(lastactiontime, False)])
sentbox.reverse()
def loadAddrBook():
sys.stdout = sys.__stdout__
print("Loading address book...")
sys.stdout = printlog
ret = sqlQuery("SELECT label, address FROM addressbook")
for row in ret:
label, addr = row
label = shared.fixPotentiallyInvalidUTF8Data(label)
addrbook.append([label, addr])
addrbook.reverse()
def loadSubscriptions():
ret = sqlQuery("SELECT label, address, enabled FROM subscriptions")
for row in ret:
label, address, enabled = row
subscriptions.append([label, address, enabled])
subscriptions.reverse()
def loadBlackWhiteList():
global bwtype
bwtype = BMConfigParser().get("bitmessagesettings", "blackwhitelist")
if bwtype == "black":
ret = sqlQuery("SELECT label, address, enabled FROM blacklist")
else:
ret = sqlQuery("SELECT label, address, enabled FROM whitelist")
for row in ret:
label, address, enabled = row
blacklist.append([label, address, enabled])
blacklist.reverse()
def runwrapper():
sys.stdout = printlog
#sys.stderr = errlog
# Load messages from database
loadInbox()
loadSent()
loadAddrBook()
loadSubscriptions()
loadBlackWhiteList()
stdscr = curses.initscr()
global logpad
logpad = curses.newpad(1024, curses.COLS)
stdscr.nodelay(0)
curses.curs_set(0)
stdscr.timeout(1000)
curses.wrapper(run)
doShutdown()
def run(stdscr):
# Schedule inventory lookup data
resetlookups()
# Init color pairs
if curses.has_colors():
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK) # red
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK) # green
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK) # yellow
curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK) # blue
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK) # magenta
curses.init_pair(6, curses.COLOR_CYAN, curses.COLOR_BLACK) # cyan
curses.init_pair(7, curses.COLOR_WHITE, curses.COLOR_BLACK) # white
if curses.can_change_color():
curses.init_color(8, 500, 500, 500) # gray
curses.init_pair(8, 8, 0)
curses.init_color(9, 844, 465, 0) # orange
curses.init_pair(9, 9, 0)
else:
curses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_BLACK) # grayish
curses.init_pair(9, curses.COLOR_YELLOW, curses.COLOR_BLACK) # orangish
# Init list of address in 'Your Identities' tab
configSections = BMConfigParser().addressses()
for addressInKeysFile in configSections:
isEnabled = BMConfigParser().getboolean(addressInKeysFile, "enabled")
addresses.append([BMConfigParser().get(addressInKeysFile, "label"), isEnabled, addressInKeysFile])
# Set address color
if not isEnabled:
addresses[len(addresses)-1].append(8) # gray
elif BMConfigParser().safeGetBoolean(addressInKeysFile, 'chan'):
addresses[len(addresses)-1].append(9) # orange
elif BMConfigParser().safeGetBoolean(addressInKeysFile, 'mailinglist'):
addresses[len(addresses)-1].append(5) # magenta
else:
addresses[len(addresses)-1].append(0) # black
addresses.reverse()
stdscr.clear()
redraw(stdscr)
    while not quit:
drawtab(stdscr)
handlech(stdscr.getch(), stdscr)
def doShutdown():
sys.stdout = sys.__stdout__
print("Shutting down...")
sys.stdout = printlog
shutdown.doCleanShutdown()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
os._exit(0)
| mit | -7,257,180,772,672,811,000 | 50.288008 | 191 | 0.455812 | false |
cheery/essence | interpret.py | 1 | 2438 | # This file is part of Essential Editor Research Project (EERP)
#
# EERP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EERP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EERP. If not, see <http://www.gnu.org/licenses/>.
from essence import load
import sys
class closure(object):
def __init__(self, arguments, body, env):
self.arguments = arguments
self.body = body
self.env = env
def apply(self, arguments):
env = dict(zip(self.arguments, arguments))
env['__parent__'] = self.env
res = None
for expr in self.body.array:
res = interpret(expr, env)
return res
def interpret(expr, env):
name = expr.get('name')
if name == 'int':
return int(expr.string) # on early versions it's a string.
if name == 'mul':
left, right = expr.array
return interpret(left, env) * interpret(right, env)
if name == 'add':
left, right = expr.array
return interpret(left, env) + interpret(right, env)
if name == 'set':
left, right = expr.array
env[left] = interpret(right, env)
if name == 'variable':
variable = expr.string
if not variable in env:
raise Exception("%r not in %r" % (variable, env))
return env[variable]
if name == 'define':
name, arglist, body = expr.array
arguments = []
for argument in arglist.array:
assert argument.get('name') == 'variable'
arguments.append(argument.string)
env[name] = closure(arguments, body, env)
if name == 'call':
caller, arguments = expr.array
caller = interpret(caller, env)
arguments = [interpret(arg, env) for arg in arguments]
return caller.apply(arguments)
raise Exception("unknown clause %r", expr)
program = load(sys.argv[1])
assert program.get('name') == 's-expr'
env = {}
res = None
for item in program.array:
res = interpret(item, env)
print res
| gpl-3.0 | 6,000,114,890,642,886,000 | 33.338028 | 70 | 0.634126 | false |
dsweet04/rekall | rekall-core/rekall/plugins/response/renderers.py | 1 | 3128 | # Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <[email protected]>"
from rekall.ui import text
from rekall.plugins.renderers import data_export
from rekall_lib import utils
class FileSpec_Text(text.TextObjectRenderer):
renders_type = "FileSpec"
def render_row(self, target, width=None, **_):
if target.filesystem == "API":
return text.Cell(unicode(target.name), width=width)
else:
return text.Cell(u"%s (%s)" % (target.name, target.filesystem),
width=width)
class FileInformation_TextObjectRenderer(text.TextObjectRenderer):
renders_type = "FileInformation"
def render_row(self, target, **options):
return FileSpec_Text(
renderer=self.renderer, session=self.session).render_row(
target.filename, **options)
class UserTextObjectRenderer(text.TextObjectRenderer):
renders_type = "User"
def render_row(self, item, **_):
if item.username:
return text.Cell(u"%s (%s)" % (item.username, item.uid))
return text.Cell(unicode(item.uid))
class GroupTextObjectRenderer(text.TextObjectRenderer):
renders_type = "Group"
def render_row(self, item, **_):
if item.group_name:
return text.Cell(u"%s (%s)" % (item.group_name, item.gid))
return text.Cell(unicode(item.gid))
class DataExportFileSpecObjectRenderer(
data_export.DataExportBaseObjectRenderer):
renders_type = "FileSpec"
def Summary(self, item, **_):
return utils.SmartStr(item)
def GetState(self, item, **options):
return dict(filesystem=item.filesystem, name=item.name)
class PermissionsFileSpecObjectRenderer(
data_export.DataExportBaseObjectRenderer):
renders_type = "Permissions"
def Summary(self, item, **_):
return utils.SmartStr(item)
def GetState(self, item, **options):
return dict(perm=str(item), int_perm=int(item))
class LiveProcessTextRenderer(text.TextObjectRenderer):
renders_type = "LiveProcess"
def render_row(self, target, width=None, **_):
return text.Cell("%s (%s)" % (target.name, target.pid), width=width)
class LiveProcessDataExportRenderer(
data_export.DataExportBaseObjectRenderer):
renders_type = "LiveProcess"
def GetState(self, item, **_):
return item.as_dict()
| gpl-2.0 | -267,481,225,361,954,530 | 30.59596 | 76 | 0.684783 | false |
phoebe-project/phoebe2 | tests/nosetests/test_blackbody/test_blackbody.py | 1 | 1553 | """
"""
import phoebe
from phoebe import u
import numpy as np
import matplotlib.pyplot as plt
def test_binary(plot=False):
b = phoebe.Bundle.default_binary()
# Two spherical suns
b.set_value_all('teff', value=5772.)
b.set_value('sma', component='binary', value=100.)
b.set_value('period', component='binary', value=81.955)
b.add_dataset('lc', times=np.linspace(0,100,21))
b.add_compute('phoebe', compute='phoebe2')
b.add_compute('legacy', compute='phoebe1')
# set matching atmospheres
b.set_value_all('atm', 'extern_planckint')
# turn off limb-darkening:
b.set_value_all('ld_mode_bol', 'manual')
b.set_value_all('ld_func_bol', 'linear')
b.set_value_all('ld_coeffs_bol', [0.0])
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'linear')
b.set_value_all('ld_coeffs', [0.0])
#turn off albedos (legacy requirement)
b.set_value_all('irrad_frac_refl_bol', 0.0)
if plot: print("running phoebe2 model...")
b.run_compute(compute='phoebe2', irrad_method='none', model='phoebe2model')
if plot: print("running phoebe1 model...")
b.run_compute(compute='phoebe1', refl_num=0, model='phoebe1model')
phoebe2_val = b.get_value('fluxes@phoebe2model')
phoebe1_val = b.get_value('fluxes@phoebe1model')
if plot:
b.plot(dataset='lc01', show=True)
assert(np.allclose(phoebe2_val, phoebe1_val, rtol=1e-3, atol=0.))
return b
if __name__ == '__main__':
logger = phoebe.logger(clevel='INFO')
b = test_binary(plot=True)
| gpl-3.0 | 923,983,890,196,022,900 | 26.732143 | 79 | 0.641339 | false |
wojtask/CormenPy | test/test_chapter15/test_exercise15_5_1.py | 1 | 2830 | import io
import re
from contextlib import redirect_stdout
from unittest import TestCase
from hamcrest import *
from chapter15.exercise15_5_1 import construct_optimal_bst
from chapter15.textbook15_5 import optimal_bst
from test_chapter15.test_textbook15_5 import get_probabilities_for_optimal_bst
def assert_optimal_bst_output(actual_output, root):
n = root.length
    root_id = int(re.search(r'k(\d+) is the root', actual_output[0]).group(1))
assert_that(root_id, is_(equal_to(root[1, n])))
line_no = assert_left_child_output(actual_output, root, 1, root_id - 1, 1)
line_no = assert_right_child_output(actual_output, root, root_id + 1, n, line_no + 1)
assert_that(actual_output, has_length(line_no + 1))
def assert_left_child_output(actual_output, root, i, j, line_no):
parent = j + 1
    comp = re.compile(r'([kd])(\d+) is the left child of k(\d+)')
node_type = comp.search(actual_output[line_no]).group(1)
node_id = int(comp.search(actual_output[line_no]).group(2))
actual_parent = int(comp.search(actual_output[line_no]).group(3))
assert_that(actual_parent, is_(equal_to(parent)))
if i <= j:
assert_that(node_type, is_(equal_to('k')))
assert_that(node_id, is_(equal_to(root[i, j])))
line_no = assert_left_child_output(actual_output, root, i, node_id - 1, line_no + 1)
line_no = assert_right_child_output(actual_output, root, node_id + 1, j, line_no + 1)
else:
assert_that(node_type, is_(equal_to('d')))
assert_that(node_id, is_(equal_to(j)))
return line_no
def assert_right_child_output(actual_output, root, i, j, line_no):
parent = i - 1
    comp = re.compile(r'([kd])(\d+) is the right child of k(\d+)')
node_type = comp.search(actual_output[line_no]).group(1)
node_id = int(comp.search(actual_output[line_no]).group(2))
actual_parent = int(comp.search(actual_output[line_no]).group(3))
assert_that(actual_parent, is_(equal_to(parent)))
if i <= j:
assert_that(node_type, is_(equal_to('k')))
assert_that(node_id, is_(equal_to(root[i, j])))
line_no = assert_left_child_output(actual_output, root, i, node_id - 1, line_no + 1)
line_no = assert_right_child_output(actual_output, root, node_id + 1, j, line_no + 1)
else:
assert_that(node_type, is_(equal_to('d')))
assert_that(node_id, is_(equal_to(j)))
return line_no
class TestExercise15_5_1(TestCase):
def test_construct_optimal_bst(self):
p, q = get_probabilities_for_optimal_bst()
_, root = optimal_bst(p, q, p.length)
captured_output = io.StringIO()
with redirect_stdout(captured_output):
construct_optimal_bst(root)
actual_output = captured_output.getvalue().splitlines()
assert_optimal_bst_output(actual_output, root)
| gpl-3.0 | 5,799,465,066,080,409,000 | 40.014493 | 93 | 0.645936 | false |
rahlk/CSC579__Computer_Performance_Modeling | simulation/proj1/tasks/task5.py | 1 | 2063 | from __future__ import division
from __future__ import print_function
import os
import sys
import functools
# Update path
root = os.path.join(os.getcwd().split('proj1')[0], 'proj1')
if root not in sys.path:
sys.path.append(root)
import numpy as np
import pandas as pd
import multiprocessing
from pdb import set_trace
from Simulator import simulate
from Utils.PlotsUtils import line, line2
from Utils.RandomUtil import Random
from Utils.MisclUtils import TimeUtil
rand = Random()
timer = TimeUtil()
# Set seed
rand.set_seed(seed_val=12458)
def customer_loss_rate(customers):
served = np.sum([customer.serviced for customer in customers])
total = len(customers)
return served / total
def plot_runtime(x=None, y=None):
line(x, y, x_label=r"$\rho$", y_label=r"Run Times", the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")
def plot_runtime_vs_avg(x, y, y_1):
line2(x, y, x, y_1, label_1="Actual Runtimes", label_2="Expected value of $\rho$", x_label=r"$\rho$", y_label=r"Run Times", the_title=r"$\mathrm{Run\ Times\ in\ }\mu\mathrm{s\ vs.\ }\rho$")
def task_5():
rho_list = np.arange(0.05, 1, 0.1)
C = 1e5
elapsed = []
for rho in rho_list:
start_time = timer.current_time()
serviced = simulate(l = rho, server_lim = 40, max_serviced=C, L=1, verbose=False)
end_time = timer.current_time()
elapsed.append(end_time-start_time)
data = pd.DataFrame([[a,b] for a, b in zip(rho_list, elapsed)], columns=["Rho", "Seconds"])
data.to_csv(os.path.abspath(os.path.join(root,"tasks/task5.csv")))
def task5_plot():
data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
plot_runtime(data["Rho"], data["Seconds"])
set_trace()
def compare_plot():
rho_list = np.arange(0.05, 1, 0.1)
average_rho = [np.mean([rand.exponential(lam=p) for _ in xrange(10000)]) for p in rho_list]
data = pd.read_csv(os.path.abspath("tasks/task5.csv"))
plot_runtime(data["Rho"], average_rho)
if __name__ == "__main__":
task_5()
task5_plot()
compare_plot()
| mit | 711,134,336,705,916,900 | 28.898551 | 193 | 0.654387 | false |
JackDesBwa/IrisMonitor | decoders/test.py | 1 | 1272 | #!/usr/bin/env python
import sys, os
if os.path.dirname(os.path.abspath(__file__)) in sys.path:
sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
if os.path.dirname(os.path.dirname(os.path.abspath(__file__))) not in sys.path:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import decoders, events
def onAllMessages(event, data):
sys.stdout.write('%f : ' % data[0])
sys.stdout.write('%s - ' % event)
print(data[1])
events.register_all(decoders.IrisDecoder, onAllMessages)
sys.stdout.write('Available decoders are :\n')
i = 0
declist = decoders.get_list()
for d in declist:
sys.stdout.write(' %3d %s\n' % (i, d))
i += 1
try:
if len(declist) == 0:
sys.stderr.write('No channel available.\n')
if len(declist) == 1:
a = 0
else:
sys.stdout.write('Which one would be tested ? ')
sys.stdout.flush()
a = int(sys.stdin.readline())
sys.stdout.write('Test of `%s`\n' % declist[a])
theclass = decoders.get_class(declist[a])
decoder = theclass()
except ValueError:
sys.exit('Invalid number.\n')
except IndexError:
    sys.exit('Unknown entry.\n')
except KeyboardInterrupt:
sys.exit('Interrupted test.\n')
sys.stdout.write('The decoder has to be tested in conjunction with a channel.\n')
from channels import test
| mit | 8,655,242,273,459,935,000 | 27.909091 | 81 | 0.691824 | false |
gwct/grampa | lib/mul_recon.py | 1 | 12960 | #!/usr/bin/python
#############################################################################
# The main algorithmic functions for MUL-reconciliation mapping.
# Gregg Thomas
# Fall 2015, Combo algorithm implemented Spring 2016
#############################################################################
import os, itertools, recontree as RT, mul_tree as MT, reconcore as RC, gene_tree as GT, global_vars as globs
import pickle
#############################################################################
def reconLCA(lca_ginfo, sinfo, lca_maps, retmap=False):
# The LCA reconciliation mapping algorithm.
internal_nodes = RT.sortNodes(lca_ginfo);
# Sort the internal nodes for a post order traversal.
score = 0;
if retmap:
dups, losses = {}, {};
for g in lca_ginfo:
dups[g], losses[g] = 0, 0;
for g in internal_nodes:
g = "<" + str(g) + ">";
d1, d2 = RT.getDesc(g, lca_ginfo);
is_dup = 0;
g_clade = RT.getClade(g, lca_ginfo);
clade_maps = [];
for g_tip in g_clade:
clade_maps.append(lca_maps[g_tip][0]);
# Get the species in the clade of the current node. Then get all
# the possible maps from those species.
lca_maps[g].append(RT.LCA(clade_maps,sinfo)[0]);
if lca_maps[g][0] == lca_maps[d1][0] or lca_maps[g][0] == lca_maps[d2][0]:
if retmap:
dups[g] += 1;
score += 1;
is_dup = 1;
#Now, if the map of g is identical to one of its descendants, it is a duplication node.
cur_depth = len(RT.nodeDepth(lca_maps[g][0],sinfo))
if lca_ginfo[g][2] == 'root':
if retmap:
losses[g] += cur_depth;
score += cur_depth;
# The number of losses at the root of the gene tree is equal to the depth of its map.
d1_depth = len(RT.nodeDepth(lca_maps[d1][0],sinfo));
d1_loss = (d1_depth - cur_depth - 1) + is_dup;
score += d1_loss
if retmap:
losses[d1] += d1_loss;
d2_depth = len(RT.nodeDepth(lca_maps[d2][0],sinfo))
d2_loss = (d2_depth - cur_depth - 1) + is_dup;
score += d2_loss;
if retmap:
losses[d2] += d2_loss;
# Counting losses for each of the descendents of the current node.
if retmap:
return lca_maps, dups, losses;
return score;
# Return the total number of duplication nodes.
#############################################################################
def getSis(gs_node, check_node, check_clade, gs_dict):
# Gets the hybrid and copy sister species.
d1, d2 = RT.getDesc(gs_node, gs_dict);
if d1 == check_node:
sis_node = d2;
elif d2 == check_node:
sis_node = d1;
sis_clade = RT.getClade(sis_node, gs_dict);
if any(c in check_clade for c in sis_clade):
return [];
else:
return sis_clade;
#############################################################################
def collapseGroups(mul_input, gene_trees_filtered_cg, spec_type_cg, v, pickle_dir, nmt):
# The collapseGroups function goes through all gene tree-MUL-tree combos to collapse the groups.
mul_num, mul_tree = mul_input;
if v == 1:
print("# " + RC.getDateTime() + " --> Collapsing groups for MUL-tree # " + str(mul_num) + " / " + str(nmt));
if mul_num == 0:
return mul_num, [];
gt_groups = {};
mt, minfo, hybrid_clade, hybrid_node, copy_node = mul_tree[0], mul_tree[1], mul_tree[2], mul_tree[3], mul_tree[4];
for gene_num in gene_trees_filtered_cg:
gene_tree = gene_trees_filtered_cg[gene_num];
if len(gene_tree) == 1:
continue;
# If the gene tree was previously filtered, the list will only contain the filter message and it should be skipped here.
gt,ginfo = gene_tree;
internal_nodes = RT.sortNodes(ginfo);
# Sort the internal nodes for a post order traversal.
singles, groups = {}, {};
for g in ginfo:
if ginfo[g][2] == 'tip':
if g[g.rfind("_")+1:] in hybrid_clade:
cur_anc = ginfo[g][1];
anc_clade = RT.getClade(cur_anc, ginfo);
anc_clade.remove(g);
singles[g] = anc_clade;
# First, all hybrid species nodes in the gene tree are added to the singles list.
## GETS SINGLETONS
for g in internal_nodes:
g = "<" + str(g) + ">";
# Next, for any non-tip node, we find out if the species that define it can be grouped
d1, d2 = RT.getDesc(g, ginfo);
d1_clade = RT.getClade(d1, ginfo);
d1_spec_clade = [spec[spec.rfind("_")+1:] for spec in d1_clade];
d2_clade = RT.getClade(d2,ginfo);
d2_spec_clade = [spec[spec.rfind("_")+1:] for spec in d2_clade];
# The clades for the descendants of both nodes are retrieved, and their corresponding
# species are stored.
if all(s in hybrid_clade for s in d1_spec_clade) and all(s in hybrid_clade for s in d2_spec_clade):
# If the descendants from both nodes are all hybrid clade species, then we may be able to group them.
if not any(s in d2_spec_clade for s in d1_spec_clade):
# However, only if there is not more than one copy of a species among the clades can they be grouped.
cur_clade = RT.getClade(g, ginfo);
cur_anc = ginfo[g][1];
anc_clade = RT.getClade(cur_anc, ginfo);
anc_clade = [spec for spec in anc_clade if spec not in cur_clade];
cur_nodes = RT.getCladeNode(g, ginfo);
for node in cur_nodes:
if node in groups:
del groups[node];
groups[g] = [cur_clade, anc_clade];
## CHECKS GROUPINGS
for group in groups:
for g in groups[group][0]:
if g in singles:
del singles[g];
# Removes any singles that are in a group.
final_groups = [];
for node in groups:
final_groups.append(groups[node]);
for single in singles:
final_groups.append([[single], singles[single]]);
# Restructures the final groups and adds singles.
sisters = {};
if spec_type_cg == 's':
mul_hybrid_node = [n for n in minfo if set(RT.getClade(n, minfo)) == set(hybrid_clade)][0];
copy_clade = [c + "*" for c in hybrid_clade];
mul_copy_node = [n for n in minfo if set(RT.getClade(n, minfo)) == set(copy_clade)][0];
# The copy clade is defined.
elif spec_type_cg == 'm':
copy_clade = RT.getClade(copy_node, minfo);
mul_hybrid_node = hybrid_node;
mul_copy_node = copy_node;
hybrid_anc = minfo[mul_hybrid_node][1];
copy_anc = minfo[mul_copy_node][1];
sisters[''] = getSis(hybrid_anc, mul_hybrid_node, copy_clade, minfo);
sisters['*'] = getSis(copy_anc, mul_copy_node, hybrid_clade, minfo);
# These lines get any sister species from the hybrid and copy clades in the MUL-tree and that
# clade's corresponding map. If there are no sisters, it stores an empty list.
groups, fixed_groups = [], [];
for group in final_groups:
group_sis = [spec[spec.rfind("_")+1:] for spec in group[1]];
if group_sis == []:
groups.append(group[0]);
continue;
if all(spec in sisters[''] for spec in group_sis):
fixed_groups.append([group[0],'']);
elif all(spec in sisters['*'] for spec in group_sis):
fixed_groups.append([group[0],'*']);
else:
groups.append(group[0]);
# This checks the sister species of all the groups for the gene tree. If all the sister species
# of a group are also in the sister species of the hybrid or copy clade in the MUL-tree, then we
# can fix the mapping of that node.
## FINDS FIXED SISTER GROUPS
gt_groups[gene_num] = [groups, fixed_groups];
# Adding the groups and fixed groups to the current gt_groups.
groupoutfile = os.path.join(pickle_dir, str(mul_num) + "_groups.pickle");
pickle.dump(gt_groups, open(groupoutfile, "wb"));
del groups, fixed_groups, final_groups, gene_trees_filtered_cg, gt_groups;
#############################################################################
def mulRecon(mul_input, gene_trees, v, pickle_dir, nmt, retmap=False):
# The basis of the MUL-reconciliation algorithm is that there are now nodes that
# have more than one possible map. We try all combinations of mappings for these
# nodes and find which combination(s) results in the most parsimonious mutation score
# (# duplication + # losses).
#
# A few prelminary steps are taken to ensure the quickest mapping groups:
# 1. Identify whether the hybrid or copy clade in the MUL-tree have sister groups. If so, we can use
# them to fix similar nodes in the gene tree.
# 2. Find nodes that contain only one or zero copies of the hybrid node species and species from one
# of the sister groups. Fix the mappings of these nodes.
# 3. Any other nodes that contain only one or zero copies of the hybrid node species can be grouped
# and should be mapped consistently, though we will still have to try both maps.
# 4. Remaining single hybrid nodes must be tried with both maps.
#
# Once these steps are done (in the collapseGroups function), a list of node groups is obtained, for
# which we generate every combination of map and try to reconcile to the MUL-tree. A score is obtained
# for each combination and the minimum score is kept as the correct map.
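    # Illustrative sketch (hypothetical counts): with two collapsed groups the
    # loop below tries the map combinations ('', ''), ('', '*'), ('*', '') and
    # ('*', '*') -- 2^num_groups candidates in total -- mapping each group either
    # to the original hybrid clade ('') or to its starred copy ('*') before the
    # LCA mapping is re-run and scored.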
mul_num, mul_tree = mul_input
#main_output, det_output, min_num, min_score, min_maps, multiple_maps = {}, [], '', 9999999, {}, 0;
# mulpicklefile = os.path.join(pickle_dir, str(mul_num) + "_tree.pickle");
# mul_tree = pickle.load(open(mulpicklefile, "rb"));
if v == 1:
print("# " + RC.getDateTime() + " --> Reconciling to MUL-tree # " + str(mul_num) + " / " + str(nmt));
min_maps = {};
total_score = 0;
if mul_num != 0:
groupfilename = os.path.join(pickle_dir, str(mul_num) + "_groups.pickle");
cur_groups = pickle.load(open(groupfilename, "rb"));
for gene_num, gene_tree in gene_trees.items():
gt, ginfo = gene_tree;
gene_score = 99999;
min_maps[gene_num] = [];
if mul_num == 0:
sinfo = mul_tree[1];
init_maps = {};
for g in ginfo:
if ginfo[g][2] == 'tip':
speclabel = g[g.rfind("_")+1:];
init_maps[g] = [speclabel];
else:
init_maps[g] = [];
# Initialize the maps.
if retmap:
maps, node_dups, node_loss = reconLCA(ginfo, sinfo, init_maps, retmap);
num_dups = sum(node_dups.values());
num_loss = sum(node_loss.values());
gene_score = num_dups + num_loss;
min_maps[gene_num].append([gene_score, num_dups, num_loss, maps, node_dups, node_loss]);
else:
gene_score = reconLCA(ginfo, sinfo, init_maps);
total_score += gene_score;
# Some counting.
else:
mt, minfo, hybrid_clade, hybrid_node, copy_node, = mul_tree[0], mul_tree[1], mul_tree[2], mul_tree[3], mul_tree[4];
# Aggregate variables for the current GENE tree.
gt_groups, gt_fixed = cur_groups[gene_num][0], cur_groups[gene_num][1];
num_groups = len(gt_groups);
# Retrieve gene tree info and collapsed groups for this gene tree-MUL-tree combo
for combo in itertools.product(['','*'], repeat=num_groups):
# We get all combinations of mappings for each node group. This is the time constraining step.
group_map = [];
for i in range(len(combo)):
for node in gt_groups[i]:
group_map.append(node + combo[i]);
# This builds the current map for each group.
for fixed in gt_fixed:
for node in fixed[0]:
group_map.append(node + fixed[1]);
# This adds the fixed maps onto the current combination of group mappings.
# Now we do LCA mapping for the current combination of maps for the hybrid clade species.
maps = {};
for g in ginfo:
if ginfo[g][2] == 'tip':
speclabel = g[g.rfind("_")+1:];
if g in group_map:
maps[g] = [speclabel];
# If the node is in a hybrid clade, use the map in the current combination.
elif g + "*" in group_map:
maps[g] = [speclabel + "*"];
else:
maps[g] = [speclabel];
# Otherwise, the map is just the species label.
else:
maps[g] = [];
# And if the node is not a tip, the map is empty.
if retmap:
maps, node_dups, node_loss = reconLCA(ginfo, minfo, maps, retmap);
num_dups = sum(node_dups.values());
num_loss = sum(node_loss.values());
cur_score = num_dups + num_loss;
if cur_score <= gene_score:
if cur_score < gene_score:
gene_score = cur_score;
min_maps[gene_num] = [];
min_maps[gene_num].append([gene_score, num_dups, num_loss, maps, node_dups, node_loss])
else:
cur_score = reconLCA(ginfo, minfo, maps);
if cur_score < gene_score:
gene_score = cur_score;
# Once the current maps have been initialized, we can simply call the normal LCA mapping algorithm
## End mapping of one gene tree.
total_score += gene_score;
## End mapping all gene trees.
if retmap:
return min_maps;
else:
return mul_num, total_score;
# #############################################################################
# A couple ways to get the map combos:
# combo_ind = list(itertools.product(['','*'], repeat=len(node_ind)));
# if v == -2:
# print "num combos", len(combo_ind);
# combos = list(itertools.product(['','*'], repeat=len(node_ind)));
# Old loading:
# if v == 0 and numiters > 100:
# numbars, donepercent = RC.loadingBar(itercount, numiters, donepercent, numbars);
# itercount = itercount + 1;
# # Only the loading bar displays when the program is running if -v is set to 0.
| gpl-3.0 | 5,744,836,206,608,884,000 | 33.195251 | 122 | 0.626157 | false |
diofant/diofant | diofant/domains/domain.py | 1 | 8541 | """Implementation of :class:`Domain` class."""
import abc
import inspect
from ..core import Expr
from ..core.compatibility import HAS_GMPY
from ..polys.orderings import lex
from ..polys.polyerrors import CoercionFailed, UnificationFailed
from ..polys.polyutils import _unify_gens
from ..printing.defaults import DefaultPrinting
from .domainelement import DomainElement
class Domain(DefaultPrinting, abc.ABC):
"""Represents an abstract domain."""
is_Ring = False
is_Field = False
has_assoc_Ring = False
is_FiniteField = False
is_IntegerRing = False
is_RationalField = False
is_RealField = False
is_ComplexField = False
is_AlgebraicField = False
is_RealAlgebraicField = False
is_ComplexAlgebraicField = False
is_PolynomialRing = False
is_FractionField = False
is_ExpressionDomain = False
is_Exact = True
is_Numerical = False
def __hash__(self):
return hash((self.__class__.__name__, self.dtype))
def __call__(self, *args):
"""Construct an element of ``self`` domain from ``args``."""
return self.dtype(*args)
def __getstate__(self):
return {}
@abc.abstractmethod
def from_expr(self, expr):
"""Convert Diofant's expression ``expr`` to ``dtype``."""
raise NotImplementedError
@abc.abstractmethod
def to_expr(self, element):
"""Convert domain ``element`` to Diofant expression."""
raise NotImplementedError
def convert_from(self, element, base):
"""Convert ``element`` to ``self.dtype`` given the base domain."""
for superclass in inspect.getmro(base.__class__):
method = '_from_' + superclass.__name__
convert = getattr(self, method, None)
if convert:
result = convert(element, base)
if result is not None:
return result
raise CoercionFailed(f"can't convert {element} of type {type(element)} "
f'from {base} to {self}')
def convert(self, element, base=None):
"""Convert ``element`` to ``self.dtype``."""
if base is not None:
return self.convert_from(element, base)
if isinstance(element, self.dtype):
return element
from . import ComplexField, PythonRational, RealField
from .expressiondomain import ExpressionDomain
from .integerring import GMPYIntegerRing, PythonIntegerRing
from .rationalfield import GMPYRationalField, PythonRationalField
if isinstance(element, int):
return self.convert_from(element, PythonIntegerRing())
if isinstance(element, PythonRational):
return self.convert_from(element, PythonRationalField())
if HAS_GMPY:
integers = GMPYIntegerRing()
if isinstance(element, integers.dtype):
return self.convert_from(element, integers)
rationals = GMPYRationalField()
if isinstance(element, rationals.dtype):
return self.convert_from(element, rationals)
if isinstance(element, float):
parent = RealField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, complex):
parent = ComplexField(tol=False)
return self.convert_from(parent(element), parent)
if isinstance(element, DomainElement):
return self.convert_from(element, element.parent)
if isinstance(element, ExpressionDomain.Expression):
return self.convert_from(element, ExpressionDomain())
if isinstance(element, Expr):
try:
return self.from_expr(element)
except (TypeError, ValueError):
pass
raise CoercionFailed(f"can't convert {element} of type {type(element)} to {self}")
def __contains__(self, a):
"""Check if ``a`` belongs to this domain."""
try:
self.convert(a)
return True
except CoercionFailed:
return False
def _from_PolynomialRing(self, a, K0):
if a.is_ground:
return self.convert(a.LC, K0.domain)
def _from_FractionField(self, a, K0):
if a.numerator.is_ground and a.denominator == 1:
return self.convert(a.numerator.LC, K0.domain.ring)
def unify(self, K1, symbols=()):
"""
Construct a minimal domain that contains elements of ``self`` and ``K1``.
Known domains (from smallest to largest):
- ``GF(p)``
- ``ZZ``
- ``QQ``
- ``RR(prec, tol)``
- ``CC(prec, tol)``
- ``ALG(a, b, c)``
- ``K[x, y, z]``
- ``K(x, y, z)``
- ``EX``
"""
from .compositedomain import CompositeDomain
if symbols:
if any(isinstance(d, CompositeDomain) and (set(d.symbols) & set(symbols))
for d in [self, K1]):
raise UnificationFailed(f"Can't unify {self} with {K1}, "
f'given {symbols} generators')
return self.unify(K1)
if self == K1:
return self
if self.is_ExpressionDomain:
return self
if K1.is_ExpressionDomain:
return K1
if any(isinstance(d, CompositeDomain) for d in (self, K1)):
if isinstance(self, CompositeDomain):
self_ground = self.domain
self_symbols = self.symbols
order = self.order
else:
self_ground = self
self_symbols = ()
order = K1.order
if isinstance(K1, CompositeDomain):
K1_ground = K1.domain
K1_symbols = K1.symbols
else:
K1_ground = K1
K1_symbols = ()
domain = self_ground.unify(K1_ground)
symbols = _unify_gens(self_symbols, K1_symbols)
if ((self.is_FractionField and K1.is_PolynomialRing or
K1.is_FractionField and self.is_PolynomialRing) and
(not self_ground.is_Field or not K1_ground.is_Field) and domain.has_assoc_Ring):
domain = domain.ring
if isinstance(self, CompositeDomain) and (not isinstance(K1, CompositeDomain) or self.is_FractionField or K1.is_PolynomialRing):
cls = self.__class__
else:
cls = K1.__class__
return cls(domain, symbols, order)
def mkinexact(cls, K0, K1):
prec = max(K0.precision, K1.precision)
tol = max(K0.tolerance, K1.tolerance)
return cls(prec=prec, tol=tol)
if self.is_ComplexField and K1.is_ComplexField:
return mkinexact(self.__class__, self, K1)
if self.is_ComplexField and K1.is_RealField:
return mkinexact(self.__class__, self, K1)
if self.is_RealField and K1.is_ComplexField:
return mkinexact(K1.__class__, K1, self)
if self.is_RealField and K1.is_RealField:
return mkinexact(self.__class__, self, K1)
if self.is_ComplexField or self.is_RealField:
return self
if K1.is_ComplexField or K1.is_RealField:
return K1
if self.is_AlgebraicField and K1.is_AlgebraicField:
return self.__class__(self.domain.unify(K1.domain), *_unify_gens(self.gens, K1.gens))
elif self.is_AlgebraicField:
return self
elif K1.is_AlgebraicField:
return K1
if self.is_RationalField:
return self
if K1.is_RationalField:
return K1
if self.is_FiniteField and self.domain == K1:
return self
if K1.is_FiniteField and K1.domain == self:
return K1
raise NotImplementedError
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent."""
return isinstance(other, Domain) and self.dtype == other.dtype
def get_exact(self):
return self
def poly_ring(self, *symbols, **kwargs):
"""Returns a polynomial ring, i.e. `K[X]`."""
from ..polys import PolynomialRing
return PolynomialRing(self, symbols, kwargs.get('order', lex))
def frac_field(self, *symbols, **kwargs):
"""Returns a fraction field, i.e. `K(X)`."""
from ..polys import FractionField
return FractionField(self, symbols, kwargs.get('order', lex))
| bsd-3-clause | 1,690,075,486,520,412,000 | 32.104651 | 140 | 0.580026 | false |
lowRISC/ibex | vendor/google_riscv-dv/pygen/pygen_src/riscv_asm_program_gen.py | 2 | 42834 | """
Copyright 2020 Google LLC
Copyright 2020 PerfectVIPs Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import logging
import random
import copy
import sys
import vsc
from importlib import import_module
from pygen_src.riscv_instr_sequence import riscv_instr_sequence
from pygen_src.riscv_instr_pkg import (pkg_ins, privileged_reg_t,
privileged_mode_t, mtvec_mode_t,
misa_ext_t, riscv_instr_group_t,
satp_mode_t, exception_cause_t)
from pygen_src.riscv_signature_pkg import (signature_type_t, core_status_t,
test_result_t)
from pygen_src.riscv_instr_gen_config import cfg
from pygen_src.riscv_data_page_gen import riscv_data_page_gen
from pygen_src.riscv_privileged_common_seq import riscv_privileged_common_seq
from pygen_src.riscv_utils import factory
rcs = import_module("pygen_src.target." + cfg.argv.target + ".riscv_core_setting")
'''
RISC-V assembly program generator
This is the main class to generate a complete RISC-V program, including the init routine,
instruction section, data section, stack section, page table, interrupt and exception
handling etc. Check gen_program() function to see how the program is generated.
'''
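# Typical usage sketch (an assumed flow based on the methods defined below;
# "asm_test.S" is just a placeholder file name):
#   asm = riscv_asm_program_gen()
#   asm.gen_program()                # build the complete instruction stream
#   asm.gen_test_file("asm_test.S")  # write the stream out as an assembly file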
class riscv_asm_program_gen:
def __init__(self):
self.instr_stream = []
self.directed_instr_stream_ratio = {}
self.hart = 0
self.page_table_list = []
self.main_program = []
self.sub_program = []
self.data_page_gen = None
# Main function to generate the whole program
# This is the main function to generate all sections of the program.
def gen_program(self):
# Generate program header
self.instr_stream.clear()
self.gen_program_header()
for hart in range(cfg.num_of_harts):
# Commenting out for now
# sub_program_name = []
self.instr_stream.append(f"h{int(hart)}_start:")
if not cfg.bare_program_mode:
self.setup_misa()
# Create all page tables
self.create_page_table(hart)
# Setup privileged mode registers and enter target privileged mode
self.pre_enter_privileged_mode(hart)
# Init section
self.gen_init_section(hart)
# To DO
'''
If PMP is supported, we want to generate the associated trap handlers and the test_done
section at the start of the program so we can allow access through the pmpcfg0 CSR
'''
if(rcs.support_pmp and not(cfg.bare_program_mode)):
self.gen_trap_handlers(hart)
# Ecall handler
self.gen_ecall_handler(hart)
# Instruction fault handler
self.gen_instr_fault_handler(hart)
# Load fault handler
self.gen_load_fault_handler(hart)
# Store fault handler
self.gen_store_fault_handler(hart)
if hart == 0:
self.gen_test_done()
# Generate main program
gt_lbl_str = pkg_ins.get_label("main", hart)
label_name = gt_lbl_str
gt_lbl_str = riscv_instr_sequence()
self.main_program.append(gt_lbl_str)
self.main_program[hart].instr_cnt = cfg.main_program_instr_cnt
self.main_program[hart].is_debug_program = 0
self.main_program[hart].label_name = label_name
self.generate_directed_instr_stream(hart=hart,
label=self.main_program[hart].label_name,
original_instr_cnt=
self.main_program[hart].instr_cnt,
min_insert_cnt=1,
instr_stream=self.main_program[hart].directed_instr)
self.main_program[hart].gen_instr(is_main_program=1, no_branch=cfg.no_branch_jump)
self.main_program[hart].post_process_instr()
self.main_program[hart].generate_instr_stream()
logging.info("Generating main program instruction stream...done")
self.instr_stream.extend(self.main_program[hart].instr_string_list)
"""
If PMP is supported, need to jump from end of main program
to test_done section at the end of main_program, as the test_done
will have moved to the beginning of the program
"""
self.instr_stream.append("{}j test_done".format(pkg_ins.indent))
'''
Test done section
If PMP isn't supported, generate this in the normal location
'''
if(hart == 0 and not(rcs.support_pmp)):
self.gen_test_done()
logging.info("Main/sub program generation...done")
# program end
self.gen_program_end(hart)
if not cfg.bare_program_mode:
# Generate debug rom section
if rcs.support_debug_mode:
self.gen_debug_rom(hart)
self.gen_section(pkg_ins.hart_prefix(hart) + "instr_end", ["nop"])
for hart in range(cfg.num_of_harts):
# Starting point of data section
self.gen_data_page_begin(hart)
if not cfg.no_data_page:
# User data section
self.gen_data_page(hart)
# AMO memory region
if(hart == 0 and riscv_instr_group_t.RV32A in rcs.supported_isa):
self.gen_data_page(hart, amo = 1)
self.gen_stack_section(hart)
if not cfg.bare_program_mode:
# Generate kernel program/data/stack section
self.gen_kernel_sections(hart)
# Page table
self.gen_page_table_section(hart)
def gen_kernel_sections(self, hart):
if rcs.SATP_MODE != satp_mode_t.BARE:
self.instr_stream.append(".align 12")
else:
self.instr_stream.append(".align 2")
self.instr_stream.append(pkg_ins.get_label("kernel_instr_start:", hart))
self.instr_stream.append(".text")
self.gen_all_trap_handler(hart)
for mode in rcs.supported_privileged_mode:
self.gen_interrupt_handler_section(mode, hart)
self.instr_stream.append(pkg_ins.get_label("kernel_instr_end: nop", hart))
self.gen_kernel_stack_section(hart)
def gen_kernel_program(self, hart, seq):
pass
def gen_sub_program(self, hart, sub_program,
sub_program_name, num_sub_program,
is_debug = 0, prefix = "sub"):
pass
def gen_callstack(self, main_program, sub_program,
sub_program_name, num_sub_program):
pass
def insert_sub_program(self, sub_program, instr_list):
pass
def gen_program_header(self):
string = []
self.instr_stream.append(".include \"user_define.h\"")
self.instr_stream.append(".globl _start")
self.instr_stream.append(".section .text")
if cfg.disable_compressed_instr:
self.instr_stream.append(".option norvc;")
string.append(".include \"user_init.s\"")
string.append("csrr x5, mhartid")
for hart in range(cfg.num_of_harts):
string.append("li x6, {}\n{}beq x5, x6, {}f".format(hart, pkg_ins.indent, hart))
self.gen_section("_start", string)
for hart in range(cfg.num_of_harts):
self.instr_stream.append("{}: j h{}_start".format(hart, hart))
def gen_program_end(self, hart):
if hart == 0:
self.gen_section("write_tohost", ["sw gp, tohost, t5"])
self.gen_section("_exit", ["j write_tohost"])
def gen_data_page_begin(self, hart):
self.instr_stream.append(".section .data")
if hart == 0:
self.instr_stream.append(".align 6; .global tohost; tohost: .dword 0;")
self.instr_stream.append(".align 6; .global fromhost; fromhost: .dword 0;")
def gen_data_page(self, hart, is_kernel = 0, amo = 0):
self.data_page_gen = riscv_data_page_gen()
self.data_page_gen.gen_data_page(hart, cfg.data_page_pattern, is_kernel, amo)
self.instr_stream.extend(self.data_page_gen.data_page_str)
def gen_stack_section(self, hart):
hart_prefix_string = pkg_ins.hart_prefix(hart)
if cfg.use_push_data_section:
self.instr_stream.append(
".pushsection .{}user_stack,\"aw\",@progbits;".format(hart_prefix_string))
else:
self.instr_stream.append(
".section .{}user_stack,\"aw\",@progbits;".format(hart_prefix_string))
if rcs.SATP_MODE != satp_mode_t.BARE:
self.instr_stream.append(".align 12")
else:
self.instr_stream.append(".align 2")
self.instr_stream.append(pkg_ins.get_label("user_stack_start:", hart))
self.instr_stream.append(".rept {}".format(cfg.stack_len - 1))
self.instr_stream.append(".{}byte 0x0".format(rcs.XLEN // 8))
self.instr_stream.append(".endr")
self.instr_stream.append(pkg_ins.get_label("user_stack_end:", hart))
self.instr_stream.append(".{}byte 0x0".format(rcs.XLEN // 8))
if cfg.use_push_data_section:
            self.instr_stream.append(".popsection;")
def gen_kernel_stack_section(self, hart):
hart_prefix_string = pkg_ins.hart_prefix(hart)
if cfg.use_push_data_section:
self.instr_stream.append(
".pushsection .{}kernel_stack,\"aw\",@progbits;".format(hart_prefix_string))
else:
self.instr_stream.append(
".section .{}kernel_stack,\"aw\",@progbits;".format(hart_prefix_string))
if rcs.SATP_MODE != satp_mode_t.BARE:
self.instr_stream.append(".align 12")
else:
self.instr_stream.append(".align 2")
self.instr_stream.append(pkg_ins.get_label("kernel_stack_start:", hart))
self.instr_stream.append(".rept {}".format(cfg.kernel_stack_len - 1))
self.instr_stream.append(".{}byte 0x0".format(rcs.XLEN // 8))
self.instr_stream.append(".endr")
self.instr_stream.append(pkg_ins.get_label("kernel_stack_end:", hart))
self.instr_stream.append(".{}byte 0x0".format(rcs.XLEN // 8))
if cfg.use_push_data_section:
            self.instr_stream.append(".popsection;")
def gen_init_section(self, hart):
string = pkg_ins.format_string(pkg_ins.get_label("init:", hart), pkg_ins.LABEL_STR_LEN)
self.instr_stream.append(string)
if cfg.enable_floating_point:
self.init_floating_point_gpr()
self.init_gpr()
# Init stack pointer to point to the end of the user stack
string = "{}la x{}, {}user_stack_end".format(
pkg_ins.indent, cfg.sp, pkg_ins.hart_prefix(hart))
self.instr_stream.append(string)
if cfg.enable_vector_extension:
self.init_vector_engine()
self.core_is_initialized()
self.gen_dummy_csr_write()
if rcs.support_pmp:
string = pkg_ins.indent + "j main"
self.instr_stream.append(string)
# Setup MISA based on supported extensions
def setup_misa(self):
misa = vsc.bit_t(rcs.XLEN)
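        # misa.MXL lives in the two most significant bits and encodes the base
        # ISA width: 1 = 32-bit, 2 = 64-bit, 3 = 128-bit.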
if rcs.XLEN == 32:
misa[rcs.XLEN - 1:rcs.XLEN - 2] = 1
elif rcs.XLEN == 64:
misa[rcs.XLEN - 1:rcs.XLEN - 2] = 2
else:
misa[rcs.XLEN - 1:rcs.XLEN - 2] = 3
if cfg.check_misa_init_val:
self.instr_stream.append("{}csrr x15, {}".format(pkg_ins.indent,
hex(privileged_reg_t.MISA)))
for group in rcs.supported_isa:
if group in [riscv_instr_group_t.RV32C,
riscv_instr_group_t.RV64C,
riscv_instr_group_t.RV128C]:
misa[misa_ext_t.MISA_EXT_C] = 1
elif group in [riscv_instr_group_t.RV32I,
riscv_instr_group_t.RV64I,
riscv_instr_group_t.RV128I]:
misa[misa_ext_t.MISA_EXT_I] = 1
elif group in [riscv_instr_group_t.RV32M,
riscv_instr_group_t.RV64M]:
misa[misa_ext_t.MISA_EXT_M] = 1
elif group in [riscv_instr_group_t.RV32A,
riscv_instr_group_t.RV64A]:
misa[misa_ext_t.MISA_EXT_A] = 1
elif group in [riscv_instr_group_t.RV32B,
riscv_instr_group_t.RV64B]:
misa[misa_ext_t.MISA_EXT_B] = 1
elif group in [riscv_instr_group_t.RV32F,
riscv_instr_group_t.RV64F,
riscv_instr_group_t.RV32FC]:
misa[misa_ext_t.MISA_EXT_F] = 1
elif group in [riscv_instr_group_t.RV32D,
riscv_instr_group_t.RV64D,
riscv_instr_group_t.RV32DC]:
misa[misa_ext_t.MISA_EXT_D] = 1
elif group in [riscv_instr_group_t.RVV]:
misa[misa_ext_t.MISA_EXT_V] = 1
elif group in [riscv_instr_group_t.RV32X,
riscv_instr_group_t.RV64X]:
misa[misa_ext_t.MISA_EXT_X] = 1
else:
logging.critical("{} is not yet supported".format(group.name))
sys.exit(1)
if privileged_mode_t.SUPERVISOR_MODE.name in rcs.supported_privileged_mode:
misa[misa_ext_t.MISA_EXT_S] = 1
self.instr_stream.append("{}li x{}, {}".format(pkg_ins.indent, cfg.gpr[0],
hex(misa.get_val())))
self.instr_stream.append("{}csrw {}, x{}".format(pkg_ins.indent, hex(privileged_reg_t.MISA),
cfg.gpr[0]))
def core_is_initialized(self):
pass
def gen_dummy_csr_write(self):
pass
def init_gpr(self):
reg_val = vsc.rand_bit_t(pkg_ins.DATA_WIDTH)
for i in range(rcs.NUM_GPR):
if i in [cfg.sp.value, cfg.tp.value]:
continue
try:
with vsc.randomize_with(reg_val):
vsc.dist(reg_val, [vsc.weight(0, 1), vsc.weight(0x80000000, 1),
vsc.weight(vsc.rng(0x1, 0xf), 1),
vsc.weight(vsc.rng(0x10, 0xefffffff), 1),
vsc.weight(vsc.rng(0xf0000000, 0xffffffff), 1)])
except Exception:
logging.critical("Cannot Randomize reg_val")
sys.exit(1)
init_string = "{}li x{}, {}".format(pkg_ins.indent, i, hex(reg_val.get_val()))
self.instr_stream.append(init_string)
def init_floating_point_gpr(self):
for i in range(rcs.NUM_FLOAT_GPR):
vsc.randselect([
(1, lambda: self.init_floating_point_gpr_with_spf(i)),
(riscv_instr_group_t.RV64D in rcs.supported_isa,
lambda: self.init_floating_point_gpr_with_dpf(i))])
# Initialize rounding mode of FCSR
fsrmi_instr = "{}fsrmi {}".format(pkg_ins.indent, cfg.fcsr_rm)
self.instr_stream.append(fsrmi_instr)
def init_floating_point_gpr_with_spf(self, int_floating_gpr):
imm = self.get_rand_spf_value()
li_instr = "{}li x{}, {}".format(pkg_ins.indent, cfg.gpr[0], hex(imm))
fmv_instr = "{}fmv.w.x f{}, x{}".format(pkg_ins.indent, int_floating_gpr,
cfg.gpr[0])
self.instr_stream.extend((li_instr, fmv_instr))
def init_floating_point_gpr_with_dpf(self, int_floating_gpr):
imm = vsc.bit_t(64)
imm = self.get_rand_dpf_value()
int_gpr1 = cfg.gpr[0].value
int_gpr2 = cfg.gpr[1].value
li_instr0 = "{}li x{}, {}".format(pkg_ins.indent, int_gpr1, imm[63:32])
        # Shift the upper word into place: two slli-by-16 instructions move
        # imm[63:32] up into the high 32 bits of the register.
        slli_instrs = ["{}slli x{}, x{}, 16".format(pkg_ins.indent, int_gpr1, int_gpr1)
                       for _ in range(2)]
        li_instr1 = "{}li x{}, {}".format(pkg_ins.indent, int_gpr2, imm[31:0])
        or_instr = "{}or x{}, x{}, x{}".format(pkg_ins.indent, int_gpr2, int_gpr2, int_gpr1)
        fmv_instr = "{}fmv.d.x f{}, x{}".format(pkg_ins.indent, int_floating_gpr, int_gpr2)
        self.instr_stream.extend([li_instr0] + slli_instrs + [li_instr1, or_instr, fmv_instr])
# Get a random single precision floating value
def get_rand_spf_value(self):
# TODO randcase
value = random.randrange(0, 2**32 - 1)
return value
# Get a random double precision floating value
def get_rand_dpf_value(self):
value = vsc.bit_t(64)
# TODO randcase
return value
def init_vector_engine(self):
pass
def gen_test_done(self):
string = pkg_ins.format_string("test_done:", pkg_ins.LABEL_STR_LEN)
self.instr_stream.append(string)
self.instr_stream.append(pkg_ins.indent + "li gp, 1")
if cfg.bare_program_mode:
self.instr_stream.append(pkg_ins.indent + "j write_tohost")
else:
self.instr_stream.append(pkg_ins.indent + "ecall")
def gen_register_dump(self):
string = ""
# load base address
string = "{}la x{}, _start".format(pkg_ins.indent, cfg.gpr[0])
self.instr_stream.append(string)
# Generate sw/sd instructions
for i in range(32):
if rcs.XLEN == 64:
string = "{}sd x{}, {}(x{})".format(
                    pkg_ins.indent, i, int(i * (rcs.XLEN / 8)), cfg.gpr[0])
else:
string = "{}sw x{}, {}(x{})".format(
pkg_ins.indent, i, int(i * (rcs.XLEN / 8)), cfg.gpr[0])
self.instr_stream.append(string)
def pre_enter_privileged_mode(self, hart):
instr = []
string = []
string.append("la x{}, {}kernel_stack_end".format(cfg.tp, pkg_ins.hart_prefix(hart)))
self.gen_section(pkg_ins.get_label("kernel_sp", hart), string)
if not cfg.no_delegation and (cfg.init_privileged_mode != privileged_mode_t.MACHINE_MODE):
self.gen_delegation(hart)
self.trap_vector_init(hart)
self.setup_pmp(hart)
if cfg.virtual_addr_translation_on:
self.page_table_list.process_page_table(instr)
self.gen_section(pkg_ins.get_label("process_pt", hart), instr)
self.setup_epc(hart)
self.gen_privileged_mode_switch_routine(hart)
def gen_privileged_mode_switch_routine(self, hart):
privil_seq = riscv_privileged_common_seq()
for i in range(len(rcs.supported_privileged_mode)):
instr = []
# csr_handshake = []
if rcs.supported_privileged_mode[i] != cfg.init_privileged_mode:
continue
logging.info("Generating privileged mode routing for {}"
.format(rcs.supported_privileged_mode[i]))
# Enter Privileged mode
privil_seq.hart = hart
privil_seq.randomize()
privil_seq.enter_privileged_mode(rcs.supported_privileged_mode[i], instr)
# TODO
if cfg.require_signature_addr:
pass
self.instr_stream.extend(instr)
def setup_epc(self, hart):
instr = []
instr.append("la x{}, {}init".format(cfg.gpr[0], pkg_ins.hart_prefix(hart)))
if cfg.virtual_addr_translation_on:
# For supervisor and user mode, use virtual address instead of physical address.
# Virtual address starts from address 0x0, here only the lower 12 bits are kept
# as virtual address offset.
instr.append("slli x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0], rcs.XLEN - 12) +
"srli x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0], rcs.XLEN - 12))
mode_name = cfg.init_privileged_mode.name
instr.append("csrw {}, x{}".format(hex(privileged_reg_t.MEPC), cfg.gpr[0]))
if not rcs.support_pmp:
instr.append("j {}init_{}".format(pkg_ins.hart_prefix(hart), mode_name.lower()))
self.gen_section(pkg_ins.get_label("mepc_setup", hart), instr)
def setup_pmp(self, hart):
pass
def gen_delegation(self, hart):
self.gen_delegation_instr(hart, "MEDELEG", "MIDELEG",
cfg.m_mode_exception_delegation,
cfg.m_mode_interrupt_delegation)
if rcs.support_umode_trap:
self.gen_delegation_instr(hart, "SEDELEG", "SIDELEG",
cfg.s_mode_exception_delegation,
cfg.s_mode_interrupt_delegation)
def gen_delegation_instr(self, hart, edeleg, ideleg,
edeleg_enable, ideleg_enable):
pass
def trap_vector_init(self, hart):
instr = []
for mode in rcs.supported_privileged_mode:
if mode == privileged_mode_t.MACHINE_MODE:
trap_vec_reg = privileged_reg_t.MTVEC
elif mode == privileged_mode_t.SUPERVISOR_MODE:
trap_vec_reg = privileged_reg_t.STVEC
elif mode == privileged_mode_t.USER_MODE:
trap_vec_reg = privileged_reg_t.UTVEC
else:
logging.critical("Unsupported privileged_mode {}".format(mode.name))
sys.exit(1)
if(mode == privileged_mode_t.USER_MODE and not (rcs.support_umode_trap)):
continue
if mode < cfg.init_privileged_mode:
continue
tvec_name = trap_vec_reg.name
tvec_name = tvec_name.lower()
instr.append("la x{}, {}{}_handler".format(
cfg.gpr[0], pkg_ins.hart_prefix(hart), tvec_name))
if(rcs.SATP_MODE != satp_mode_t.BARE and mode != privileged_mode_t.MACHINE_MODE):
instr.append("slli x{}, x{}, {}\n".format(cfg.gpr[0], cfg.gpr[0], rcs.XLEN - 20) +
"srli x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0], rcs.XLEN - 20))
instr.append("ori x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0], cfg.mtvec_mode))
instr.append("csrw {}, x{} # {}".format(
hex(trap_vec_reg), cfg.gpr[0], trap_vec_reg.name))
self.gen_section(pkg_ins.get_label("trap_vec_init", hart), instr)
def gen_all_trap_handler(self, hart):
if not rcs.support_pmp:
self.gen_trap_handlers(hart)
self.gen_ecall_handler(hart)
self.gen_instr_fault_handler(hart)
self.gen_load_fault_handler(hart)
self.gen_store_fault_handler(hart)
self.gen_illegal_instr_handler(hart)
def gen_trap_handlers(self, hart):
self.gen_trap_handler_section(hart, "m", privileged_reg_t.MCAUSE,
privileged_reg_t.MTVEC, privileged_reg_t.MTVAL,
privileged_reg_t.MEPC, privileged_reg_t.MSCRATCH,
privileged_reg_t.MSTATUS, privileged_reg_t.MIE,
privileged_reg_t.MIP)
def gen_trap_handler_section(self, hart, mode, cause, tvec,
tval, epc, scratch, status, ie, ip):
# is_interrupt = 1
tvec_name = ""
instr = []
if cfg.mtvec_mode == mtvec_mode_t.VECTORED:
self.gen_interrupt_vector_table(hart, mode, status, cause, ie, ip, scratch, instr)
else:
# Push user mode GPR to kernel stack before executing exception handling,
# this is to avoid exception handling routine modify user program state
# unexpectedly
# TODO
pkg_ins.push_gpr_to_kernel_stack(
status, scratch, cfg.mstatus_mprv, cfg.sp, cfg.tp, instr)
# Checking xStatus can be optional if ISS (like spike) has different implementation of
# certain fields compared with the RTL processor.
if cfg.check_xstatus:
instr.append("csrr x{}, {} # {}".format(
cfg.gpr[0], hex(status), status.name))
instr.append("csrr x{}, {} # {}\n".format(cfg.gpr[0], hex(cause),
cause.name) +
"{}srli x{}, x{}, {}\n".format(pkg_ins.indent, cfg.gpr[0],
cfg.gpr[0], rcs.XLEN - 1) +
"{}bne x{}, x0, {}{}mode_intr_handler".format(pkg_ins.indent,
cfg.gpr[0],
pkg_ins.hart_prefix(hart),
mode))
# The trap handler will occupy one 4KB page, it will be allocated one entry in
# the page table with a specific privileged mode.
if rcs.SATP_MODE != satp_mode_t.BARE:
self.instr_stream.append(".align 12")
else:
self.instr_stream.append(".align {}".format(cfg.tvec_alignment))
tvec_name = tvec.name
self.gen_section(pkg_ins.get_label("{}_handler".format(tvec_name.lower()), hart), instr)
# TODO Exception handlers
instr = []
if cfg.mtvec_mode == mtvec_mode_t.VECTORED:
pkg_ins.push_gpr_to_kernel_stack(status, scratch,
cfg.mstatus_mprv, cfg.sp, cfg.tp, instr)
self.gen_signature_handshake(instr, signature_type_t.CORE_STATUS,
core_status_t.HANDLING_EXCEPTION)
# The trap is caused by an exception, read back xCAUSE, xEPC to see if these
# CSR values are set properly. The checking is done by comparing against the log
# generated by ISA simulator (spike).
instr.extend(("csrr x{}, 0x{} # {}".format(cfg.gpr[0], epc, epc.name),
"csrr x{}, 0x{} # {}".format(cfg.gpr[0], cause, cause.name),
# Illegal instruction exception
"li x{}, {} # ILLEGAL_INSTRUCTION".format(
cfg.gpr[1], hex(exception_cause_t.ILLEGAL_INSTRUCTION)),
"beq x{}, x{}, {}illegal_instr_handler".format(
cfg.gpr[0], cfg.gpr[1], pkg_ins.hart_prefix(hart)),
# Skip checking tval for illegal instruction as it's implementation specific
"csrr x{}, {} # {}".format(cfg.gpr[1], hex(tval), tval.name),
# use JALR to jump to test_done.
"1: la x{}, test_done".format(cfg.scratch_reg),
"jalr x1, x{}, 0".format(cfg.scratch_reg)))
self.gen_section(pkg_ins.get_label("{}mode_exception_handler".format(mode), hart), instr)
def gen_interrupt_vector_table(self, hart, mode, status, cause, ie,
ip, scratch, instr):
'''In vector mode, the BASE address is shared between interrupt 0 and exception handling.
When vectored interrupts are enabled, interrupt cause 0, which corresponds to user-mode
software interrupts, are vectored to the same location as synchronous exceptions. This
ambiguity does not arise in practice, since user-mode software interrupts are either
disabled or delegated'''
instr.extend((".option norvc;", "j {}{}mode_exception_handler".format(
pkg_ins.hart_prefix(hart), mode)))
# Redirect the interrupt to the corresponding interrupt handler
for i in range(1, rcs.max_interrupt_vector_num):
instr.append("j {}{}mode_intr_vector_{}".format(pkg_ins.hart_prefix(hart), mode, i))
if not cfg.disable_compressed_instr:
instr.append(".option rvc;")
for i in range(1, rcs.max_interrupt_vector_num):
intr_handler = []
pkg_ins.push_gpr_to_kernel_stack(
status, scratch, cfg.mstatus_mprv, cfg.sp, cfg.tp, intr_handler)
self.gen_signature_handshake(instr=intr_handler,
signature_type=signature_type_t.CORE_STATUS,
core_status=core_status_t.HANDLING_IRQ)
intr_handler.extend(("csrr x{}, {} # {}".format(
cfg.gpr[0], hex(cause), cause.name),
# Terminate the test if xCause[31] != 0 (indicating exception)
"srli x{}, x{}, {}".format(
cfg.gpr[0], cfg.gpr[0], hex(rcs.XLEN - 1)),
"beqz x{}, 1f".format(cfg.gpr[0])))
csr_list = [status, cause, ie, ip]
for csr_t in csr_list:
self.gen_signature_handshake(
instr=intr_handler, signature_type=signature_type_t.WRITE_CSR, csr=csr_t)
            # Jump to common interrupt handling routine
intr_handler.extend(("j {}{}mode_intr_handler".format(pkg_ins.hart_prefix(hart), mode),
"1: la x{}, test_done".format(cfg.scratch_reg),
"jalr x0, x{}, 0".format(cfg.scratch_reg)))
self.gen_section(pkg_ins.get_label(
"{}mode_intr_vector_{}".format(mode, i), hart), intr_handler)
def gen_ecall_handler(self, hart):
string = ""
string = pkg_ins.format_string(pkg_ins.get_label(
"ecall_handler:", hart), pkg_ins.LABEL_STR_LEN)
self.instr_stream.append(string)
self.dump_perf_stats()
self.gen_register_dump()
string = pkg_ins.format_string(" ", pkg_ins.LABEL_STR_LEN)
string = string + "j write_tohost"
self.instr_stream.append(string)
def gen_ebreak_handler(self, hart):
pass
def gen_illegal_instr_handler(self, hart):
instr = []
self.gen_signature_handshake(instr, signature_type_t.CORE_STATUS,
core_status_t.ILLEGAL_INSTR_EXCEPTION)
self.gen_signature_handshake(instr, signature_type_t.WRITE_CSR, privileged_reg_t.MCAUSE)
instr.extend(("csrr x{}, {}".format(cfg.gpr[0], hex(privileged_reg_t.MEPC)),
"addi x{}, x{}, 4".format(cfg.gpr[0], cfg.gpr[0]),
"csrw {}, x{}".format(hex(privileged_reg_t.MEPC), cfg.gpr[0])))
pkg_ins.pop_gpr_from_kernel_stack(privileged_reg_t.MSTATUS, privileged_reg_t.MSCRATCH,
cfg.mstatus_mprv, cfg.sp, cfg.tp, instr)
instr.append("mret")
self.gen_section(pkg_ins.get_label("illegal_instr_handler", hart), instr)
def gen_instr_fault_handler(self, hart):
pass
def gen_load_fault_handler(self, hart):
pass
def gen_store_fault_handler(self, hart):
pass
def create_page_table(self, hart):
pass
def gen_page_table_section(self, hart):
pass
def gen_plic_section(self, interrupt_handler_instr):
pass
def gen_interrupt_handler_section(self, mode, hart):
interrupt_handler_instr = []
# ls_unit = "w" if rcs.XLEN == 32 else "d"
if mode < cfg.init_privileged_mode:
return
        if mode is privileged_mode_t.USER_MODE and not rcs.support_umode_trap:
return
if mode == privileged_mode_t.MACHINE_MODE:
mode_prefix = "m"
status = privileged_reg_t.MSTATUS
ip = privileged_reg_t.MIP
ie = privileged_reg_t.MIE
scratch = privileged_reg_t.MSCRATCH
elif mode is privileged_mode_t.SUPERVISOR_MODE:
mode_prefix = "s"
status = privileged_reg_t.SSTATUS
ip = privileged_reg_t.SIP
ie = privileged_reg_t.SIE
scratch = privileged_reg_t.SSCRATCH
elif mode == privileged_mode_t.USER_MODE:
mode_prefix = "u"
status = privileged_reg_t.USTATUS
ip = privileged_reg_t.UIP
ie = privileged_reg_t.UIE
scratch = privileged_reg_t.USCRATCH
else:
logging.critical("Unsupported mode: {}".format(mode.name))
sys.exit(1)
if cfg.enable_nested_interrupt:
interrupt_handler_instr.append("csrr x{}, {}".format(cfg.gpr[0], hex(scratch)))
interrupt_handler_instr.append("bgtz x{}, 1f".format(cfg.gpr[0]))
interrupt_handler_instr.append("csrwi {}, 0x1".format(hex(scratch)))
if status == privileged_reg_t.MSTATUS:
interrupt_handler_instr.append("csrsi {}, {}".format(hex(status), hex(8)))
elif status == privileged_reg_t.SSTATUS:
interrupt_handler_instr.append("csrsi {}, {}".format(hex(status), hex(2)))
elif status == privileged_reg_t.USTATUS:
interrupt_handler_instr.append("csrsi {}, {}".format(hex(status), hex(1)))
else:
logging.critical("Unsupported status {}".format(status.name))
sys.exit(1)
interrupt_handler_instr.append("1: csrwi {},0".format(hex(scratch)))
        interrupt_handler_instr.extend((
            "csrr x{}, {} # {};".format(cfg.gpr[0], hex(status), status.name),
            "csrr x{}, {} # {};".format(cfg.gpr[0], hex(ie), ie.name),
            "csrr x{}, {} # {};".format(cfg.gpr[0], hex(ip), ip.name),
            "csrrc x{}, {}, x{} # {};".format(cfg.gpr[0], hex(ip), cfg.gpr[0], ip.name)))
self.gen_plic_section(interrupt_handler_instr)
pkg_ins.pop_gpr_from_kernel_stack(status, scratch, cfg.mstatus_mprv,
cfg.sp, cfg.tp, interrupt_handler_instr)
interrupt_handler_instr.append("{}ret;".format(mode_prefix))
if rcs.SATP_MODE != satp_mode_t.BARE:
self.instr_stream.append(".align 12")
else:
self.instr_stream.append(".align 2")
self.gen_section(pkg_ins.get_label("%0smode_intr_handler" %
(mode_prefix), hart), interrupt_handler_instr)
def format_section(self, instr):
pass
def gen_section(self, label, instr):
if label != "":
string = pkg_ins.format_string("{}:".format(label), pkg_ins.LABEL_STR_LEN)
self.instr_stream.append(string)
for items in instr:
string = pkg_ins.indent + items
self.instr_stream.append(string)
self.instr_stream.append("")
def dump_perf_stats(self):
pass
def gen_test_file(self, test_name):
        with open(test_name, "w+") as out_file:
            for items in self.instr_stream:
                out_file.write("{}\n".format(items))
logging.info("{} is generated".format(test_name))
def gen_signature_handshake(self, instr, signature_type,
core_status=core_status_t.INITIALIZED,
test_result=test_result_t.TEST_FAIL,
csr=privileged_reg_t.MSCRATCH,
addr_label = ""):
if cfg.require_signature_addr:
instr.extend(("li x{}, {}".format(cfg.gpr[1], hex(cfg.signature_addr))))
# A single data word is written to the signature address.
# Bits [7:0] contain the signature_type of CORE_STATUS, and the upper
# XLEN-8 bits contain the core_status_t data.
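            # i.e. the word written below is (core_status << 8) | signature_type.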
if signature_type == signature_type_t.CORE_STATUS:
instr.extend(("li x{}, {}".format(cfg.gpr[0], hex(core_status)),
"slli x{}, x{}, 8".format(cfg.gpr[0], cfg.gpr[0]),
"addi x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0],
hex(signature_type)),
"sw x{}, 0(x{})".format(cfg.gpr[0], cfg.gpr[1])))
# A single data word is written to the signature address.
# Bits [7:0] contain the signature_type of TEST_RESULT, and the upper
# XLEN-8 bits contain the test_result_t data.
            elif signature_type == signature_type_t.TEST_RESULT:
instr.extend(("li x{}, {}".format(cfg.gpr[0], hex(test_result)),
"slli x{}, x{}, 8".format(cfg.gpr[0], cfg.gpr[0]),
"addi x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0],
hex(signature_type)),
"sw x{}, 0(x{})".format(cfg.gpr[0], cfg.gpr[1])))
# The first write to the signature address contains just the
# signature_type of WRITE_GPR.
# It is followed by 32 consecutive writes to the signature address,
# each writing the data contained in one GPR, starting from x0 as the
# first write, and ending with x31 as the 32nd write.
elif signature_type == signature_type_t.WRITE_GPR:
instr.extend(("li x{}, {}".format(cfg.gpr[0], hex(signature_type)),
"sw x{}, 0(x{})".format(cfg.gpr[0], cfg.gpr[1])))
for i in range(32):
instr.append("sw x{},0(x{})".format(i, cfg.gpr[1]))
# The first write to the signature address contains the
# signature_type of WRITE_CSR in bits [7:0], and the CSR address in
# the upper XLEN-8 bits.
# It is followed by a second write to the signature address,
# containing the data stored in the specified CSR.
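            # i.e. first word = (csr_address << 8) | signature_type, second word =
            # the raw contents of the CSR read back with csrr.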
elif signature_type == signature_type_t.WRITE_CSR:
if csr not in rcs.implemented_csr:
return
instr.extend(("li x{}, {}".format(cfg.gpr[0], hex(csr)),
"slli x{}, x{}, 8".format(cfg.gpr[0], cfg.gpr[0]),
"addi x{}, x{}, {}".format(cfg.gpr[0], cfg.gpr[0],
hex(signature_type)),
"sw x{}, 0(x{})".format(cfg.gpr[0], cfg.gpr[1]),
"csrr x{}, {}".format(cfg.gpr[0], hex(csr)),
"sw x{}, 0(x{})".format(cfg.gpr[0], cfg.gpr[1])))
else:
logging.critical("signature_type is not defined")
sys.exit(1)
def add_directed_instr_stream(self, name, ratio):
self.directed_instr_stream_ratio[name] = ratio
logging.info("Adding directed instruction stream:%0s ratio:%0d/1000", name, ratio)
def get_directed_instr_stream(self):
opts = []
for i in range(cfg.max_directed_instr_stream_seq):
arg = "directed_instr_{}".format(i)
stream_name_opts = "stream_name_{}".format(i)
stream_freq_opts = "stream_freq_{}".format(i)
if cfg.args_dict[arg]:
val = cfg.args_dict[arg]
opts = val.split(",")
if len(opts) != 2:
logging.critical(
"Incorrect directed instruction format : %0s, expect: name,ratio", val)
sys.exit(1)
else:
self.add_directed_instr_stream(opts[0], int(opts[1]))
elif cfg.args_dict[stream_name_opts] and cfg.args_dict[stream_freq_opts]:
stream_name = cfg.args_dict[stream_name_opts]
stream_freq = cfg.args_dict[stream_freq_opts]
self.add_directed_instr_stream(stream_name, stream_freq)
def generate_directed_instr_stream(self, hart = 0, label = "", original_instr_cnt = 0,
min_insert_cnt = 0, kernel_mode = 0, instr_stream = []):
instr_insert_cnt = 0
idx = 0
if cfg.no_directed_instr:
return
for instr_stream_name in self.directed_instr_stream_ratio:
instr_insert_cnt = int(original_instr_cnt *
self.directed_instr_stream_ratio[instr_stream_name] // 1000)
if instr_insert_cnt <= min_insert_cnt:
instr_insert_cnt = min_insert_cnt
logging.info("Insert directed instr stream %0s %0d/%0d times",
instr_stream_name, instr_insert_cnt, original_instr_cnt)
for i in range(instr_insert_cnt):
name = "{}_{}".format(instr_stream_name, i)
                object_h = factory(instr_stream_name)
                if not object_h:
                    logging.critical("Cannot create instr stream %0s", name)
                    sys.exit(1)
                object_h.name = name
new_instr_stream = copy.deepcopy(object_h)
if new_instr_stream:
new_instr_stream.hart = hart
new_instr_stream.label = "{}_{}".format(label, idx)
new_instr_stream.kernel_mode = kernel_mode
new_instr_stream.randomize()
instr_stream.append(new_instr_stream)
else:
logging.critical("Cannot Create instr stream %0s", name)
sys.exit(1)
idx += 1
random.shuffle(instr_stream)
def gen_debug_rom(self, hart):
pass
| apache-2.0 | -3,656,137,791,055,222,300 | 47.182227 | 100 | 0.533665 | false |
PX4/ecl | EKF/python/wind_cov_init/derivation.py | 1 | 2105 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 14:11:58 2019
@author: roman
"""
from sympy import *
################## Here are the variables you can change to see the effects on the cov matrix ###########################
yaw_init = 0.5
# ground speed in body frame (comes from ekf2)
groundspeed_body_x_init = 5
groundspeed_body_y_init = 5
# true airspeed measured by pitot tube
V_init = 7
# heading variance
R_yaw_init = rad(15.0)**2
# sideslip variance
R_beta_init = rad(15.0)**2
# True airspeed measurement variance
R_tas_init = 1.4**2
#########################################################################################################################
# define symbols: true airspeed, sideslip angle, yaw, body-frame ground speed
V, beta, yaw, groundspeed_body_x, groundspeed_body_y = symbols('V beta yaw vx_body vy_body')
R_tas, R_beta, R_yaw = symbols('R_tas R_beta R_yaw')
# body x/y component of relative wind vector ( V is what the airspeed sensor measures)
Vx = V * cos(beta)
Vy = V * sin(beta)
# wind in body frame
wind_body_x = groundspeed_body_x - Vx
wind_body_y = groundspeed_body_y - Vy
# wind in earth frame
wind_n = cos(yaw) * wind_body_x - sin(yaw) * wind_body_y
wind_e = sin(yaw) * wind_body_x + cos(yaw) * wind_body_y
wind_earth = Matrix([wind_n, wind_e])
# jacobian of earth wind vector with respect to states with known uncertainties
G = wind_earth.jacobian([V, beta, yaw])
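# First-order (linearized) covariance propagation: P_wind = G * P * G^T, where P
# holds the variances of V, beta and yaw (assumed independent).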
# initial covariance matrix
P = Matrix([[R_tas, 0, 0], [0, R_beta,0], [0,0,R_yaw]])
# earth wind covariance matrix, assume 0 sideslip angle
P_wind_earth = (G*P*G.T).subs([(beta, 0)])
P_wind_earth_numeric = P_wind_earth.subs([(V, V_init),(yaw, yaw_init), (R_tas, R_tas_init), (R_yaw, R_yaw_init), (R_beta, R_beta_init)])
P_wind_earth_numeric = P_wind_earth_numeric.subs([(groundspeed_body_x, groundspeed_body_x_init), (groundspeed_body_y, groundspeed_body_y_init) ])
print('P[22][22] = ' + str(P_wind_earth_numeric[0,0]))
print('P[22][23] = ' + str(P_wind_earth_numeric[0,1]))
print('P[23][22] = ' + str(P_wind_earth_numeric[1,0]))
print('P[23][23] = ' + str(P_wind_earth_numeric[1,1])) | bsd-3-clause | 2,197,566,679,472,614,100 | 30.432836 | 145 | 0.618527 | false |
scrapinghub/python-hubstorage | tests/test_jobsmeta.py | 1 | 3937 | """
Test job metadata
System tests for operations on stored job metadata
"""
from .hstestcase import HSTestCase
class JobsMetadataTest(HSTestCase):
def _assertMetadata(self, meta1, meta2):
def _clean(m):
return dict((k, v) for k, v in m.items() if k != 'updated_time')
meta1 = _clean(meta1)
meta2 = _clean(meta2)
self.assertEqual(meta1, meta2)
def test_basic(self):
job = self.project.push_job(self.spidername)
self.assertTrue('auth' not in job.metadata)
self.assertTrue('state' in job.metadata)
self.assertEqual(job.metadata['spider'], self.spidername)
# set some metadata and forget it
job.metadata['foo'] = 'bar'
self.assertEqual(job.metadata['foo'], 'bar')
job.metadata.expire()
self.assertTrue('foo' not in job.metadata)
# set it again and persist it
job.metadata['foo'] = 'bar'
self.assertEqual(job.metadata['foo'], 'bar')
job.metadata.save()
self.assertEqual(job.metadata['foo'], 'bar')
job.metadata.expire()
self.assertEqual(job.metadata['foo'], 'bar')
# refetch the job and compare its metadata
job2 = self.hsclient.get_job(job.key)
self._assertMetadata(job2.metadata, job.metadata)
# delete foo but do not persist it
del job.metadata['foo']
self.assertTrue('foo' not in job.metadata)
job.metadata.expire()
self.assertEqual(job.metadata.get('foo'), 'bar')
# persist it to be sure it is not removed
job.metadata.save()
self.assertEqual(job.metadata.get('foo'), 'bar')
# and finally delete again and persist it
del job.metadata['foo']
self.assertTrue('foo' not in job.metadata)
job.metadata.save()
self.assertTrue('foo' not in job.metadata)
job.metadata.expire()
self.assertTrue('foo' not in job.metadata)
job2 = self.hsclient.get_job(job.key)
self._assertMetadata(job.metadata, job2.metadata)
def test_updating(self):
job = self.project.push_job(self.spidername)
self.assertIsNone(job.metadata.get('foo'))
job.update_metadata({'foo': 'bar'})
# metadata attr should change
self.assertEqual(job.metadata.get('foo'), 'bar')
# as well as actual metadata
job = self.project.get_job(job.key)
self.assertEqual(job.metadata.get('foo'), 'bar')
job.update_metadata({'foo': None})
self.assertFalse(job.metadata.get('foo', False))
# there are ignored fields like: auth, _key, state
state = job.metadata['state']
job.update_metadata({'state': 'running'})
self.assertEqual(job.metadata['state'], state)
def test_representation(self):
job = self.project.push_job(self.spidername)
meta = job.metadata
self.assertNotEqual(str(meta), repr(meta))
self.assertEqual(meta, eval(str(meta)))
self.assertTrue(meta.__class__.__name__ in repr(meta))
self.assertFalse(meta.__class__.__name__ in str(meta))
def test_jobauth(self):
job = self.project.push_job(self.spidername)
self.assertIsNone(job.jobauth)
self.assertEqual(job.auth, self.project.auth)
self.assertEqual(job.items.auth, self.project.auth)
samejob = self.hsclient.get_job(job.key)
self.assertIsNone(samejob.auth)
self.assertIsNone(samejob.jobauth)
self.assertEqual(samejob.items.auth, self.project.auth)
def test_authtoken(self):
pendingjob = self.project.push_job(self.spidername)
runningjob = self.start_job()
self.assertEqual(pendingjob.key, runningjob.key)
self.assertTrue(runningjob.jobauth)
self.assertEqual(runningjob.jobauth, runningjob.auth)
self.assertEqual(runningjob.auth[0], runningjob.key)
self.assertTrue(runningjob.auth[1])
| bsd-3-clause | -4,820,492,767,777,208,000 | 36.495238 | 76 | 0.630937 | false |
franapoli/pyleaf | pyleaf/rrc.py | 1 | 5065 | # The MIT License (MIT)
# Copyright (c) 2012-2013 Francesco Napolitano, [email protected]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from pyleaf import log
import pickle
import inspect
class resource():
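    """A named resource that can be cached (pickled) to a file.
    The resource keeps a fingerprint of its contents (the source code of a
    callable, or the raw value otherwise) so that changed() and update() can
    detect modifications and re-dump the pickle stored at _path when needed.
    """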
def __init__(self, name, path):
log.send('Initializing resource ' + name + ' with path ' + path, 3)
self._name=name
self._path = path
if self.isDumped():
self.load()
def clear(self):
self._contents = None
self._fingerprint = None
def name(self):
return str(self)
def update(self):
if self.changed():
if self._fingerprint != None:
log.send(self.name() + ' has changed: updating.')
else:
log.send(self.name() + ' is new: building fingerprint.')
self.updateFingerprint()
self.dump()
else:
log.send(self.name() + ' has not changed.', 2)
def clearDump(self):
if self.isDumped():
os.remove(self._path)
def load(self):
if self.isDumped():
log.send(self.name() + ' is dumped in ' + self._path + ': loading it.')
res = pickle.load(open(self._path, 'rb'))
## Now it should be a "self = res" but I currently don't
## trust that.
self._timestamp = res._timestamp
self._buildtime = res._buildtime
self._fingerprint = res._fingerprint
self.setDumpPath(res.getDumpPath())
self.setIsFile(res.isFile())
self.setValue(res.getValue())
else:
log.send(self.name() + ' is not dumped.', 2)
def isDumped(self):
log.send('Checking ' + str(self) + ' in file: ' + self._path, 3)
if os.path.exists(self._path):
log.send('Available ' + str(self), 3)
return True
log.send('Unavailable: ' + str(self), 3)
return False
def dump(self):
if not self._dodump:
log.send('Dumping is switched off, so skipping.', 2)
return
log.send('Dumping resource: ' + self._name ,2)
log.send('object: ' + str(self), 3)
log.send('value: ' + str(self._contents), 3)
log.send('fingerprint: ' + str(self._fingerprint), 3)
log.send('Dumping to file: ' + self._path, 2)
pickle.dump(self, open(self._path, 'wb'))
def isAvailable(self):
return self._contents != None
def setValue(self, v):
log.send('New value is: ' + str(v), 3)
self._contents = v
def getValue(self):
return self._contents
def setIsFile(self, isit = True):
log.send('isFile value: ' + str(isit), 3)
self._isfile = isit
def isFile(self):
return self._isfile
def setDumpPath(self, path):
log.send('Updating path: ' + str(path),2)
self._path = path
def getDumpPath(self):
return self._path
def changed(self):
return self._fingerprint != self._makeFingerprint(self._contents)
def _makeFingerprint(self, obj):
try:
inspect.getsource(obj)
log.send('Source got:', 3)
log.send(inspect.getsource(obj), 3)
return inspect.getsource(obj)
except Exception:
log.send('No source: passing object', 3)
return obj
def getFingerprint(self):
return self._fingerprint
def updateFingerprint(self):
self._fingerprint = self._makeFingerprint(self._contents)
log.send('Fingerprint is: ' + str(self._fingerprint), 3)
def name(self):
return self._name
def setDump(self, d):
self._dodump = d
_name = ''
_contents = None
_dodump = True
_fingerprint = None
_path = None
_isfile = False
_timestamp = None
_buildtime = None
| mit | 4,220,487,148,200,134,000 | 31.261146 | 83 | 0.576308 | false |
cjayb/mne-python | mne/preprocessing/ssp.py | 3 | 14022 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from ..epochs import Epochs
from ..proj import compute_proj_evoked, compute_proj_epochs
from ..utils import logger, verbose, warn
from ..io.pick import pick_types
from ..io import make_eeg_average_ref_proj
from .ecg import find_ecg_events
from .eog import find_eog_events
def _safe_del_key(dict_, key):
"""Aux function.
Use this function when preparing rejection parameters
instead of directly deleting keys.
"""
if key in dict_:
del dict_[key]
def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name,
reject, flat, bads, avg_ref, no_proj, event_id,
exg_l_freq, exg_h_freq, tstart, qrs_threshold,
filter_method, iir_params, return_drop_log, copy,
meg, verbose):
"""Compute SSP/PCA projections for ECG or EOG artifacts."""
raw = raw.copy() if copy else raw
del copy
raw.load_data() # we will filter it later
if no_proj:
projs = []
else:
projs = cp.deepcopy(raw.info['projs'])
logger.info('Including %d SSP projectors from raw file'
% len(projs))
if avg_ref:
eeg_proj = make_eeg_average_ref_proj(raw.info)
projs.append(eeg_proj)
if raw_event is None:
raw_event = raw
assert mode in ('ECG', 'EOG') # internal function
logger.info('Running %s SSP computation' % mode)
if mode == 'ECG':
events, _, _ = find_ecg_events(raw_event, ch_name=ch_name,
event_id=event_id, l_freq=exg_l_freq,
h_freq=exg_h_freq, tstart=tstart,
qrs_threshold=qrs_threshold,
filter_length=filter_length)
else: # mode == 'EOG':
events = find_eog_events(raw_event, event_id=event_id,
l_freq=exg_l_freq, h_freq=exg_h_freq,
filter_length=filter_length, ch_name=ch_name,
tstart=tstart)
# Check to make sure we actually got at least one usable event
if events.shape[0] < 1:
warn('No %s events found, returning None for projs' % mode)
return (None, events) + (([],) if return_drop_log else ())
logger.info('Computing projector')
my_info = cp.deepcopy(raw.info)
my_info['bads'] += bads
    # Handle rejection parameters
if reject is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eog')
if flat is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eog')
# exclude bad channels from projection
# keep reference channels if compensation channels are present
ref_meg = len(my_info['comps']) > 0
picks = pick_types(my_info, meg=True, eeg=True, eog=True, ecg=True,
ref_meg=ref_meg, exclude='bads')
raw.filter(l_freq, h_freq, picks=picks, filter_length=filter_length,
n_jobs=n_jobs, method=filter_method, iir_params=iir_params,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_design='firwin2')
epochs = Epochs(raw, events, None, tmin, tmax, baseline=None, preload=True,
picks=picks, reject=reject, flat=flat, proj=True)
drop_log = epochs.drop_log
if epochs.events.shape[0] < 1:
warn('No good epochs found, returning None for projs')
return (None, events) + ((drop_log,) if return_drop_log else ())
if average:
evoked = epochs.average()
ev_projs = compute_proj_evoked(evoked, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, meg=meg)
else:
ev_projs = compute_proj_epochs(epochs, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, n_jobs=n_jobs, meg=meg)
for p in ev_projs:
p['desc'] = mode + "-" + p['desc']
projs.extend(ev_projs)
logger.info('Done.')
return (projs, events) + ((drop_log,) if return_drop_log else ())
@verbose
def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=True, filter_length='10s', n_jobs=1,
ch_name=None, reject=dict(grad=2000e-13, mag=3000e-15,
eeg=50e-6, eog=250e-6),
flat=None, bads=[], avg_ref=False,
no_proj=False, event_id=999, ecg_l_freq=5, ecg_h_freq=35,
tstart=0., qrs_threshold='auto', filter_method='fir',
iir_params=None, copy=True, return_drop_log=False,
meg='separate', verbose=None):
"""Compute SSP/PCA projections for ECG artifacts.
.. note:: raw data will be loaded if it is not already.
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency for the data channels in Hz.
h_freq : float | None
Filter high cut-off frequency for the data channels in Hz.
average : bool
Compute SSP after averaging. Default is True.
filter_length : str | int | None
Number of taps to use for filtering.
%(n_jobs)s
ch_name : str | None
Channel to use for ECG detection (Required if no ECG found).
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
ecg_l_freq : float
Low pass frequency applied to the ECG channel for event detection.
ecg_h_freq : float
High pass frequency applied to the ECG channel for event detection.
tstart : float
Start artifact detection after tstart seconds.
qrs_threshold : float | str
        Between 0 and 1. QRS detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
filter_method : str
Method for filtering ('iir' or 'fir').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
return_drop_log : bool
If True, return the drop log.
.. versionadded:: 0.15
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
proj : list
Computed SSP projectors.
ecg_events : ndarray
Detected ECG events.
drop_log : list
The drop log, if requested.
See Also
--------
find_ecg_events
create_ecg_epochs
Notes
-----
Filtering is applied to the ECG channel while finding events using
``ecg_l_freq`` and ``ecg_h_freq``, and then to the ``raw`` instance
using ``l_freq`` and ``h_freq`` before creation of the epochs used to
create the projectors.
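    Examples
    --------
    Illustrative call (the parameter values here are arbitrary)::
        projs, ecg_events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0,
                                             average=True)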
"""
return _compute_exg_proj(
'ECG', raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
l_freq, h_freq, average, filter_length, n_jobs, ch_name, reject, flat,
bads, avg_ref, no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart,
qrs_threshold, filter_method, iir_params, return_drop_log, copy,
meg, verbose)
@verbose
def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=True, filter_length='10s', n_jobs=1,
reject=dict(grad=2000e-13, mag=3000e-15, eeg=500e-6,
eog=np.inf), flat=None, bads=[],
avg_ref=False, no_proj=False, event_id=998, eog_l_freq=1,
eog_h_freq=10, tstart=0., filter_method='fir',
iir_params=None, ch_name=None, copy=True,
return_drop_log=False, meg='separate', verbose=None):
"""Compute SSP/PCA projections for EOG artifacts.
.. note:: raw data must be preloaded.
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency for the data channels in Hz.
h_freq : float | None
Filter high cut-off frequency for the data channels in Hz.
average : bool
Compute SSP after averaging. Default is True.
filter_length : str | int | None
Number of taps to use for filtering.
%(n_jobs)s
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
eog_l_freq : float
        Low pass frequency applied to the EOG channel for event detection.
eog_h_freq : float
High pass frequency applied to the EOG channel for event detection.
tstart : float
Start artifact detection after tstart seconds.
filter_method : str
Method for filtering ('iir' or 'fir').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
ch_name : str | None
If not None, specify EOG channel name.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
return_drop_log : bool
If True, return the drop log.
.. versionadded:: 0.15
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
    proj : list
        Computed SSP projectors.
    eog_events : ndarray
        Detected EOG events.
drop_log : list
The drop log, if requested.
See Also
--------
find_eog_events
create_eog_epochs
Notes
-----
Filtering is applied to the EOG channel while finding events using
``eog_l_freq`` and ``eog_h_freq``, and then to the ``raw`` instance
using ``l_freq`` and ``h_freq`` before creation of the epochs used to
create the projectors.
"""
return _compute_exg_proj(
'EOG', raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
l_freq, h_freq, average, filter_length, n_jobs, ch_name, reject, flat,
bads, avg_ref, no_proj, event_id, eog_l_freq, eog_h_freq, tstart,
'auto', filter_method, iir_params, return_drop_log, copy, meg,
verbose)
| bsd-3-clause | 1,329,410,272,909,505,500 | 37.726519 | 79 | 0.580569 | false |
cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 1 | 13426 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import *
from webkitpy.tool.mocktool import MockTool, MockOptions
from webkitpy.common.system.executive_mock import MockExecutive
class TestRebaseline(unittest.TestCase):
def stub_rebaseline_test_command_and_tool(self):
class FakeZipFileSet(object):
contents = {}
def read(self, member):
return self.contents[member]
command = RebaselineTest()
tool = MockTool()
command.bind_to_tool(tool)
command._zip_file_set = lambda url: FakeZipFileSet()
return (command, tool)
def test_tests_to_update(self):
command = Rebaseline()
command.bind_to_tool(MockTool())
build = Mock()
OutputCapture().assert_outputs(self, command._tests_to_update, [build])
def test_rebaseline_updates_expectations_file_noop(self):
command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(lion_port.path_to_test_expectations_file(), """BUGB MAC LINUX XP DEBUG : fast/dom/Window/window-postmessage-clone-really-deep-array.html = PASS
BUGA DEBUG : fast/css/large-list-of-rules-crash.html = TEXT
""")
tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "fast/dom/Window/window-postmessage-clone-really-deep-array.html"), "Dummy test contents")
tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "fast/css/large-list-of-rules-crash.html"), "Dummy test contents")
tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "userscripts/another-test.html"), "Dummy test contents")
expected_stdout = "Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip\n"
OutputCapture().assert_outputs(self, command._rebaseline_test_and_update_expectations, ["Webkit Mac10.7", "userscripts/another-test.html", None], expected_stdout=expected_stdout)
new_expectations = tool.filesystem.read_text_file(lion_port.path_to_test_expectations_file())
self.assertEqual(new_expectations, """BUGB MAC LINUX XP DEBUG : fast/dom/Window/window-postmessage-clone-really-deep-array.html = PASS
BUGA DEBUG : fast/css/large-list-of-rules-crash.html = TEXT
""")
def test_rebaseline_updates_expectations_file(self):
command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(lion_port.path_to_test_expectations_file(), "BUGX MAC : userscripts/another-test.html = IMAGE\nBUGZ LINUX : userscripts/another-test.html = IMAGE\n")
tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "userscripts/another-test.html"), "Dummy test contents")
expected_stdout = "Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip\n"
OutputCapture().assert_outputs(self, command._rebaseline_test_and_update_expectations, ["Webkit Mac10.7", "userscripts/another-test.html", None], expected_stdout=expected_stdout)
new_expectations = tool.filesystem.read_text_file(lion_port.path_to_test_expectations_file())
self.assertEqual(new_expectations, "BUGX LEOPARD SNOWLEOPARD : userscripts/another-test.html = IMAGE\nBUGZ LINUX : userscripts/another-test.html = IMAGE\n")
def test_rebaseline_test(self):
command, _ = self.stub_rebaseline_test_command_and_tool()
expected_stdout = "Retrieving http://example.com/f/builders/Webkit Linux/layout-test-results.zip\n"
OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Linux", "userscripts/another-test.html", None, "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_test(self):
command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "userscripts/another-test-expected.txt"), "Dummy expected result")
expected_stdout = """Copying baseline from /mock-checkout/LayoutTests/userscripts/another-test-expected.txt to /mock-checkout/LayoutTests/platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt.
Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_test_no_existing_result(self):
command, _ = self.stub_rebaseline_test_command_and_tool()
expected_stdout = """No existing baseline for userscripts/another-test.html.
Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_test_with_lion_result(self):
command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(os.path.join(lion_port.baseline_path(), "userscripts/another-test-expected.txt"), "Dummy expected result")
expected_stdout = """Copying baseline from /mock-checkout/LayoutTests/platform/chromium-mac/userscripts/another-test-expected.txt to /mock-checkout/LayoutTests/platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt.
Copying baseline from /mock-checkout/LayoutTests/platform/chromium-mac/userscripts/another-test-expected.txt to /mock-checkout/LayoutTests/platform/chromium-mac-leopard/userscripts/another-test-expected.txt.
Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard", "chromium-mac-leopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_no_overwrite_test(self):
command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(os.path.join(lion_port.baseline_path(), "userscripts/another-test-expected.txt"), "Dummy expected result")
snowleopard_port = tool.port_factory.get_from_builder_name("Webkit Mac10.6")
tool.filesystem.write_text_file(os.path.join(snowleopard_port.baseline_path(), "userscripts/another-test-expected.txt"), "Dummy expected result")
expected_stdout = """Existing baseline at /mock-checkout/LayoutTests/platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt, not copying over it.
Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_expectations(self):
command = RebaselineExpectations()
tool = MockTool()
command.bind_to_tool(tool)
for port_name in tool.port_factory.all_port_names():
port = tool.port_factory.get(port_name)
tool.filesystem.write_text_file(port.path_to_test_expectations_file(), '')
# Don't enable logging until after we create the mock expectation files as some Port.__init__'s run subcommands.
tool.executive = MockExecutive(should_log=True)
expected_stdout = """Retrieving results for chromium-linux-x86 from Webkit Linux 32.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-linux-x86_64 from Webkit Linux.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-mac-leopard from Webkit Mac10.5.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-mac-lion from Webkit Mac10.7.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-mac-snowleopard from Webkit Mac10.6.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-win-vista from Webkit Vista.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-win-win7 from Webkit Win7.
userscripts/another-test.html
userscripts/images.svg
Retrieving results for chromium-win-xp from Webkit Win.
userscripts/another-test.html
userscripts/images.svg
"""
expected_stderr = """MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux 32', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux 32', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Linux', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.5', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.5', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.7', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.7', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.6', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Mac10.6', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Vista', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Vista', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win7', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win7', 'userscripts/images.svg'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win', 'userscripts/another-test.html'], cwd=/mock-checkout
MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win', 'userscripts/images.svg'], cwd=/mock-checkout
"""
command._tests_to_rebaseline = lambda port: ['userscripts/another-test.html', 'userscripts/images.svg']
OutputCapture().assert_outputs(self, command.execute, [MockOptions(optimize=False), [], tool], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
expected_stdout_with_optimize = expected_stdout + (
"Optimizing baselines for userscripts/another-test.html.\n"
"Optimizing baselines for userscripts/images.svg.\n")
expected_stderr_with_optimize = expected_stderr + (
"MOCK run_command: ['echo', 'optimize-baselines', 'userscripts/another-test.html'], cwd=/mock-checkout\n"
"MOCK run_command: ['echo', 'optimize-baselines', 'userscripts/images.svg'], cwd=/mock-checkout\n")
command._tests_to_rebaseline = lambda port: ['userscripts/another-test.html', 'userscripts/images.svg']
OutputCapture().assert_outputs(self, command.execute, [MockOptions(optimize=True), [], tool], expected_stdout=expected_stdout_with_optimize, expected_stderr=expected_stderr_with_optimize)
| gpl-3.0 | 2,749,226,560,659,946,500 | 63.859903 | 240 | 0.734619 | false |
breakwang/pykit | jobq/jobq.py | 1 | 3605 | import logging
import sys
import threading
import time
import types
if sys.version_info[0] == 2:
import Queue
else:
import queue as Queue
logger = logging.getLogger(__name__)
class EmptyRst(object):
pass
class Finish(object):
pass
def run(input_it, workers, keep_order=False, timeout=None, probe=None):
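    """Run every item of input_it through a pipeline of worker stages.
    Each element of `workers` is either a callable or a (callable, n) tuple,
    where n is the number of threads serving that stage.  Items flow between
    stages through bounded queues and the output of the last stage is
    discarded.  Illustrative usage (the worker functions are made up):
        run(range(10), [load, (transform, 4), store], keep_order=True)
    """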
endtime = time.time() + (timeout or 86400 * 365)
if probe is None:
probe = {}
sessions = []
probe['sessions'] = sessions
head_q = _make_q()
inq = head_q
for worker in workers + [_blackhole]:
if callable(worker):
worker = (worker, 1)
worker, n = worker
sess = {'worker': worker,
'threads': [],
'input': inq,
}
outq = _make_q()
if keep_order and n > 1:
# to maximize concurrency
sess['queue_of_outq'] = _make_q(n=1024 * 1024)
sess['lock'] = threading.RLock()
sess['coor_th'] = _thread(_coordinate, (sess, outq))
sess['threads'] = [_thread(_exec_in_order, (sess, _make_q()))
for ii in range(n)]
else:
sess['threads'] = [_thread(_exec, (sess, outq))
for ii in range(n)]
sessions.append(sess)
inq = outq
for args in input_it:
head_q.put(args)
for sess in sessions:
        # put len(threads) Finish markers, one per worker thread of the stage
for th in sess['threads']:
sess['input'].put(Finish)
for th in sess['threads']:
th.join(endtime - time.time())
if 'queue_of_outq' in sess:
sess['queue_of_outq'].put(Finish)
sess['coor_th'].join(endtime - time.time())
def stat(probe):
rst = []
for sess in probe['sessions']:
o = {}
wk = sess['worker']
o['name'] = wk.__module__ + ":" + wk.__name__
o['input'] = _q_stat(sess['input'])
if 'queue_of_outq' in sess:
o['coordinator'] = _q_stat(sess['queue_of_outq'])
rst.append(o)
return rst
def _q_stat(q):
return {'size': q.qsize(),
'capa': q.maxsize
}
def _exec(sess, output_q):
while True:
args = sess['input'].get()
if args is Finish:
return
try:
rst = sess['worker'](args)
except Exception as e:
logger.exception(repr(e))
continue
_put_rst(output_q, rst)
def _exec_in_order(sess, output_q):
while True:
with sess['lock']:
args = sess['input'].get()
if args is Finish:
return
sess['queue_of_outq'].put(output_q)
try:
rst = sess['worker'](args)
except Exception as e:
logger.exception(repr(e))
output_q.put(EmptyRst)
continue
output_q.put(rst)
def _coordinate(sess, output_q):
while True:
outq = sess['queue_of_outq'].get()
if outq is Finish:
return
_put_rst(output_q, outq.get())
def _put_rst(output_q, rst):
if type(rst) == types.GeneratorType:
for rr in rst:
_put_non_empty(output_q, rr)
else:
_put_non_empty(output_q, rst)
def _blackhole(args):
return EmptyRst
def _put_non_empty(q, val):
if val is not EmptyRst:
q.put(val)
def _make_q(n=1024):
return Queue.Queue(n)
def _thread(func, args):
th = threading.Thread(target=func,
args=args)
th.daemon = True
th.start()
return th
| mit | 250,652,157,187,175,260 | 19.027778 | 73 | 0.5043 | false |
robert-b-clarke/nre-darwin-py | nredarwin/cli.py | 1 | 1612 | import argparse
from nredarwin.webservice import DarwinLdbSession
import csv
import sys
from tabulate import tabulate
from functools import partial
def rows_to_display(station_board):
"""
Iterator for tabular output of board
"""
yield (("Platform", "Destination", "Scheduled", "Due"))
for service in station_board.train_services:
yield (
service.platform,
service.destination_text,
service.std,
service.etd,
)
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"station", type=str, help="station CRS code, e.g. MAN for Manchester Piccadilly"
)
ap.add_argument(
"--destination",
type=str,
required=False,
help="Only include services travelling to this CRS code, e.g HUD",
)
ap.add_argument("--csv", action="store_true", help="output in csv format")
args = ap.parse_args()
darwin_session = DarwinLdbSession(
wsdl="https://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx"
)
# build up query
board_query = partial(darwin_session.get_station_board, args.station)
if args.destination:
board_query = partial(board_query, destination_crs=args.destination)
# convert to tabular data for display
board_rows = rows_to_display(board_query())
# output CSV if requested
if args.csv:
output_writer = csv.writer(sys.stdout, dialect="unix")
output_writer.writerows(board_rows)
return
# Otherwise output human readable table
print(tabulate(board_rows, headers="firstrow"))
| bsd-3-clause | 8,184,569,043,009,174,000 | 28.851852 | 88 | 0.651985 | false |
mikeh77/mi-instrument | mi/idk/test/test_git.py | 2 | 2743 | #!/usr/bin/env python
"""
@package mi.idk.test.test_git
@file mi.idk/test/test_git.py
@author Bill French
@brief test git
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
from os.path import basename, dirname
from os import makedirs,chdir, system
from os import remove
from os.path import exists
import sys
from nose.plugins.attrib import attr
from mock import Mock
import unittest
from mi.core.unit_test import MiUnitTest
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.idk_git import IDKGit
from mi.idk.exceptions import InvalidGitRepo
from mi.idk.exceptions import GitCommandException
REPO = "https://github.com/ooici/marine-integrations.git"
ROOTDIR="/tmp/test_git.idk_test"
# /tmp is a link on OS X
if exists("/private/tmp"):
ROOTDIR = "/private%s" % ROOTDIR
@attr('UNIT', group='mi')
class TestGit(MiUnitTest):
"""
    Test the IDK git wrapper
"""
@classmethod
def setUpClass(cls):
system("rm -rf %s" % ROOTDIR)
if not exists(ROOTDIR):
makedirs(ROOTDIR)
idk_git = IDKGit(ROOTDIR)
log.info("clone repo %s", REPO)
idk_git.clone(REPO)
@classmethod
def tearDownClass(cls):
system("rm -rf %s" % ROOTDIR)
def setUp(self):
"""
Setup the test case
"""
log.debug("Test good git directory")
self.idk_git = IDKGit(ROOTDIR)
self.assertTrue(self.idk_git)
self.assertTrue(self.idk_git.repo)
self.assertTrue(self.idk_git.repo.isValid())
def test_bad_repo(self):
"""
        Test that a non-git directory raises InvalidGitRepo
"""
log.debug("Test non-git directory")
with self.assertRaises(InvalidGitRepo):
fail_git = IDKGit("/tmp")
fail_git.branches()
def test_branch(self):
branches = self.idk_git.branches()
log.debug( "Branches found: %s", branches)
# Add a branch
branch_name = 'test_idk_branch'
self.idk_git.create_branch(branch_name)
branches = self.idk_git.branches()
log.debug( "Branches found: %s", branches)
self.assertTrue(branch_name in branches)
# add the same branch again
with self.assertRaises(GitCommandException):
self.idk_git.create_branch(branch_name)
# switch to the new branch
self.idk_git.switch_branch(branch_name)
self.assertEqual(self.idk_git.get_current_branch(), branch_name)
# switch to the master
self.idk_git.switch_branch('master')
self.assertEqual(self.idk_git.get_current_branch(), 'master')
# switch to an unknown branch
with self.assertRaises(GitCommandException):
self.idk_git.switch_branch('fffsssaaa')
| bsd-2-clause | -6,532,111,846,827,004,000 | 23.936364 | 72 | 0.629967 | false |
GraphProcessor/CommunityDetectionCodes | Algorithms/2014-Heat-Kernel/src_python/demo_files/yche_numerical_linear_algebra_exp.py | 1 | 2192 | from __future__ import print_function
import numpy as np
def demo_gauss_sedel_method():
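    """Solve A x = b with the Gauss-Seidel iteration, which reuses already
    updated entries within a sweep:
    x_new[i] = (b[i] - sum_{j<i} A[i,j]*x_new[j] - sum_{j>i} A[i,j]*x_old[j]) / A[i,i]
    """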
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0.0, 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
# prints the system
print("System:")
for i in range(A.shape[0]):
row = ["{}*x{}".format(A[i, j], j + 1) for j in range(A.shape[1])]
print(" + ".join(row), "=", b[i])
print()
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x_new[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, rtol=1e-8):
break
x = x_new
print("Solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error)
def demo_jacobi_method():
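    """Solve A x = b with the Jacobi iteration, which only uses values from
    the previous sweep:
    x_new[i] = (b[i] - sum_{j!=i} A[i,j]*x_old[j]) / A[i,i]
    """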
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0.0, 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
# prints the system
print("System:")
for i in range(A.shape[0]):
row = ["{}*x{}".format(A[i, j], j + 1) for j in range(A.shape[1])]
print(" + ".join(row), "=", b[i])
print()
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, atol=1e-10):
break
x = x_new
print("Solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error)
if __name__ == '__main__':
demo_gauss_sedel_method()
demo_jacobi_method()
| gpl-2.0 | -273,464,835,275,803,520 | 24.488372 | 74 | 0.447993 | false |
centrofermi/e3pipe | dst/E3DstWeatherTree.py | 1 | 1873 | #!/usr/bin/env python
# *********************************************************************
# * Copyright (C) 2014 Luca Baldini ([email protected]) *
# * *
# * For the license terms see the file LICENSE, distributed *
# * along with this software. *
# *********************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from e3pipe.root.E3Tree import E3Tree
from e3pipe.root.E3BranchDescriptor import E3BranchDescriptor
class E3DstWeatherTree(E3Tree):
""" Class describing the ROOT tree containing the run-by-run header
information.
"""
NAME = 'Weather'
BRANCHES = [E3BranchDescriptor('Seconds', 'i'),
E3BranchDescriptor('IndoorTemperature', 'F'),
E3BranchDescriptor('OutdoorTemperature', 'F'),
E3BranchDescriptor('Pressure', 'F')
]
def __init__(self):
""" Constructor.
"""
E3Tree.__init__(self, 'Weather tree')
def test():
"""
"""
tree = E3DstWeatherTree()
if __name__ == '__main__':
test()
| gpl-3.0 | -5,012,376,391,257,592,000 | 32.446429 | 73 | 0.577149 | false |
borgarlie/TDT4501-Specialization-Project | research/calculate_accuracy.py | 1 | 2242 | import numpy as np
from tensorboardX import SummaryWriter
from seq2seq_summarization.globals import *
from classifier.train_classifier import get_predictions, calculate_accuracy, create_single_article_category_list
from research.train import split_category_and_article, category_from_string, evaluate
def test_accuracy(config, articles, vocabulary, encoder, decoder, classifier, max_length):
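    """Evaluate how well generated summaries preserve article categories:
    decode the top beam for every article, classify the generated sequence
    and compare the predicted categories against the gold categories."""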
print("Testing accuracy", flush=True)
writer = SummaryWriter('../log/test_accuracy1')
categories_total = []
categories_scores_total = []
print("Generating beams", flush=True)
for i in range(len(articles)):
print("Evaluating article nr: %d" % i, flush=True)
category, input_sentence = split_category_and_article(articles[i])
category = category.strip()
category_variable = category_from_string(category)
categories = [category_variable]
categories_var = Variable(torch.FloatTensor(categories))
if use_cuda:
categories_var = categories_var.cuda()
output_beams = evaluate(config, vocabulary, encoder, decoder, input_sentence, categories_var, max_length)
top1_beam = output_beams[0]
top1_sequence_output = top1_beam.decoded_word_sequence
output_sentence = ' '.join(top1_sequence_output[:-1])
sequence = indexes_from_sentence(vocabulary, output_sentence)
sequence = Variable(torch.LongTensor([sequence]))
if use_cuda:
sequence = sequence.cuda()
category = create_single_article_category_list(category)
categories_total.append(category)
categories_scores = get_category_scores(sequence, classifier)
categories_scores_total.append(categories_scores)
print("Calculating accuracy", flush=True)
np_gold_truth = np.array(categories_total)
print(np.shape(np_gold_truth), flush=True)
np_predicted = get_predictions(categories_scores_total, 0.00)
print(np.shape(np_predicted), flush=True)
epoch = 999 # random
calculate_accuracy(np_gold_truth, np_predicted, writer, epoch)
def get_category_scores(sequence, classifier):
categories_scores = classifier(sequence, mode='Test')
return categories_scores.data.cpu().numpy()[0]
| mit | -9,058,002,817,343,573,000 | 39.035714 | 113 | 0.702498 | false |
icebreaker/dotfiles | gnome/gnome2/gedit/plugins.symlink/classbrowser/parser_ruby.py | 1 | 14401 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Frederic Back ([email protected])
# Copyright (C) 2007 Kristoffer Lundén ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
import gtk
import gobject
import pango
import os
import re
import options
from parserinterface import ClassParserInterface
import imagelibrary
#===============================================================================
def tokenFromString(string):
""" Parse a string containing a function or class definition and return
a tuple containing information about the function, or None if the
parsing failed.
Example:
"#def foo(bar):" would return :
{'comment':True,'type':"def",'name':"foo",'params':"bar" } """
try:
e = r"([# ]*?)([a-zA-Z0-9_]+)( +)([a-zA-Z0-9_\?\!<>\+=\.]+)(.*)"
r = re.match(e,string).groups()
token = Token()
token.comment = '#' in r[0]
token.type = r[1]
token.name = r[3]
token.params = r[4]
token.original = string
return token
except: return None # return None to skip if unable to parse
def test():
pass
#===============================================================================
class Token:
def __init__(self):
self.type = None
self.original = None # the line in the file, unparsed
self.indent = 0
self.name = None
self.comment = False # if true, the token is commented, ie. inactive
self.params = None # string containing additional info
self.expanded = False
self.access = "public"
# start and end points
self.start = 0
self.end = 0
self.rubyfile = None
self.path = None # save the position in the browser
self.parent = None
self.children = []
def get_endline(self):
""" Get the line number where this token's declaration, including all
its children, finishes. Use it for copy operations."""
if len(self.children) > 0:
return self.children[-1].get_endline()
return self.end
def test_nested():
pass
def get_toplevel_class(self):
""" Try to get the class a token is in. """
if self.type == "class":
return self
if self.parent is not None:
tc = self.parent.get_toplevel_class()
if tc is None or tc.type == "file": return self #hack
else: return tc
return None
def printout(self):
for r in range(self.indent): print "",
print self.name,
if self.parent: print " (parent: ",self.parent.name
else: print
for tok in self.children: tok.printout()
#===============================================================================
class RubyFile(Token):
""" A class that represents a ruby file.
Manages "tokens", ie. classes and functions."""
def __init__(self, doc):
Token.__init__(self)
self.doc = doc
self.uri = doc.get_uri()
self.linestotal = 0 # total line count
self.type = "file"
self.name = os.path.basename(self.uri)
self.tokens = []
def getTokenAtLine(self, line):
""" get the token at the specified line number """
for token in self.tokens:
if token.start <= line and token.end > line:
return self.__findInnermostTokenAtLine(token, line)
return None
def __findInnermostTokenAtLine(self, token, line):
"""" ruby is parsed as nested, unlike python """
for child in token.children:
if child.start <= line and child.end > line:
return self.__findInnermostTokenAtLine(child, line)
return token
def parse(self, verbose=True):
#if verbose: print "parse ----------------------------------------------"
newtokenlist = []
self.children = []
currentParent = self
self.linestotal = self.doc.get_line_count()
text = self.doc.get_text(*self.doc.get_bounds())
linecount = -1
ends_to_skip = 0
access = "public"
for line in text.splitlines():
linecount += 1
lstrip = line.lstrip()
ln = lstrip.split()
if len(ln) == 0: continue
if ln[0] == '#': continue
if ln[0] in ("class","module","def"):
token = tokenFromString(lstrip)
if token is None: continue
token.rubyfile = self
token.start = linecount
if token.type == "def":
token.access = access
#print "line",linecount
#print "name", token.name
#print "type",token.type
#print "access",token.access
#print "to",currentParent.name
currentParent.children.append(token)
token.parent = currentParent
currentParent = token
newtokenlist.append(token)
idx = len(newtokenlist) - 1
if idx < len(self.tokens):
if newtokenlist[idx].original == self.tokens[idx].original:
newtokenlist[idx].expanded = self.tokens[idx].expanded
elif ln[0] in("begin","while","until","case","if","unless","for"):
ends_to_skip += 1
elif ln[0] in ("attr_reader","attr_writer","attr_accessor"):
for attr in ln:
m = re.match(r":(\w+)",attr)
if m:
token = Token()
token.rubyfile = self
token.type = 'def'
token.name = m.group(1)
token.start = linecount
token.end = linecount
token.original = lstrip
currentParent.children.append(token)
token.parent = currentParent
newtokenlist.append(token)
elif re.search(r"\sdo(\s+\|.*?\|)?\s*(#|$)", line):
#print "do",line
# Support for new style RSpec
if re.match(r"^(describe|it|before|after)\b", ln[0]):
token = Token()
token.rubyfile = self
token.start = linecount
if currentParent.type == "describe":
if ln[0] == "it":
token.name = " ".join(ln[1:-1])
else:
token.name = ln[0]
token.type = "def"
elif ln[0] == "describe":
token.type = "describe"
token.name = " ".join(ln[1:-1])
else:
continue
currentParent.children.append(token)
token.parent = currentParent
currentParent = token
newtokenlist.append(token)
                # Deprecated support for old style RSpec, will be removed later
elif ln[0] in ("context","specify","setup","teardown","context_setup","context_teardown"):
token = Token()
token.rubyfile = self
token.start = linecount
if currentParent.type == "context":
if ln[0] == "specify":
token.name = " ".join(ln[1:-1])
else:
token.name = ln[0]
token.type = "def"
elif ln[0] == "context":
token.type = "context"
token.name = " ".join(ln[1:-1])
else:
continue
currentParent.children.append(token)
token.parent = currentParent
currentParent = token
newtokenlist.append(token)
else:
ends_to_skip += 1
elif ln[0] in ("public","private","protected"):
if len(ln) == 1:
access = ln[0]
if re.search(r";?\s*end(?:\s*$|\s+(?:while|until))", line):
if ends_to_skip > 0:
ends_to_skip -= 1
else:
token = currentParent
#print "end",currentParent.name
token.end = linecount
currentParent = token.parent
# set new token list
self.tokens = newtokenlist
return True
#===============================================================================
class RubyParser( ClassParserInterface ):
def __init__(self):
self.rubyfile = None
def appendTokenToBrowser(self, token, parentit ):
it = self.__browsermodel.append(parentit,(token,))
token.path = self.__browsermodel.get_path(it)
#print token.path
#if token.parent:
# if token.parent.expanded:
# self.browser.expand_row(token.parent.path,False)
# pass
for child in token.children:
self.appendTokenToBrowser(child, it)
def parse(self, doc):
"""
Create a gtk.TreeModel with the class elements of the document
        The document is parsed with the RubyFile helper defined above (no
        external ctags invocation is involved) and the resulting tokens are
        used to populate the treemodel.
"""
self.rubyfile = RubyFile(doc)
self.rubyfile.parse(options.singleton().verbose)
self.__browsermodel = gtk.TreeStore(gobject.TYPE_PYOBJECT)
for child in self.rubyfile.children:
self.appendTokenToBrowser(child,None)
return self.__browsermodel
def __private_test_method(self):
pass
def get_tag_position(self, model, path):
tok = model.get_value( model.get_iter(path), 0 )
try: return tok.rubyfile.uri, tok.start+1
except: return None
def current_line_changed(self, model, doc, line):
# parse again if line count changed
if abs(self.rubyfile.linestotal - doc.get_line_count()) > 0:
if abs(self.rubyfile.linestotal - doc.get_line_count()) > 5:
if options.singleton().verbose:
print "RubyParser: refresh because line dif > 5"
self.rubyfile.parse()
else:
it = doc.get_iter_at_line(line)
a = it.copy(); b = it.copy()
a.backward_line(); a.backward_line()
b.forward_line(); b.forward_line()
t = doc.get_text(a,b)
if t.find("class") >= 0 or t.find("def") >= 0:
if options.singleton().verbose:
print "RubyParser: refresh because line cound changed near keyword"
self.rubyfile.parse()
def get_tag_at_line(self, model, doc, linenumber):
t = self.rubyfile.getTokenAtLine(linenumber)
#print linenumber,t
if t: return t.path
def cellrenderer(self, column, ctr, model, it):
""" Render the browser cell according to the token it represents. """
tok = model.get_value(it,0)
weight = 400
style = pango.STYLE_NORMAL
name = tok.name#+tok.params
colour = options.singleton().colours[ "function" ]
# set label and colour
if tok.type == "class":
name = "class "+name
colour = options.singleton().colours[ "class" ]
weight = 600
elif tok.type == "module":
name = "module "+name
colour = options.singleton().colours[ "namespace" ]
weight = 600
# new style RSpec
elif tok.type == "describe":
name = "describe "+name
colour = options.singleton().colours[ "namespace" ]
weight = 600
# Old style RSpec, deprecated
elif tok.type == "context":
name = "context "+name
colour = options.singleton().colours[ "namespace" ]
weight = 600
elif tok.type == "def":
colour = options.singleton().colours[ "member" ]
if tok.comment: name = "#"+name
        # assign properties
ctr.set_property("text", name)
ctr.set_property("style", style)
ctr.set_property("foreground-gdk", colour)
def pixbufrenderer(self, column, crp, model, it):
tok = model.get_value(it,0)
icon = "default"
if tok.type == "class":
icon = "class"
elif tok.type == "module":
icon = "namespace"
elif tok.type == "describe":
icon = "namespace"
elif tok.type == "context":
icon = "namespace"
elif tok.type == "def":
if tok.access == "public":
icon = "method"
elif tok.access == "protected":
icon = "method_prot"
elif tok.access == "private":
icon = "method_priv"
crp.set_property("pixbuf",imagelibrary.pixbufs[icon])
| mit | 4,563,122,297,649,822,700 | 33.615385 | 106 | 0.491319 | false |
DOV-Vlaanderen/pydov | tests/test_util_location.py | 1 | 18538 | """Module grouping tests for the pydov.util.location module."""
import pytest
from owslib.fes import (
And,
Or,
Not,
)
from pydov.util.location import (
Box,
Point,
Equals,
Disjoint,
Touches,
Within,
Intersects,
WithinDistance,
GmlObject
)
from owslib.etree import etree
from pydov.util.owsutil import set_geometry_column
from tests.abstract import clean_xml
class TestLocation(object):
"""Class grouping tests for the AbstractLocation subtypes."""
def test_box(self):
"""Test the default Box type.
Test whether the generated XML is correct.
"""
box = Box(94720, 186910, 112220, 202870)
xml = box.get_element()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:lowerCorner>94720.000000 186910.000000</gml:lowerCorner>'
'<gml:upperCorner>112220.000000 202870.000000</gml:upperCorner>'
'</gml:Envelope>')
def test_box_wgs84(self):
"""Test the Box type with WGS84 coordinates.
Test whether the generated XML is correct.
"""
box = Box(3.6214, 50.9850, 3.8071, 51.1270, epsg=4326)
xml = box.get_element()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">'
'<gml:lowerCorner>3.621400 50.985000</gml:lowerCorner>'
'<gml:upperCorner>3.807100 51.127000</gml:upperCorner>'
'</gml:Envelope>')
def test_box_invalid(self):
"""Test the Box type with the wrong ordering of coordinates.
Test whether a ValueError is raised.
"""
with pytest.raises(ValueError):
Box(94720, 202870, 186910, 112220)
def test_box_invalid_wgs84(self):
"""Test the Box type with the wrong ordering of WGS84 coordinates.
Test whether a ValueError is raised.
"""
with pytest.raises(ValueError):
Box(50.9850, 3.6214, 3.8071, 51.1270, epsg=4326)
def test_point(self):
"""Test the default Point type.
Test whether the generated XML is correct.
"""
point = Point(110680, 202030)
xml = point.get_element()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:pos>110680.000000 202030.000000</gml:pos></gml:Point>')
def test_point_wgs84(self):
"""Test the Point type with WGS84 coordinates.
Test whether the generated XML is correct.
"""
point = Point(3.8071, 51.1270, epsg=4326)
xml = point.get_element()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">'
'<gml:pos>3.807100 51.127000</gml:pos></gml:Point>')
def test_gmlobject_element(self):
"""Test the GmlObject type with an etree.Element.
Test whether the returned XML is correct.
"""
with open('tests/data/util/location/polygon_single_31370.gml',
'r') as gmlfile:
gml = gmlfile.read()
gml_element = etree.fromstring(gml.encode('utf8'))
gml_element = gml_element.find(
'.//{http://www.opengis.net/gml}Polygon')
gml_object = GmlObject(gml_element)
assert clean_xml(etree.tostring(
gml_object.get_element()).decode('utf8')) == clean_xml(
'<gml:Polygon '
'srsName="urn:ogc:def:crs:EPSG::31370"><gml:exterior><gml'
':LinearRing><gml:posList>108636.150020818 194960.844295764 '
'108911.922161617 194291.111953824 109195.573506438 '
'195118.42837622 108636.150020818 '
'194960.844295764</gml:posList></gml:LinearRing></gml'
':exterior></gml:Polygon>')
def test_gmlobject_bytes(self):
"""Test the GmlObject type with a GML string.
Test whether the returned XML is correct.
"""
with open('tests/data/util/location/polygon_single_31370.gml',
'r') as gmlfile:
gml = gmlfile.read()
gml_element = etree.fromstring(gml.encode('utf8'))
gml_element = gml_element.find(
'.//{http://www.opengis.net/gml}Polygon')
gml_object = GmlObject(etree.tostring(gml_element))
assert clean_xml(etree.tostring(
gml_object.get_element()).decode('utf8')) == clean_xml(
'<gml:Polygon '
'srsName="urn:ogc:def:crs:EPSG::31370"><gml:exterior><gml'
':LinearRing><gml:posList>108636.150020818 194960.844295764 '
'108911.922161617 194291.111953824 109195.573506438 '
'195118.42837622 108636.150020818 '
'194960.844295764</gml:posList></gml:LinearRing></gml'
':exterior></gml:Polygon>')
def test_gmlobject_string(self):
"""Test the GmlObject type with a GML string.
Test whether the returned XML is correct.
"""
with open('tests/data/util/location/polygon_single_31370.gml',
'r') as gmlfile:
gml = gmlfile.read()
gml_element = etree.fromstring(gml.encode('utf8'))
gml_element = gml_element.find(
'.//{http://www.opengis.net/gml}Polygon')
gml_object = GmlObject(etree.tostring(gml_element).decode('utf8'))
assert clean_xml(etree.tostring(
gml_object.get_element()).decode('utf8')) == clean_xml(
'<gml:Polygon '
'srsName="urn:ogc:def:crs:EPSG::31370"><gml:exterior><gml'
':LinearRing><gml:posList>108636.150020818 194960.844295764 '
'108911.922161617 194291.111953824 109195.573506438 '
'195118.42837622 108636.150020818 '
'194960.844295764</gml:posList></gml:LinearRing></gml'
':exterior></gml:Polygon>')
class TestBinarySpatialFilters(object):
"""Class grouping tests for the AbstractBinarySpatialFilter subtypes."""
def test_equals_point(self):
"""Test the Equals spatial filter with a Point location.
Test whether the generated XML is correct.
"""
equals = Equals(Point(150000, 150000))
equals.set_geometry_column('geom')
xml = equals.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:Equals><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:pos>150000.000000 150000.000000</gml:pos></gml:Point>'
'</ogc:Equals>')
def test_equals_nogeom(self):
"""Test the Equals spatial filter without setting a geometry column.
Test whether a RuntimeError is raised.
"""
equals = Equals(Point(150000, 150000))
with pytest.raises(RuntimeError):
equals.toXML()
def test_disjoint_box(self):
"""Test the Disjoint spatial filter with a Box location.
Test whether the generated XML is correct.
"""
disjoint = Disjoint(Box(94720, 186910, 112220, 202870))
disjoint.set_geometry_column('geom')
xml = disjoint.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:Disjoint><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:lowerCorner>94720.000000 186910.000000</gml:lowerCorner>'
'<gml:upperCorner>112220.000000 202870.000000</gml:upperCorner>'
'</gml:Envelope></ogc:Disjoint>')
def test_disjoint_nogeom(self):
"""Test the Disjoint spatial filter without setting a geometry column.
Test whether a RuntimeError is raised.
"""
disjoint = Disjoint(Point(150000, 150000))
with pytest.raises(RuntimeError):
disjoint.toXML()
def test_touches_box(self):
"""Test the Touches spatial filter with a Box location.
Test whether the generated XML is correct.
"""
touches = Touches(Box(94720, 186910, 112220, 202870))
touches.set_geometry_column('geom')
xml = touches.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:Touches><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:lowerCorner>94720.000000 186910.000000</gml:lowerCorner>'
'<gml:upperCorner>112220.000000 202870.000000</gml:upperCorner>'
'</gml:Envelope></ogc:Touches>')
def test_touches_nogeom(self):
"""Test the Touches spatial filter without setting a geometry column.
Test whether a RuntimeError is raised.
"""
touches = Touches(Point(150000, 150000))
with pytest.raises(RuntimeError):
touches.toXML()
def test_within_box(self):
"""Test the Within spatial filter with a Box location.
Test whether the generated XML is correct.
"""
within = Within(Box(94720, 186910, 112220, 202870))
within.set_geometry_column('geom')
xml = within.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:Within><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:lowerCorner>94720.000000 186910.000000</gml:lowerCorner>'
'<gml:upperCorner>112220.000000 202870.000000</gml:upperCorner>'
'</gml:Envelope></ogc:Within>')
def test_within_nogeom(self):
"""Test the Within spatial filter without setting a geometry column.
Test whether a RuntimeError is raised.
"""
within = Within(Box(94720, 186910, 112220, 202870))
with pytest.raises(RuntimeError):
within.toXML()
def test_intersects_box(self):
"""Test the Intersects spatial filter with a Box location.
Test whether the generated XML is correct.
"""
intersects = Intersects(Box(94720, 186910, 112220, 202870))
intersects.set_geometry_column('geom')
xml = intersects.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:Intersects><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:lowerCorner>94720.000000 186910.000000</gml:lowerCorner>'
'<gml:upperCorner>112220.000000 202870.000000</gml:upperCorner>'
'</gml:Envelope></ogc:Intersects>')
def test_intersects_nogeom(self):
"""Test the Intersects spatial filter without setting a geometry
column.
Test whether a RuntimeError is raised.
"""
intersects = Intersects(Box(94720, 186910, 112220, 202870))
with pytest.raises(RuntimeError):
intersects.toXML()
class TestLocationFilters(object):
"""Class grouping tests for the AbstractLocationFilter subtypes."""
def test_withindistance_point(self):
"""Test the WithinDistance spatial filter with a Point location.
Test whether the generated XML is correct.
"""
withindistance = WithinDistance(Point(150000, 150000), 100)
withindistance.set_geometry_column('geom')
xml = withindistance.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:DWithin><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:pos>150000.000000 150000.000000</gml:pos></gml:Point>'
'<gml:Distance units="meter">100.000000</gml:Distance>'
'</ogc:DWithin>')
def test_withindistance_point_named_args(self):
"""Test the WithinDistance spatial filter with a Point location.
Test whether the generated XML is correct.
"""
withindistance = WithinDistance(location=Point(150000, 150000),
distance=100, distance_unit='meter')
withindistance.set_geometry_column('geom')
xml = withindistance.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:DWithin><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370">'
'<gml:pos>150000.000000 150000.000000</gml:pos></gml:Point>'
'<gml:Distance units="meter">100.000000</gml:Distance>'
'</ogc:DWithin>')
def test_withindistance_nogeom(self):
"""Test the WithinDistance spatial filter without setting a geometry
column.
Test whether a RuntimeError is raised.
"""
withindistance = WithinDistance(Point(150000, 150000), 100)
with pytest.raises(RuntimeError):
withindistance.toXML()
def test_withindistance_point_wgs84(self):
"""Test the WithinDistance spatial filter with a Point location
using WGS84 coordinates.
Test whether the generated XML is correct.
"""
withindistance = WithinDistance(Point(51.1270, 3.8071, epsg=4326), 100)
withindistance.set_geometry_column('geom')
xml = withindistance.toXML()
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:DWithin><ogc:PropertyName>geom</ogc:PropertyName>'
'<gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">'
'<gml:pos>51.127000 3.807100</gml:pos></gml:Point>'
'<gml:Distance units="meter">100.000000</gml:Distance>'
'</ogc:DWithin>')
class TestLocationFilterExpressions(object):
"""Class grouping tests for expressions with spatial filters."""
def test_point_and_box(self):
"""Test a location filter expression using a Within(Box) and a
WithinDistance(Point) filter.
Test whether the generated XML is correct.
"""
point_and_box = And([WithinDistance(Point(150000, 150000), 100),
Within(Box(94720, 186910, 112220, 202870))])
xml = set_geometry_column(point_and_box, 'geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:And><ogc:DWithin><ogc:PropertyName>geom</ogc:PropertyName'
'><gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
':pos>150000.000000 '
'150000.000000</gml:pos></gml:Point><gml:Distance '
'units="meter">100.000000</gml:Distance></ogc:DWithin><ogc'
':Within><ogc:PropertyName>geom</ogc:PropertyName><gml'
':Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
':lowerCorner>94720.000000 '
'186910.000000</gml:lowerCorner><gml:upperCorner>112220.000000 '
'202870.000000</gml:upperCorner></gml:Envelope></ogc:Within'
'></ogc:And>')
def test_box_or_box(self):
"""Test a location filter expression using an Intersects(Box) and a
Within(Box) filter.
Test whether the generated XML is correct.
"""
box_or_box = Or([
Intersects(Box(50.9850, 3.6214, 51.1270, 3.8071, epsg=4326)),
Within(Box(94720, 186910, 112220, 202870))])
xml = set_geometry_column(box_or_box, 'geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:Or><ogc:Intersects><ogc:PropertyName>geom</ogc'
':PropertyName><gml:Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#4326"><gml'
':lowerCorner>50.985000 '
'3.621400</gml:lowerCorner><gml:upperCorner>51.127000 '
'3.807100</gml:upperCorner></gml:Envelope></ogc:Intersects><ogc'
':Within><ogc:PropertyName>geom</ogc:PropertyName><gml:Envelope '
'srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml '
':lowerCorner>94720.000000 '
'186910.000000</gml:lowerCorner><gml:upperCorner>112220.000000 '
'202870.000000</gml:upperCorner></gml:Envelope></ogc:Within'
'></ogc:Or>')
def test_recursive(self):
"""Test a location filter expression using a recursive expression
with And(Not(WithinDistance(Point) filter.
Test whether the generated XML is correct.
"""
point_and_box = And([Not([WithinDistance(Point(150000, 150000), 100)]),
Within(Box(94720, 186910, 112220, 202870))])
xml = set_geometry_column(point_and_box, 'geom')
assert clean_xml(etree.tostring(xml).decode('utf8')) == clean_xml(
'<ogc:And><ogc:Not><ogc:DWithin><ogc:PropertyName>geom</ogc'
':PropertyName><gml:Point srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
':pos>150000.000000 '
'150000.000000</gml:pos></gml:Point><gml:Distance '
'units="meter">100.000000</gml:Distance></ogc:DWithin></ogc:Not'
'><ogc:Within><ogc:PropertyName>geom</ogc:PropertyName><gml'
':Envelope srsDimension="2" '
'srsName="http://www.opengis.net/gml/srs/epsg.xml#31370"><gml'
':lowerCorner>94720.000000 '
'186910.000000</gml:lowerCorner><gml:upperCorner>112220.000000 '
'202870.000000</gml:upperCorner></gml:Envelope></ogc:Within'
'></ogc:And>')
| mit | -1,593,256,956,958,755,600 | 37.144033 | 79 | 0.600766 | false |
rho2/30DaysOfCode | day18.py | 1 | 1039 | import sys
class Solution:
def __init__(self):
self.stack = list()
self.queue = list()
def pushCharacter(self, char):
self.stack.append(char)
def popCharacter(self):
return(self.stack.pop(-1))
def enqueueCharacter(self, char):
self.queue.append(char)
def dequeueCharacter(self):
return(self.queue.pop(0))
# read the string s
s=input()
#Create the Solution class object
obj=Solution()
l=len(s)
# push/enqueue all the characters of string s to stack
for i in range(l):
obj.pushCharacter(s[i])
obj.enqueueCharacter(s[i])
isPalindrome=True
'''
pop the top character from stack
dequeue the first character from queue
compare both the characters
'''
for i in range(l // 2):
if obj.popCharacter()!=obj.dequeueCharacter():
isPalindrome=False
break
#finally print whether string s is palindrome or not.
if isPalindrome:
print("The word, "+s+", is a palindrome.")
else:
print("The word, "+s+", is not a palindrome.")
| mit | -2,207,952,508,544,627,000 | 22.088889 | 54 | 0.647738 | false |
sealevelresearch/tide-wrangler | tide_wrangler/scratch/convert_garston_to_csv.py | 1 | 1918 | #!/usr/bin/env python
import pytz
import datetime
import csv
from os import path
from collections import namedtuple
Row = namedtuple('Row', 'when,height_m')
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
_NAIVE_DATETIME_FORMAT = '%y%m%dT%H:%M:00Z'
_FIELDNAMES = ['datetime', 'observed_sea_level']
def main(filenames):
for in_filename in filenames:
csv_filename = make_output_filename(in_filename)
date_str = get_date_str_from_input_filename(in_filename)
convert_file(in_filename, csv_filename, date_str)
def make_output_filename(in_filename):
"""
>>> make_output_filename('/tmp/data.GAR')
'/tmp/data.GAR.csv'
"""
return in_filename + '.csv'
def get_date_str_from_input_filename(in_filename):
"""
>>> get_date_str_from_input_filename('/tmp/13120610.GAR')
'131206'
"""
return path.basename(path.splitext(in_filename)[0])[:-2]
def convert_file(in_filename, csv_filename, date_str):
with open(in_filename, 'r') as f, open(csv_filename, 'w') as g:
csvreader = csv.DictReader(f, fieldnames=_FIELDNAMES)
csvwriter = csv.DictWriter(g, fieldnames=_FIELDNAMES)
csvwriter.writeheader()
count = 1
for line in csvreader:
row = parse_line(line, date_str)
csvwriter.writerow(
{_FIELDNAMES[0]: row[0].strftime(DATETIME_FORMAT),
_FIELDNAMES[1]: row[1]})
count += 1
if count % 50000 == 0:
print(count)
print('Converted {} lines.'.format(count))
def parse_line(line, date_str):
datetime_str = date_str + "T" + line["datetime"] + "Z"
when = datetime.datetime.strptime(
datetime_str, _NAIVE_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
height_m = float(line["observed_sea_level"])
return [when, height_m]
if __name__ == '__main__':
import sys
filenames = sys.argv[1:]
main(filenames)
| mit | -227,173,322,786,278,400 | 26.797101 | 70 | 0.616788 | false |
z-plot/z-plot | examples/barplots/manybars.py | 1 | 1257 | #! /usr/bin/env python
import sys
from zplot import *
bartypes = [('hline', 1, 1),
('vline', 1, 1),
('hvline', 1, 1),
('dline1', 1, 2),
('dline2', 1, 2),
('dline12', 0.5, 2),
('circle', 1, 2),
('square', 1, 1),
('triangle', 2, 2),
('utriangle', 2, 2)]
L = len(bartypes)
ctype = 'eps' if len(sys.argv) < 2 else sys.argv[1]
c = canvas(ctype, title='manybars', dimensions=[L*10, 110])
print(c.version)
d = drawable(canvas=c, xrange=[0,L+1], yrange=[0,10], coord=[0,5],
dimensions=[L*10,100])
t = table(file='manybars.data')
p = plotter()
for btype, fsize, fskip in bartypes:
p.verticalbars(drawable=d, table=t, xfield='c0', yfield='c1', fill=True,
fillcolor='darkgray', fillstyle=btype, barwidth=0.9,
fillsize=fsize, fillskip=fskip)
t.update(set='c0=c0+1')
c.render()
| bsd-3-clause | -2,772,117,638,909,082,600 | 22.716981 | 76 | 0.461416 | false |
binhqnguyen/lena | test.py | 1 | 75970 | #! /usr/bin/env python26
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2009 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import sys
import time
import optparse
import subprocess
import threading
import Queue
import signal
import xml.dom.minidom
import shutil
import re
from utils import get_list_from_file
#
# XXX This should really be part of a waf command to list the configuration
# items relative to optional ns-3 pieces.
#
# A list of interesting configuration items in the waf configuration
# cache which we may be interested in when deciding on which examples
# to run and how to run them. These are set by waf during the
# configuration phase and the corresponding assignments are usually
# found in the associated subdirectory wscript files.
#
interesting_config_items = [
"NS3_ENABLED_MODULES",
"NS3_MODULE_PATH",
"NSC_ENABLED",
"ENABLE_REAL_TIME",
"ENABLE_THREADING",
"ENABLE_EXAMPLES",
"ENABLE_TESTS",
"EXAMPLE_DIRECTORIES",
"ENABLE_PYTHON_BINDINGS",
"ENABLE_CLICK",
"ENABLE_BRITE",
"ENABLE_OPENFLOW",
"APPNAME",
"BUILD_PROFILE",
"VERSION",
"PYTHON",
"VALGRIND_FOUND",
]
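#
# For reference, read_waf_config() below exec()s any line of waf's
# c4che/_cache.py whose name matches one of the items above. Such lines
# are plain Python assignments; an illustrative fragment (values are not
# taken from a real build) might look like:
#
#   NS3_ENABLED_MODULES = ['ns3-core', 'ns3-network']
#   ENABLE_EXAMPLES = True
#   BUILD_PROFILE = 'debug'
#
# exec()ing them overrides the module-level defaults that follow.
#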
NSC_ENABLED = False
ENABLE_REAL_TIME = False
ENABLE_THREADING = False
ENABLE_EXAMPLES = True
ENABLE_TESTS = True
ENABLE_CLICK = False
ENABLE_BRITE = False
ENABLE_OPENFLOW = False
EXAMPLE_DIRECTORIES = []
APPNAME = ""
BUILD_PROFILE = ""
BUILD_PROFILE_SUFFIX = ""
VERSION = ""
PYTHON = ""
VALGRIND_FOUND = True
#
# This will be given a prefix and a suffix when the waf config file is
# read.
#
test_runner_name = "test-runner"
#
# If the user has constrained us to run certain kinds of tests, we can tell waf
# to only build
#
core_kinds = ["bvt", "core", "performance", "system", "unit"]
#
# There are some special cases for test suites that kill valgrind. This is
# because NSC causes illegal instruction crashes when run under valgrind.
#
core_valgrind_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
"routing-click",
"lte-rr-ff-mac-scheduler",
"lte-tdmt-ff-mac-scheduler",
"lte-fdmt-ff-mac-scheduler",
"lte-pf-ff-mac-scheduler",
"lte-tta-ff-mac-scheduler",
"lte-fdbet-ff-mac-scheduler",
"lte-ttbet-ff-mac-scheduler",
"lte-fdtbfq-ff-mac-scheduler",
"lte-tdtbfq-ff-mac-scheduler",
"lte-pss-ff-mac-scheduler",
]
#
# There are some special cases for test suites that fail when NSC is
# missing.
#
core_nsc_missing_skip_tests = [
"ns3-tcp-cwnd",
"nsc-tcp-loss",
"ns3-tcp-interoperability",
]
#
# Parse the examples-to-run file if it exists.
#
# This function adds any C++ examples or Python examples that are to be run
# to the lists in example_tests and python_tests, respectively.
#
def parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests):
# Look for the examples-to-run file exists.
if os.path.exists(examples_to_run_path):
# Each tuple in the C++ list of examples to run contains
#
# (example_name, do_run, do_valgrind_run)
#
# where example_name is the executable to be run, do_run is a
# condition under which to run the example, and do_valgrind_run is
# a condition under which to run the example under valgrind. This
# is needed because NSC causes illegal instruction crashes with
# some tests when they are run under valgrind.
#
# Note that the two conditions are Python statements that
# can depend on waf configuration variables. For example,
#
# ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#
cpp_examples = get_list_from_file(examples_to_run_path, "cpp_examples")
for example_name, do_run, do_valgrind_run in cpp_examples:
            # Separate the example name from its arguments.
example_name_original = example_name
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_name = "%s%s-%s%s" % (APPNAME, VERSION, example_name, BUILD_PROFILE_SUFFIX)
# Set the full path for the example.
example_path = os.path.join(cpp_executable_dir, example_name)
# Add all of the C++ examples that were built, i.e. found
# in the directory, to the list of C++ examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
example_tests.append((example_path, do_run, do_valgrind_run))
example_names_original.append(example_name_original)
# Each tuple in the Python list of examples to run contains
#
# (example_name, do_run)
#
# where example_name is the Python script to be run and
# do_run is a condition under which to run the example.
#
# Note that the condition is a Python statement that can
# depend on waf configuration variables. For example,
#
# ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#
python_examples = get_list_from_file(examples_to_run_path, "python_examples")
for example_name, do_run in python_examples:
            # Separate the example name from its arguments.
example_name_parts = example_name.split(' ', 1)
if len(example_name_parts) == 1:
example_name = example_name_parts[0]
example_arguments = ""
else:
example_name = example_name_parts[0]
example_arguments = example_name_parts[1]
# Set the full path for the example.
example_path = os.path.join(python_script_dir, example_name)
# Add all of the Python examples that were found to the
# list of Python examples to run.
if os.path.exists(example_path):
# Add any arguments to the path.
if len(example_name_parts) != 1:
example_path = "%s %s" % (example_path, example_arguments)
# Add this example.
python_tests.append((example_path, do_run))
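#
# For illustration only: a minimal examples-to-run.py file of the kind
# parsed above could contain nothing more than the two lists read via
# get_list_from_file(). The entries below are hypothetical, apart from the
# ones already quoted in the comments inside the function.
#
#   cpp_examples = [
#       ("tcp-nsc-lfn", "NSC_ENABLED == True", "NSC_ENABLED == False"),
#   ]
#
#   python_examples = [
#       ("realtime-udp-echo.py", "ENABLE_REAL_TIME == True"),
#   ]
#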
#
# The test suites are going to want to output status. They are running
# concurrently. This means that unless we are careful, the output of
# the test suites will be interleaved. Rather than introducing a lock
# file that could unintentionally start serializing execution, we ask
# the tests to write their output to a temporary directory and then
# put together the final output file when we "join" the test tasks back
# to the main thread. In addition to this issue, the example programs
# often write lots and lots of trace files which we will just ignore.
# We put all of them into the temp directory as well, so they can be
# easily deleted.
#
TMP_OUTPUT_DIR = "testpy-output"
def read_test(test):
result = test.find('Result').text
name = test.find('Name').text
if not test.find('Time') is None:
time_real = test.find('Time').get('real')
else:
time_real = ''
return (result, name, time_real)
#
# A simple example of writing a text file with a test result summary. It is
# expected that this output will be fine for developers looking for problems.
#
def node_to_text (test, f):
(result, name, time_real) = read_test(test)
output = "%s: Test Suite \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
for details in test.findall('FailureDetails'):
f.write(" Details:\n")
f.write(" Message: %s\n" % details.find('Message').text)
f.write(" Condition: %s\n" % details.find('Condition').text)
f.write(" Actual: %s\n" % details.find('Actual').text)
f.write(" Limit: %s\n" % details.find('Limit').text)
f.write(" File: %s\n" % details.find('File').text)
f.write(" Line: %s\n" % details.find('Line').text)
for child in test.findall('Test'):
node_to_text(child, f)
def translate_to_text(results_file, text_file):
f = open(text_file, 'w')
import xml.etree.ElementTree as ET
et = ET.parse (results_file)
for test in et.findall('Test'):
node_to_text (test, f)
for example in et.findall('Example'):
result = example.find('Result').text
name = example.find('Name').text
if not example.find('Time') is None:
time_real = example.find('Time').get('real')
else:
time_real = ''
output = "%s: Example \"%s\" (%s)\n" % (result, name, time_real)
f.write(output)
f.close()
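#
# Both translators above consume the test runner's XML results file. A
# rough sketch of the shape they expect (element names are taken from the
# parsing code above; the root element name and the values shown are
# illustrative only):
#
#   <Results>
#     <Test>
#       <Name>some-test-suite</Name>
#       <Result>PASS</Result>
#       <Time real="0.30"/>
#     </Test>
#     <Example>
#       <Name>udp-echo</Name>
#       <Result>PASS</Result>
#       <Time real="0.10"/>
#     </Example>
#   </Results>
#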
#
# A simple example of writing an HTML file with a test result summary. It is
# expected that this will eventually be made prettier as time progresses and
# we have time to tweak it. This may end up being moved to a separate module
# since it will probably grow over time.
#
def translate_to_html(results_file, html_file):
f = open(html_file, 'w')
f.write("<html>\n")
f.write("<body>\n")
f.write("<center><h1>ns-3 Test Results</h1></center>\n")
#
# Read and parse the whole results file.
#
import xml.etree.ElementTree as ET
et = ET.parse(results_file)
#
# Iterate through the test suites
#
f.write("<h2>Test Suites</h2>\n")
for suite in et.findall('Test'):
#
# For each test suite, get its name, result and execution time info
#
(result, name, time) = read_test (suite)
#
# Print a level three header with the result, name and time. If the
# test suite passed, the header is printed in green. If the suite was
# skipped, print it in orange, otherwise assume something bad happened
# and print in red.
#
if result == "PASS":
f.write("<h3 style=\"color:green\">%s: %s (%s)</h3>\n" % (result, name, time))
elif result == "SKIP":
f.write("<h3 style=\"color:#ff6600\">%s: %s (%s)</h3>\n" % (result, name, time))
else:
f.write("<h3 style=\"color:red\">%s: %s (%s)</h3>\n" % (result, name, time))
#
# The test case information goes in a table.
#
f.write("<table border=\"1\">\n")
#
# The first column of the table has the heading Result
#
f.write("<th> Result </th>\n")
#
# If the suite crashed or is skipped, there is no further information, so just
        # declare a new table row with the result (CRASH or SKIP) in it. Looks like:
#
# +--------+
# | Result |
# +--------+
# | CRASH |
# +--------+
#
# Then go on to the next test suite. Valgrind and skipped errors look the same.
#
if result in ["CRASH", "SKIP", "VALGR"]:
f.write("<tr>\n")
if result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</td>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("</tr>\n")
f.write("</table>\n")
continue
#
# If the suite didn't crash, we expect more information, so fill out
# the table heading row. Like,
#
# +--------+----------------+------+
# | Result | Test Case Name | Time |
# +--------+----------------+------+
#
f.write("<th>Test Case Name</th>\n")
f.write("<th> Time </th>\n")
#
# If the test case failed, we need to print out some failure details
# so extend the heading row again. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
#
if result == "FAIL":
f.write("<th>Failure Details</th>\n")
#
# Now iterate through all of the test cases.
#
for case in suite.findall('Test'):
#
# Get the name, result and timing information from xml to use in
# printing table below.
#
(result, name, time) = read_test(case)
#
# If the test case failed, we iterate through possibly multiple
# failure details
#
if result == "FAIL":
#
# There can be multiple failures for each test case. The first
# row always gets the result, name and timing information along
# with the failure details. Remaining failures don't duplicate
# this information but just get blanks for readability. Like,
#
# +--------+----------------+------+-----------------+
# | Result | Test Case Name | Time | Failure Details |
# +--------+----------------+------+-----------------+
# | FAIL | The name | time | It's busted |
# +--------+----------------+------+-----------------+
# | | | | Really broken |
# +--------+----------------+------+-----------------+
# | | | | Busted bad |
# +--------+----------------+------+-----------------+
#
first_row = True
for details in case.findall('FailureDetails'):
#
# Start a new row in the table for each possible Failure Detail
#
f.write("<tr>\n")
if first_row:
first_row = False
f.write("<td style=\"color:red\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
else:
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td></td>\n")
f.write("<td>")
f.write("<b>Message: </b>%s, " % details.find('Message').text)
f.write("<b>Condition: </b>%s, " % details.find('Condition').text)
f.write("<b>Actual: </b>%s, " % details.find('Actual').text)
f.write("<b>Limit: </b>%s, " % details.find('Limit').text)
f.write("<b>File: </b>%s, " % details.find('File').text)
f.write("<b>Line: </b>%s" % details.find('Line').text)
f.write("</td>\n")
#
# End the table row
#
f.write("</td>\n")
else:
#
# If this particular test case passed, then we just print the PASS
# result in green, followed by the test case name and its execution
# time information. These go off in <td> ... </td> table data.
# The details table entry is left blank.
#
# +--------+----------------+------+---------+
# | Result | Test Case Name | Time | Details |
# +--------+----------------+------+---------+
# | PASS | The name | time | |
# +--------+----------------+------+---------+
#
f.write("<tr>\n")
f.write("<td style=\"color:green\">%s</td>\n" % result)
f.write("<td>%s</td>\n" % name)
f.write("<td>%s</td>\n" % time)
f.write("<td></td>\n")
f.write("</tr>\n")
#
# All of the rows are written, so we need to end the table.
#
f.write("</table>\n")
#
# That's it for all of the test suites. Now we have to do something about
# our examples.
#
f.write("<h2>Examples</h2>\n")
#
# Example status is rendered in a table just like the suites.
#
f.write("<table border=\"1\">\n")
#
# The table headings look like,
#
# +--------+--------------+--------------+
# | Result | Example Name | Elapsed Time |
# +--------+--------------+--------------+
#
f.write("<th> Result </th>\n")
f.write("<th>Example Name</th>\n")
f.write("<th>Elapsed Time</th>\n")
#
# Now iterate through all of the examples
#
for example in et.findall("Example"):
#
# Start a new row for each example
#
f.write("<tr>\n")
#
# Get the result and name of the example in question
#
(result, name, time) = read_test(example)
#
# If the example either failed or crashed, print its result status
# in red; otherwise green. This goes in a <td> ... </td> table data
#
if result == "PASS":
f.write("<td style=\"color:green\">%s</td>\n" % result)
elif result == "SKIP":
f.write("<td style=\"color:#ff6600\">%s</fd>\n" % result)
else:
f.write("<td style=\"color:red\">%s</td>\n" % result)
#
# Write the example name as a new tag data.
#
f.write("<td>%s</td>\n" % name)
#
# Write the elapsed time as a new tag data.
#
f.write("<td>%s</td>\n" % time)
#
# That's it for the current example, so terminate the row.
#
f.write("</tr>\n")
#
# That's it for the table of examples, so terminate the table.
#
f.write("</table>\n")
#
# And that's it for the report, so finish up.
#
f.write("</body>\n")
f.write("</html>\n")
f.close()
#
# Python Control-C handling is broken in the presence of multiple threads.
# Signals get delivered to the runnable/running thread by default and if
# it is blocked, the signal is simply ignored. So we hook sigint and set
# a global variable telling the system to shut down gracefully.
#
thread_exit = False
def sigint_hook(signal, frame):
global thread_exit
thread_exit = True
return 0
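#
# The handler above only takes effect once it is registered with the
# signal module; the driver code does that along the lines of the
# following sketch (the registration itself is not part of this excerpt):
#
#   signal.signal(signal.SIGINT, sigint_hook)
#
# after which worker threads see thread_exit == True and report a "break"
# instead of starting new work (see worker_thread.run below).
#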
#
# In general, the build process itself naturally takes care of figuring out
# which tests are built into the test runner. For example, if waf configure
# determines that ENABLE_EMU is false due to some missing dependency,
# the tests for the emu net device simply will not be built and will
# therefore not be included in the built test runner.
#
# Examples, however, are a different story. In that case, we are just given
# a list of examples that could be run. Instead of just failing, for example,
# nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration
# for relevant configuration items.
#
# XXX This function pokes around in the waf internal state file. To be a
# little less hacky, we should add a command to waf to return this info
# and use that result.
#
def read_waf_config():
for line in open(".lock-waf_" + sys.platform + "_build", "rt"):
if line.startswith("top_dir ="):
key, val = line.split('=')
top_dir = eval(val.strip())
if line.startswith("out_dir ="):
key, val = line.split('=')
out_dir = eval(val.strip())
global NS3_BASEDIR
NS3_BASEDIR = top_dir
global NS3_BUILDDIR
NS3_BUILDDIR = out_dir
for line in open("%s/c4che/_cache.py" % out_dir).readlines():
for item in interesting_config_items:
if line.startswith(item):
exec(line, globals())
if options.verbose:
for item in interesting_config_items:
print "%s ==" % item, eval(item)
#
# It seems pointless to fork a process to run waf to fork a process to run
# the test runner, so we just run the test runner directly. The main thing
# that waf would do for us would be to sort out the shared library path but
# we can deal with that easily and do here.
#
# There can be many different ns-3 repositories on a system, and each has
# its own shared libraries, so ns-3 doesn't hardcode a shared library search
# path -- it is cooked up dynamically, so we do that too.
#
def make_paths():
have_DYLD_LIBRARY_PATH = False
have_LD_LIBRARY_PATH = False
have_PATH = False
have_PYTHONPATH = False
keys = os.environ.keys()
for key in keys:
if key == "DYLD_LIBRARY_PATH":
have_DYLD_LIBRARY_PATH = True
if key == "LD_LIBRARY_PATH":
have_LD_LIBRARY_PATH = True
if key == "PATH":
have_PATH = True
if key == "PYTHONPATH":
have_PYTHONPATH = True
pypath = os.environ["PYTHONPATH"] = os.path.join (NS3_BUILDDIR, "bindings", "python")
if not have_PYTHONPATH:
os.environ["PYTHONPATH"] = pypath
else:
os.environ["PYTHONPATH"] += ":" + pypath
if options.verbose:
print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"]
if sys.platform == "darwin":
if not have_DYLD_LIBRARY_PATH:
os.environ["DYLD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["DYLD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"]
elif sys.platform == "win32":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ';' + path
if options.verbose:
print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
elif sys.platform == "cygwin":
if not have_PATH:
os.environ["PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["PATH"] += ":" + path
if options.verbose:
print "os.environ[\"PATH\"] == %s" % os.environ["PATH"]
else:
if not have_LD_LIBRARY_PATH:
os.environ["LD_LIBRARY_PATH"] = ""
for path in NS3_MODULE_PATH:
os.environ["LD_LIBRARY_PATH"] += ":" + path
if options.verbose:
print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"]
#
# Short note on generating suppressions:
#
# See the valgrind documentation for a description of suppressions. The easiest
# way to generate a suppression expression is by using the valgrind
# --gen-suppressions option. To do that you have to figure out how to run the
# test in question.
#
# If you do "test.py -v -g -s <suitename> then test.py will output most of what
# you need. For example, if you are getting a valgrind error in the
# devices-mesh-dot11s-regression test suite, you can run:
#
# ./test.py -v -g -s devices-mesh-dot11s-regression
#
# You should see in the verbose output something that looks like:
#
# Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output/2010-01-12-22-47-50-CUT
# --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml
#
# You need to pull out the useful pieces, and so could run the following to
# reproduce your error:
#
# valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Hint: Use the first part of the command as is, and point the "tempdir" to
# somewhere real. You don't need to specify an "out" file.
#
# When you run the above command you should see your valgrind error. The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind. Use something like:
#
# valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
# --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/ns3-dev-test-runner-debug
# --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
# --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
# ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'<ret>.
#
# You will be provided with a suppression expression that looks something like
# the following:
# {
# <insert_a_suppression_name_here>
# Memcheck:Addr8
# fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
# fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
# fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
# ...
# the rest of the stack frame
# ...
# }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case). The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info. For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame. You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
# {
#   Suppress invalid read size errors in SendPreq() when using HwmpProtocolMac
# Memcheck:Addr8
# fun:*HwmpProtocolMac*SendPreq*
# }
#
# Now, when you run valgrind the error will be suppressed.
#
VALGRIND_SUPPRESSIONS_FILE = "testpy.supp"
def run_job_synchronously(shell_command, directory, valgrind, is_python, build_path=""):
suppressions_path = os.path.join (NS3_BASEDIR, VALGRIND_SUPPRESSIONS_FILE)
if is_python:
path_cmd = PYTHON[0] + " " + os.path.join (NS3_BASEDIR, shell_command)
else:
if len(build_path):
path_cmd = os.path.join (build_path, shell_command)
else:
path_cmd = os.path.join (NS3_BUILDDIR, shell_command)
if valgrind:
cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path,
path_cmd)
else:
cmd = path_cmd
if options.verbose:
print "Synchronously execute %s" % cmd
start_time = time.time()
proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
elapsed_time = time.time() - start_time
retval = proc.returncode
#
# valgrind sometimes has its own idea about what kind of memory management
# errors are important. We want to detect *any* leaks, so the way to do
# that is to look for the presence of a valgrind leak summary section.
#
# If another error has occurred (like a test suite has failed), we don't
# want to trump that error, so only do the valgrind output scan if the
# test has otherwise passed (return code was zero).
#
if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results:
retval = 2
if options.verbose:
print "Return code = ", retval
print "stderr = ", stderr_results
return (retval, stdout_results, stderr_results, elapsed_time)
#
# This class defines a unit of testing work. It will typically refer to
# a test suite to run using the test-runner, or an example to run directly.
#
class Job:
def __init__(self):
self.is_break = False
self.is_skip = False
self.is_example = False
self.is_pyexample = False
self.shell_command = ""
self.display_name = ""
self.basedir = ""
self.tempdir = ""
self.cwd = ""
self.tmp_file_name = ""
self.returncode = False
self.elapsed_time = 0
self.build_path = ""
#
# A job is either a standard job or a special job indicating that a worker
# thread should exist. This special job is indicated by setting is_break
# to true.
#
def set_is_break(self, is_break):
self.is_break = is_break
#
# If a job is to be skipped, we actually run it through the worker threads
# to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
#
def set_is_skip(self, is_skip):
self.is_skip = is_skip
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_example(self, is_example):
self.is_example = is_example
#
# Examples are treated differently than standard test suites. This is
# mostly because they are completely unaware that they are being run as
# tests. So we have to do some special case processing to make them look
# like tests.
#
def set_is_pyexample(self, is_pyexample):
self.is_pyexample = is_pyexample
#
# This is the shell command that will be executed in the job. For example,
#
# "utils/ns3-dev-test-runner-debug --test-name=some-test-suite"
#
def set_shell_command(self, shell_command):
self.shell_command = shell_command
#
# This is the build path where ns-3 was built. For example,
#
# "/home/craigdo/repos/ns-3-allinone-test/ns-3-dev/build/debug"
#
def set_build_path(self, build_path):
self.build_path = build_path
#
    # This is the display name of the job, typically the test suite or example
# name. For example,
#
# "some-test-suite" or "udp-echo"
#
def set_display_name(self, display_name):
self.display_name = display_name
#
# This is the base directory of the repository out of which the tests are
# being run. It will be used deep down in the testing framework to determine
# where the source directory of the test was, and therefore where to find
# provided test vectors. For example,
#
# "/home/user/repos/ns-3-dev"
#
def set_basedir(self, basedir):
self.basedir = basedir
#
# This is the directory to which a running test suite should write any
# temporary files.
#
def set_tempdir(self, tempdir):
self.tempdir = tempdir
#
# This is the current working directory that will be given to an executing
# test as it is being run. It will be used for examples to tell them where
# to write all of the pcap files that we will be carefully ignoring. For
# example,
#
# "/tmp/unchecked-traces"
#
def set_cwd(self, cwd):
self.cwd = cwd
#
# This is the temporary results file name that will be given to an executing
# test as it is being run. We will be running all of our tests in parallel
# so there must be multiple temporary output files. These will be collected
# into a single XML file at the end and then be deleted.
#
def set_tmp_file_name(self, tmp_file_name):
self.tmp_file_name = tmp_file_name
#
# The return code received when the job process is executed.
#
def set_returncode(self, returncode):
self.returncode = returncode
#
# The elapsed real time for the job execution.
#
def set_elapsed_time(self, elapsed_time):
self.elapsed_time = elapsed_time
#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
def __init__(self, input_queue, output_queue):
threading.Thread.__init__(self)
self.input_queue = input_queue
self.output_queue = output_queue
def run(self):
while True:
job = self.input_queue.get()
#
# Worker threads continue running until explicitly told to stop with
# a special job.
#
if job.is_break:
return
#
# If the global interrupt handler sets the thread_exit variable,
# we stop doing real work and just report back a "break" in the
# normal command processing has happened.
#
if thread_exit == True:
job.set_is_break(True)
self.output_queue.put(job)
continue
#
# If we are actually supposed to skip this job, do so. Note that
# if is_skip is true, returncode is undefined.
#
if job.is_skip:
if options.verbose:
print "Skip %s" % job.shell_command
self.output_queue.put(job)
continue
#
# Otherwise go about the business of running tests as normal.
#
else:
if options.verbose:
print "Launch %s" % job.shell_command
if job.is_example or job.is_pyexample:
#
# If we have an example, the shell command is all we need to
# know. It will be something like "examples/udp/udp-echo" or
# "examples/wireless/mixed-wireless.py"
#
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command,
job.cwd, options.valgrind, job.is_pyexample, job.build_path)
else:
#
# If we're a test suite, we need to provide a little more info
# to the test runner, specifically the base directory and temp
# file name
#
if options.update_data:
update_data = '--update-data'
else:
update_data = ''
(job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command +
" --xml --tempdir=%s --out=%s %s" % (job.tempdir, job.tmp_file_name, update_data),
job.cwd, options.valgrind, False)
job.set_elapsed_time(et)
if options.verbose:
print "returncode = %d" % job.returncode
print "---------- begin standard out ----------"
print standard_out
print "---------- begin standard err ----------"
print standard_err
print "---------- end standard err ----------"
self.output_queue.put(job)
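#
# Sketch (not part of the flow shown in this excerpt) of how the dispatch
# code typically wires these pieces together: jobs go in through a shared
# input queue, results come back on an output queue, and one "break" job
# per thread shuts the pool down. The thread count of 4 is illustrative.
#
#   input_queue = Queue.Queue()
#   output_queue = Queue.Queue()
#   threads = [worker_thread(input_queue, output_queue) for i in range(4)]
#   for t in threads:
#       t.start()
#   # ... input_queue.put(job) for each real job, then collect results ...
#   for t in threads:
#       b = Job()
#       b.set_is_break(True)
#       input_queue.put(b)
#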
#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
#
# Pull some interesting configuration information out of waf, primarily
# so we can know where executables can be found, but also to tell us what
# pieces of the system have been built. This will tell us what examples
# are runnable.
#
read_waf_config()
#
# Set the proper suffix.
#
global BUILD_PROFILE_SUFFIX
if BUILD_PROFILE == 'release':
BUILD_PROFILE_SUFFIX = ""
else:
BUILD_PROFILE_SUFFIX = "-" + BUILD_PROFILE
#
# Add the proper prefix and suffix to the test-runner name to
# match what is done in the wscript file.
#
test_runner_name = "%s%s-%s%s" % (APPNAME, VERSION, "test-runner", BUILD_PROFILE_SUFFIX)
#
# Run waf to make sure that everything is built, configured and ready to go
# unless we are explicitly told not to. We want to be careful about causing
# our users pain while waiting for extraneous stuff to compile and link, so
# we allow users who know what they're doing to not invoke waf at all.
#
if not options.nowaf:
#
# If the user is running the "kinds" or "list" options, there is an
# implied dependency on the test-runner since we call that program
# if those options are selected. We will exit after processing those
# options, so if we see them, we can safely only build the test-runner.
#
# If the user has constrained us to running only a particular type of
# file, we can only ask waf to build what we know will be necessary.
# For example, if the user only wants to run BVT tests, we only have
# to build the test-runner and can ignore all of the examples.
#
# If the user only wants to run a single example, then we can just build
# that example.
#
# If there is no constraint, then we have to build everything since the
# user wants to run everything.
#
if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds):
if sys.platform == "win32":
waf_cmd = "waf --target=test-runner"
else:
waf_cmd = "./waf --target=test-runner"
elif len(options.example):
if sys.platform == "win32":
waf_cmd = "waf --target=%s" % os.path.basename(options.example)
else:
waf_cmd = "./waf --target=%s" % os.path.basename(options.example)
else:
if sys.platform == "win32":
waf_cmd = "waf"
else:
waf_cmd = "./waf"
if options.verbose:
print "Building: %s" % waf_cmd
proc = subprocess.Popen(waf_cmd, shell = True)
proc.communicate()
if proc.returncode:
print >> sys.stderr, "Waf died. Not running tests"
return proc.returncode
#
# Dynamically set up paths.
#
make_paths()
#
# Get the information from the build status file.
#
build_status_file = os.path.join (NS3_BUILDDIR, 'build-status.py')
if os.path.exists(build_status_file):
ns3_runnable_programs = get_list_from_file(build_status_file, "ns3_runnable_programs")
ns3_runnable_scripts = get_list_from_file(build_status_file, "ns3_runnable_scripts")
else:
print >> sys.stderr, 'The build status file was not found. You must do waf build before running test.py.'
sys.exit(2)
#
# Make a dictionary that maps the name of a program to its path.
#
ns3_runnable_programs_dictionary = {}
for program in ns3_runnable_programs:
# Remove any directory names from path.
program_name = os.path.basename(program)
ns3_runnable_programs_dictionary[program_name] = program
#
# Generate the lists of examples to run as smoke tests in order to
# ensure that they remain buildable and runnable over time.
#
example_tests = []
example_names_original = []
python_tests = []
for directory in EXAMPLE_DIRECTORIES:
# Set the directories and paths for this example.
example_directory = os.path.join("examples", directory)
examples_to_run_path = os.path.join(example_directory, "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this example directory's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
for module in NS3_ENABLED_MODULES:
# Remove the "ns3-" from the module name.
module = module[len("ns3-"):]
# Set the directories and paths for this example.
module_directory = os.path.join("src", module)
example_directory = os.path.join(module_directory, "examples")
examples_to_run_path = os.path.join(module_directory, "test", "examples-to-run.py")
cpp_executable_dir = os.path.join(NS3_BUILDDIR, example_directory)
python_script_dir = os.path.join(example_directory)
# Parse this module's file.
parse_examples_to_run_file(
examples_to_run_path,
cpp_executable_dir,
python_script_dir,
example_tests,
example_names_original,
python_tests)
#
# If lots of logging is enabled, we can crash Python when it tries to
# save all of the text. We just don't allow logging to be turned on when
# test.py runs. If you want to see logging output from your tests, you
# have to run them using the test-runner directly.
#
os.environ["NS_LOG"] = ""
#
# There are a couple of options that imply we want to exit before starting
# up a bunch of threads and running tests. Let's detect these cases and
# handle them without doing all of the hard work.
#
if options.kinds:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-type-list")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
print standard_out
if options.list:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types --test-type=%s" % options.constrain)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --print-test-types")
(rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
list_items = standard_out.split('\n')
list_items.sort()
print "Test Type Test Name"
print "--------- ---------"
for item in list_items:
if len(item.strip()):
print item
example_names_original.sort()
for item in example_names_original:
print "example ", item
print
if options.kinds or options.list:
return
#
# We communicate results in two ways. First, a simple message relating
# PASS, FAIL, CRASH or SKIP is always written to the standard output. It
# is expected that this will be one of the main use cases. A developer can
# just run test.py with no options and see that all of the tests still
# pass.
#
# The second main use case is when detailed status is requested (with the
# --text or --html options). Typically this will be text if a developer
# finds a problem, or HTML for nightly builds. In these cases, an
# XML file is written containing the status messages from the test suites.
# This file is then read and translated into text or HTML. It is expected
# that nobody will really be interested in the XML, so we write it somewhere
# with a unique name (time) to avoid collisions. In case an error happens, we
# provide a runtime option to retain the temporary files.
#
# When we run examples as smoke tests, they are going to want to create
# lots and lots of trace files. We aren't really interested in the contents
# of the trace files, so we also just stash them off in the temporary dir.
# The retain option also causes these unchecked trace files to be kept.
#
date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime())
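# The timestamp gives each run of test.py its own results directory, e.g. a
# name of the form "2012-01-01-12-00-00-CUT" (illustrative) under TMP_OUTPUT_DIR,
# so concurrent or repeated runs do not clobber each other's XML output.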
if not os.path.exists(TMP_OUTPUT_DIR):
os.makedirs(TMP_OUTPUT_DIR)
testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time);
if not os.path.exists(testpy_output_dir):
os.makedirs(testpy_output_dir)
#
# Create the main output file and start filling it with XML. We need to
# do this since the tests will just append individual results to this file.
#
xml_results_file = os.path.join(testpy_output_dir, "results.xml")
f = open(xml_results_file, 'w')
f.write('<?xml version="1.0"?>\n')
f.write('<Results>\n')
f.close()
#
# We need to figure out what test suites to execute. We are either given one
# suite or example explicitly via the --suite or --example/--pyexample option,
# or we need to call into the test runner and ask it to list all of the available
# test suites. Further, we need to provide the constraint information if it
# has been given to us.
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py: run all of the suites and examples
# ./test.py --constrain=core: run all of the suites of all kinds
# ./test.py --constrain=unit: run all unit suites
# ./test.py --suite=some-test-suite: run a single suite
# ./test.py --example=examples/udp/udp-echo: run single example
# ./test.py --pyexample=examples/wireless/mixed-wireless.py: run python example
# ./test.py --suite=some-suite --example=some-example: run the single suite
#
# We can also use the --constrain option to provide an ordering of test
# execution quite easily.
#
if len(options.suite):
# See if this is a valid test suite.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
if options.suite in suites.split('\n'):
suites = options.suite + "\n"
else:
print >> sys.stderr, 'The test suite was not run because an unknown test suite name was requested.'
sys.exit(2)
elif len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain):
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % options.constrain)
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list")
(rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
else:
suites = ""
#
# suite_list will either be a single test suite name that the user has
# indicated she wants to run or a list of test suites provided by
# the test-runner possibly according to user provided constraints.
# We go through the trouble of setting up the parallel execution
# even in the case of a single suite to avoid having to process the
# results in two different places.
#
suite_list = suites.split('\n')
#
# Performance tests should only be run when they are requested,
# i.e. they are not run by default in test.py.
#
if options.constrain != 'performance':
# Get a list of all of the performance tests.
path_cmd = os.path.join("utils", test_runner_name + " --print-test-name-list --test-type=%s" % "performance")
(rc, performance_tests, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False)
performance_test_list = performance_tests.split('\n')
# Remove any performance tests from the suites list.
for performance_test in performance_test_list:
if performance_test in suite_list:
suite_list.remove(performance_test)
#
# We now have a possibly large number of test suites to run, so we want to
# run them in parallel. We're going to spin up a number of worker threads
# that will run our test jobs for us.
#
input_queue = Queue.Queue(0)
output_queue = Queue.Queue(0)
jobs = 0
threads=[]
#
# In Python 2.6 you can just use multiprocessing module, but we don't want
# to introduce that dependency yet; so we jump through a few hoops.
#
processors = 1
if sys.platform != "win32":
if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
processors = os.sysconf('SC_NPROCESSORS_ONLN')
else:
proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_results, stderr_results = proc.communicate()
if len(stderr_results) == 0:
processors = int(stdout_results)
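#
# Note: this hand-rolled detection is roughly what multiprocessing.cpu_count()
# would report; we keep it here to avoid the extra dependency mentioned above.
#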
#
# Now, spin up one thread per processor which will eventually mean one test
# per processor running concurrently.
#
for i in range(processors):
thread = worker_thread(input_queue, output_queue)
threads.append(thread)
thread.start()
#
# Keep track of some summary statistics
#
total_tests = 0
skipped_tests = 0
#
# We now have worker threads spun up, and a list of work to do. So, run
# through the list of test suites and dispatch a job to run each one.
#
# Dispatching will run with unlimited speed and the worker threads will
# execute as fast as possible from the queue.
#
# Note that we actually dispatch tests to be skipped, so all of the
# PASS, FAIL, CRASH and SKIP processing is done in the same place.
#
for test in suite_list:
test = test.strip()
if len(test):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test))
job.set_cwd(os.getcwd())
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
if (options.multiple):
multiple = ""
else:
multiple = " --stop-on-failure"
if (len(options.fullness)):
fullness = options.fullness.upper()
fullness = " --fullness=%s" % fullness
else:
fullness = " --fullness=QUICK"
path_cmd = os.path.join("utils", test_runner_name + " --test-name=%s%s%s" % (test, multiple, fullness))
job.set_shell_command(path_cmd)
if options.valgrind and test in core_valgrind_skip_tests:
job.set_is_skip(True)
# Skip tests that will fail if NSC is missing.
if not NSC_ENABLED and test in core_nsc_missing_skip_tests:
job.set_is_skip(True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# We've taken care of the discovered or specified test suites. Now we
# have to deal with examples run as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied. For example, if an example depends
# on NSC being configured by waf, that example should have a condition
# that evaluates to true if NSC is enabled. For example,
#
# ("tcp-nsc-zoo", "NSC_ENABLED == True"),
#
# In this case, the example "tcp-nsc-zoo" will only be run if we find the
# waf configuration variable "NSC_ENABLED" to be True.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# XXX As it stands, all of the trace files have unique names, and so file
# collisions can only happen if two instances of an example are running in
# two versions of the test.py process concurrently. We may want to create
# uniquely named temporary traces directories to avoid this problem.
#
# We need to figure out what examples to execute. We are either given one
# suite or example explicitly via the --suite or --example option, or we
# need to walk the list of examples looking for available example
# conditions.
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py: run all of the examples
# ./test.py --constrain=unit run no examples
# ./test.py --constrain=example run all of the examples
# ./test.py --suite=some-test-suite: run no examples
# ./test.py --example=some-example: run the single example
# ./test.py --suite=some-suite --example=some-example: run the single example
#
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "example":
if ENABLE_EXAMPLES:
for test, do_run, do_valgrind_run in example_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_programs_dictionary:
if eval(do_run):
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path(options.buildpath)
if options.valgrind and not eval(do_valgrind_run):
job.set_is_skip (True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.example):
# Add the proper prefix and suffix to the example name to
# match what is done in the wscript file.
example_name = "%s%s-%s%s" % (APPNAME, VERSION, options.example, BUILD_PROFILE_SUFFIX)
# Don't try to run this example if it isn't runnable.
if example_name not in ns3_runnable_programs_dictionary:
print "Example %s is not runnable." % example_name
else:
#
# If you tell me to run an example, I will try and run the example
# irrespective of any condition.
#
example_path = ns3_runnable_programs_dictionary[example_name]
example_path = os.path.abspath(example_path)
job = Job()
job.set_is_example(True)
job.set_is_pyexample(False)
job.set_display_name(example_path)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(example_path)
job.set_build_path(options.buildpath)
if options.verbose:
print "Queue %s" % example_name
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Run some Python examples as smoke tests. We have a list of all of
# the example programs it makes sense to try and run. Each example will
# have a condition associated with it that must evaluate to true for us
# to try and execute it. This is used to determine if the example has
# a dependency that is not satisfied.
#
# We don't care at all how the trace files come out, so we just write them
# to a single temporary directory.
#
# We need to figure out what python examples to execute. We are either
# given one pyexample explicitly via the --pyexample option, or we
# need to walk the list of python examples
#
# This translates into allowing the following options with respect to the
# suites
#
# ./test.py --constrain=pyexample run all of the python examples
# ./test.py --pyexample=some-example.py: run the single python example
#
if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0:
if len(options.constrain) == 0 or options.constrain == "pyexample":
if ENABLE_EXAMPLES:
for test, do_run in python_tests:
# Remove any arguments and directory names from test.
test_name = test.split(' ', 1)[0]
test_name = os.path.basename(test_name)
# Don't try to run this example if it isn't runnable.
if test_name in ns3_runnable_scripts:
if eval(do_run):
job = Job()
job.set_is_example(False)
job.set_is_pyexample(True)
job.set_display_name(test)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(test)
job.set_build_path("")
#
# Python programs and valgrind do not work and play
# well together, so we skip them under valgrind.
# We go through the trouble of doing all of this
# work to report the skipped tests in a consistent
# way through the output formatter.
#
if options.valgrind:
job.set_is_skip (True)
#
# The user can disable python bindings, so we need
# to pay attention to that and give some feedback
# that we're not testing them
#
if not ENABLE_PYTHON_BINDINGS:
job.set_is_skip (True)
if options.verbose:
print "Queue %s" % test
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
elif len(options.pyexample):
# Don't try to run this example if it isn't runnable.
example_name = os.path.basename(options.pyexample)
if example_name not in ns3_runnable_scripts:
print "Example %s is not runnable." % example_name
else:
#
# If you tell me to run a python example, I will try and run the example
# irrespective of any condition.
#
job = Job()
job.set_is_pyexample(True)
job.set_display_name(options.pyexample)
job.set_tmp_file_name("")
job.set_cwd(testpy_output_dir)
job.set_basedir(os.getcwd())
job.set_tempdir(testpy_output_dir)
job.set_shell_command(options.pyexample)
job.set_build_path("")
if options.verbose:
print "Queue %s" % options.pyexample
input_queue.put(job)
jobs = jobs + 1
total_tests = total_tests + 1
#
# Tell the worker threads to pack up and go home for the day. Each one
# will exit when it sees its is_break task.
#
for i in range(processors):
job = Job()
job.set_is_break(True)
input_queue.put(job)
#
# Now all of the tests have been dispatched, so all we have to do here
# in the main thread is to wait for them to complete. Keyboard interrupt
# handling is broken as mentioned above. We use a signal handler to catch
# sigint and set a global variable. When the worker threads sense this
# they stop doing real work and will just start throwing jobs back at us
# with is_break set to True. In this case, there are no real results so we
# ignore them. If there are real results, we always print PASS or FAIL to
# standard out as a quick indication of what happened.
#
passed_tests = 0
failed_tests = 0
crashed_tests = 0
valgrind_errors = 0
for i in range(jobs):
job = output_queue.get()
if job.is_break:
continue
if job.is_example or job.is_pyexample:
kind = "Example"
else:
kind = "TestSuite"
if job.is_skip:
status = "SKIP"
skipped_tests = skipped_tests + 1
else:
if job.returncode == 0:
status = "PASS"
passed_tests = passed_tests + 1
elif job.returncode == 1:
failed_tests = failed_tests + 1
status = "FAIL"
elif job.returncode == 2:
valgrind_errors = valgrind_errors + 1
status = "VALGR"
else:
crashed_tests = crashed_tests + 1
status = "CRASH"
if options.duration or options.constrain == "performance":
print "%s (%.3f): %s %s" % (status, job.elapsed_time, kind, job.display_name)
else:
print "%s: %s %s" % (status, kind, job.display_name)
if job.is_example or job.is_pyexample:
#
# Examples are the odd man out here. They are written without any
# knowledge that they are going to be run as a test, so we need to
# cook up some kind of output for them. We're writing an xml file,
# so we do some simple XML that says we ran the example.
#
# XXX We could add some timing information to the examples, i.e. run
# them through time and print the results here.
#
f = open(xml_results_file, 'a')
f.write('<Example>\n')
example_name = " <Name>%s</Name>\n" % job.display_name
f.write(example_name)
if status == "PASS":
f.write(' <Result>PASS</Result>\n')
elif status == "FAIL":
f.write(' <Result>FAIL</Result>\n')
elif status == "VALGR":
f.write(' <Result>VALGR</Result>\n')
elif status == "SKIP":
f.write(' <Result>SKIP</Result>\n')
else:
f.write(' <Result>CRASH</Result>\n')
f.write(' <Time real="%.3f"/>\n' % job.elapsed_time)
f.write('</Example>\n')
f.close()
else:
#
# If we're not running an example, we're running a test suite.
# These puppies are running concurrently and generating output
# that was written to a temporary file to avoid collisions.
#
# Now that we are executing sequentially in the main thread, we can
# concatenate the contents of the associated temp file to the main
# results file and remove that temp file.
#
# One thing to consider is that a test suite can crash just as
# well as any other program, so we need to deal with that
# possibility as well. If it ran correctly it will return 0
# if it passed, or 1 if it failed. In this case, we can count
# on the results file it saved being complete. If it crashed, it
# will return some other code, and the file should be considered
# corrupt and useless. If the suite didn't create any XML, then
# we're going to have to do it ourselves.
#
# Another issue is how to deal with a valgrind error. If we run
# a test suite under valgrind and it passes, we will get a return
# code of 0 and there will be a valid xml results file since the code
# ran to completion. If we get a return code of 1 under valgrind,
# the test case failed, but valgrind did not find any problems so the
# test case return code was passed through. We will have a valid xml
# results file here as well since the test suite ran. If we see a
# return code of 2, this means that valgrind found an error (we asked
# it to return 2 if it found a problem in run_job_synchronously) but
# the suite ran to completion so there is a valid xml results file.
# If the suite crashes under valgrind we will see some other error
# return code (like 139). If valgrind finds an illegal instruction or
# some other strange problem, it will die with its own strange return
# code (like 132). However, if the test crashes by itself, not under
# valgrind we will also see some other return code.
#
# If the return code is 0, 1, or 2, we have a valid xml file. If we
# get another return code, we have no xml and we can't really say what
# happened -- maybe the TestSuite crashed, maybe valgrind crashed due
# to an illegal instruction. If we get something besides 0-2, we assume
# a crash and fake up an xml entry. After this is all done, we still
# need to indicate a valgrind error somehow, so we fake up an xml entry
# with a VALGR result. Thus, in the case of a working TestSuite that
# fails valgrind, we'll see the PASS entry for the working TestSuite
# followed by a VALGR failing test suite of the same name.
#
if job.is_skip:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>SKIP</Result>\n')
f.write("</Test>\n")
f.close()
else:
if job.returncode == 0 or job.returncode == 1 or job.returncode == 2:
f_to = open(xml_results_file, 'a')
f_from = open(job.tmp_file_name)
f_to.write(f_from.read())
f_to.close()
f_from.close()
else:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>CRASH</Result>\n')
f.write("</Test>\n")
f.close()
if job.returncode == 2:
f = open(xml_results_file, 'a')
f.write("<Test>\n")
f.write(" <Name>%s</Name>\n" % job.display_name)
f.write(' <Result>VALGR</Result>\n')
f.write("</Test>\n")
f.close()
#
# We have all of the tests run and the results written out. One final
# bit of housekeeping is to wait for all of the threads to close down
# so we can exit gracefully.
#
for thread in threads:
thread.join()
#
# Back at the beginning of time, we started the body of an XML document
# since the test suites and examples were going to just write their
# individual pieces. So, we need to finish off and close out the XML
# document
#
f = open(xml_results_file, 'a')
f.write('</Results>\n')
f.close()
#
# Print a quick summary of events
#
print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests,
total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors)
#
# The last things to do are to translate the XML results file to "human
# readable form" if the user asked for it (or make an XML file somewhere)
#
if len(options.html):
translate_to_html(xml_results_file, options.html)
if len(options.text):
translate_to_text(xml_results_file, options.text)
if len(options.xml):
shutil.copyfile(xml_results_file, options.xml)
#
# Let the user know if they need to turn on tests or examples.
#
if not ENABLE_TESTS or not ENABLE_EXAMPLES:
print
if not ENABLE_TESTS:
print '*** Note: ns-3 tests are currently disabled. Enable them by adding'
print '*** "--enable-tests" to ./waf configure or modifying your .ns3rc file.'
print
if not ENABLE_EXAMPLES:
print '*** Note: ns-3 examples are currently disabled. Enable them by adding'
print '*** "--enable-examples" to ./waf configure or modifying your .ns3rc file.'
print
#
# Let the user know if they tried to use valgrind but it was not
# present on their machine.
#
if options.valgrind and not VALGRIND_FOUND:
print
print '*** Note: you are trying to use valgrind, but valgrind could not be found'
print '*** on your machine. All tests and examples will crash or be skipped.'
print
#
# If we have been asked to retain all of the little temporary files, we
# don't delete them. If we do delete the temporary files, delete only the
# directory we just created. We don't want to happily delete any retained
# directories, which will probably surprise the user.
#
if not options.retain:
shutil.rmtree(testpy_output_dir)
if passed_tests + skipped_tests == total_tests:
return 0 # success
else:
return 1 # catchall for general errors
def main(argv):
parser = optparse.OptionParser()
parser.add_option("-b", "--buildpath", action="store", type="string", dest="buildpath", default="",
metavar="BUILDPATH",
help="specify the path where ns-3 was built (defaults to the build directory for the current variant)")
parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="",
metavar="KIND",
help="constrain the test-runner by kind of test")
parser.add_option("-d", "--duration", action="store_true", dest="duration", default=False,
help="print the duration of each test suite and example")
parser.add_option("-e", "--example", action="store", type="string", dest="example", default="",
metavar="EXAMPLE",
help="specify a single example to run (no relative path is needed)")
parser.add_option("-u", "--update-data", action="store_true", dest="update_data", default=False,
help="If examples use reference data files, get them to re-generate them")
parser.add_option("-f", "--fullness", action="store", type="string", dest="fullness", default="QUICK",
metavar="FULLNESS",
help="choose the duration of tests to run: QUICK, EXTENSIVE, or TAKES_FOREVER, where EXTENSIVE includes QUICK and TAKES_FOREVER includes QUICK and EXTENSIVE (only QUICK tests are run by default)")
parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False,
help="run the test suites and examples using valgrind")
parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False,
help="print the kinds of tests available")
parser.add_option("-l", "--list", action="store_true", dest="list", default=False,
help="print the list of known tests")
parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False,
help="report multiple failures from test suites and test cases")
parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False,
help="do not run waf before starting testing")
parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="",
metavar="PYEXAMPLE",
help="specify a single python example to run (with relative path)")
parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False,
help="retain all temporary files (which are normally deleted)")
parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="",
metavar="TEST-SUITE",
help="specify a single test suite to run")
parser.add_option("-t", "--text", action="store", type="string", dest="text", default="",
metavar="TEXT-FILE",
help="write detailed test results into TEXT-FILE.txt")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="print progress and informational messages")
parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="",
metavar="HTML-FILE",
help="write detailed test results into HTML-FILE.html")
parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="",
metavar="XML-FILE",
help="write detailed test results into XML-FILE.xml")
global options
options = parser.parse_args()[0]
signal.signal(signal.SIGINT, sigint_hook)
return run_tests()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 | -8,879,720,097,523,576,000 | 39.474161 | 218 | 0.576649 | false |
urandu/gumbo-parser | python/gumbo/gumboc.py | 1 | 12165 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CTypes bindings for the Gumbo HTML5 parser.
This exports the raw interface of the library as a set of very thin ctypes
wrappers. It's intended to be wrapped by other libraries to provide a more
Pythonic API.
"""
__author__ = '[email protected] (Jonathan Tang)'
import sys
import contextlib
import ctypes
import os.path
import gumboc_tags
_name_of_lib = 'libgumbo.so'
if sys.platform.startswith('darwin'):
_name_of_lib = 'libgumbo.dylib'
elif sys.platform.startswith('win'):
_name_of_lib = "gumbo.dll"
try:
# First look for a freshly-built .so in the .libs directory, for development.
_dll = ctypes.cdll.LoadLibrary(os.path.join(
os.path.dirname(__file__), '..', '..', '.libs', _name_of_lib))
except OSError:
# PyPI or setuptools install, look in the current directory.
_dll = ctypes.cdll.LoadLibrary(os.path.join(
os.path.dirname(__file__), _name_of_lib))
except OSError:
# System library, on unix or mac osx
_dll = ctypes.cdll.LoadLibrary(_name_of_lib)
# Some aliases for common types.
_bitvector = ctypes.c_uint
_Ptr = ctypes.POINTER
class EnumMetaclass(type(ctypes.c_uint)):
def __new__(metaclass, name, bases, cls_dict):
cls = type(ctypes.c_uint).__new__(metaclass, name, bases, cls_dict)
if name == 'Enum':
return cls
try:
for i, value in enumerate(cls_dict['_values_']):
setattr(cls, value, cls.from_param(i))
except KeyError:
raise ValueError('No _values_ list found inside enum type.')
except TypeError:
raise ValueError('_values_ must be a list of names of enum constants.')
return cls
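# Each name in a subclass's _values_ list becomes a class attribute whose numeric
# value is its index in that list; for instance NodeType.ELEMENT compares equal to
# NodeType(1), given the _values_ ordering defined further below.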
def with_metaclass(mcls):
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
@with_metaclass(EnumMetaclass)
class Enum(ctypes.c_uint):
@classmethod
def from_param(cls, param):
if isinstance(param, Enum):
if param.__class__ != cls:
raise ValueError("Can't mix enums of different types")
return param
if param < 0 or param > len(cls._values_):
raise ValueError('%d is out of range for enum type %s; max %d.' %
(param, cls.__name__, len(cls._values_)))
return cls(param)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __hash__(self):
return hash(self.value)
def __repr__(self):
try:
return self._values_[self.value]
except IndexError:
raise IndexError('Value %d is out of range for %r' %
(self.value, self._values_))
class StringPiece(ctypes.Structure):
_fields_ = [
('data', _Ptr(ctypes.c_char)),
('length', ctypes.c_size_t),
]
def __len__(self):
return self.length
def __str__(self):
return ctypes.string_at(self.data, self.length)
class SourcePosition(ctypes.Structure):
_fields_ = [
('line', ctypes.c_uint),
('column', ctypes.c_uint),
('offset', ctypes.c_uint)
]
SourcePosition.EMPTY = SourcePosition.in_dll(_dll, 'kGumboEmptySourcePosition')
class AttributeNamespace(Enum):
URLS = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/1999/xlink',
'http://www.w3.org/XML/1998/namespace',
'http://www.w3.org/2000/xmlns',
]
_values_ = ['NONE', 'XLINK', 'XML', 'XMLNS']
def to_url(self):
return self.URLS[self.value]
class Attribute(ctypes.Structure):
_fields_ = [
('namespace', AttributeNamespace),
('name', ctypes.c_char_p),
('original_name', StringPiece),
('value', ctypes.c_char_p),
('original_value', StringPiece),
('name_start', SourcePosition),
('name_end', SourcePosition),
('value_start', SourcePosition),
('value_end', SourcePosition)
]
class Vector(ctypes.Structure):
_type_ = ctypes.c_void_p
_fields_ = [
('data', _Ptr(ctypes.c_void_p)),
('length', ctypes.c_uint),
('capacity', ctypes.c_uint)
]
class Iter(object):
def __init__(self, vector):
self.current = 0
self.vector = vector
def __iter__(self):
return self
def __next__(self):
# Python 3
if self.current >= self.vector.length:
raise StopIteration
obj = self.vector[self.current]
self.current += 1
return obj
def next(self):
# Python 2
return self.__next__()
def __len__(self):
return self.length
def __getitem__(self, i):
try:
# Python 2
numeric_types = (int, long)
except NameError:
# Python 3
numeric_types = int
if isinstance(i, numeric_types):
if i < 0:
i += self.length
if i > self.length:
raise IndexError
array_type = _Ptr(_Ptr(self._type_))
return ctypes.cast(self.data, array_type)[i].contents
return list(self)[i]
def __iter__(self):
return Vector.Iter(self)
Vector.EMPTY = Vector.in_dll(_dll, 'kGumboEmptyVector')
class AttributeVector(Vector):
_type_ = Attribute
class NodeVector(Vector):
# _type_ assigned later, to avoid circular references with Node
pass
class QuirksMode(Enum):
_values_ = ['NO_QUIRKS', 'QUIRKS', 'LIMITED_QUIRKS']
class Document(ctypes.Structure):
_fields_ = [
('children', NodeVector),
('has_doctype', ctypes.c_bool),
('name', ctypes.c_char_p),
('public_identifier', ctypes.c_char_p),
('system_identifier', ctypes.c_char_p),
('doc_type_quirks_mode', QuirksMode),
]
def __repr__(self):
return 'Document'
class Namespace(Enum):
URLS = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/2000/svg',
'http://www.w3.org/1998/Math/MathML',
]
_values_ = ['HTML', 'SVG', 'MATHML']
def to_url(self):
return self.URLS[self.value]
class Tag(Enum):
@staticmethod
def from_str(tagname):
text_ptr = ctypes.c_char_p(tagname.encode('utf-8'))
return _tag_enum(text_ptr)
_values_ = gumboc_tags.TagNames + ['UNKNOWN', 'LAST']
class Element(ctypes.Structure):
_fields_ = [
('children', NodeVector),
('tag', Tag),
('tag_namespace', Namespace),
('original_tag', StringPiece),
('original_end_tag', StringPiece),
('start_pos', SourcePosition),
('end_pos', SourcePosition),
('attributes', AttributeVector),
]
@property
def tag_name(self):
original_tag = StringPiece.from_buffer_copy(self.original_tag)
_tag_from_original_text(ctypes.byref(original_tag))
if self.tag_namespace == Namespace.SVG:
svg_tagname = _normalize_svg_tagname(ctypes.byref(original_tag))
if svg_tagname is not None:
return str(svg_tagname)
if self.tag == Tag.UNKNOWN:
if original_tag.data is None:
return ''
return str(original_tag).lower()
return _tagname(self.tag)
def __repr__(self):
return ('<%r>\n' % self.tag +
'\n'.join(repr(child) for child in self.children) +
'</%r>' % self.tag)
class Text(ctypes.Structure):
_fields_ = [
('text', ctypes.c_char_p),
('original_text', StringPiece),
('start_pos', SourcePosition)
]
def __repr__(self):
return 'Text(%r)' % self.text
class NodeType(Enum):
_values_ = ['DOCUMENT', 'ELEMENT', 'TEXT', 'CDATA',
'COMMENT', 'WHITESPACE', 'TEMPLATE']
class NodeUnion(ctypes.Union):
_fields_ = [
('document', Document),
('element', Element),
('text', Text),
]
class Node(ctypes.Structure):
# _fields_ set later to avoid a circular reference
def _contents(self):
# Python3 enters an infinite loop if you use an @property within
# __getattr__, so we factor it out to a helper.
if self.type == NodeType.DOCUMENT:
return self.v.document
elif self.type in (NodeType.ELEMENT, NodeType.TEMPLATE):
return self.v.element
else:
return self.v.text
@property
def contents(self):
return self._contents()
def __getattr__(self, name):
return getattr(self._contents(), name)
def __setattr__(self, name, value):
return setattr(self._contents(), name, value)
def __repr__(self):
return repr(self.contents)
Node._fields_ = [
('type', NodeType),
# Set the type to Node later to avoid a circular dependency.
('parent', _Ptr(Node)),
('next', _Ptr(Node)),
('prev', _Ptr(Node)),
('index_within_parent', ctypes.c_size_t),
# TODO(jdtang): Make a real list of enum constants for this.
('parse_flags', _bitvector),
('v', NodeUnion)
]
NodeVector._type_ = Node
class Options(ctypes.Structure):
_fields_ = [
# TODO(jdtang): Allow the Python API to set the allocator/deallocator
# function. Right now these are treated as opaque void pointers.
('allocator', ctypes.c_void_p),
('deallocator', ctypes.c_void_p),
('userdata', ctypes.c_void_p),
('tab_stop', ctypes.c_int),
('stop_on_first_error', ctypes.c_bool),
('max_errors', ctypes.c_int),
]
class Output(ctypes.Structure):
_fields_ = [
('document', _Ptr(Node)),
('root', _Ptr(Node)),
# TODO(jdtang): Error type.
('errors', Vector),
]
@contextlib.contextmanager
def parse(text, **kwargs):
options = Options()
context_tag = kwargs.get('container', Tag.LAST)
context_namespace = kwargs.get('container_namespace', Namespace.HTML)
for field_name, _ in Options._fields_:
try:
setattr(options, field_name, kwargs[field_name])
except KeyError:
setattr(options, field_name, getattr(_DEFAULT_OPTIONS, field_name))
# We have to manually take a reference to the input text here so that it
# outlives the parse output. If we let ctypes do it automatically on function
# call, it creates a temporary buffer which is destroyed when the call
# completes, and then the original_text pointers point into invalid memory.
text_ptr = ctypes.c_char_p(text.encode('utf-8'))
output = _parse_fragment(
ctypes.byref(options), text_ptr, len(text),
context_tag, context_namespace)
try:
yield output
finally:
_destroy_output(ctypes.byref(options), output)
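# A minimal usage sketch (illustrative only; as the module docstring notes, this
# low-level binding is normally wrapped by a more Pythonic layer):
#
#   with parse('<h1>Hello</h1>') as output:
#     root = output.contents.root.contents
#     print(root.tag)
#
# Because the context manager destroys the underlying C output on exit, any data
# needed afterwards must be copied out of the ctypes structures before the block ends.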
_DEFAULT_OPTIONS = Options.in_dll(_dll, 'kGumboDefaultOptions')
_parse_with_options = _dll.gumbo_parse_with_options
_parse_with_options.argtypes = [_Ptr(Options), ctypes.c_char_p, ctypes.c_size_t]
_parse_with_options.restype = _Ptr(Output)
_parse_fragment = _dll.gumbo_parse_fragment
_parse_fragment.argtypes = [
_Ptr(Options), ctypes.c_char_p, ctypes.c_size_t, Tag, Namespace]
_parse_fragment.restype = _Ptr(Output)
_tag_from_original_text = _dll.gumbo_tag_from_original_text
_tag_from_original_text.argtypes = [_Ptr(StringPiece)]
_tag_from_original_text.restype = None
_normalize_svg_tagname = _dll.gumbo_normalize_svg_tagname
_normalize_svg_tagname.argtypes = [_Ptr(StringPiece)]
_normalize_svg_tagname.restype = ctypes.c_char_p
_destroy_output = _dll.gumbo_destroy_output
_destroy_output.argtypes = [_Ptr(Options), _Ptr(Output)]
_destroy_output.restype = None
_tagname = _dll.gumbo_normalized_tagname
_tagname.argtypes = [Tag]
_tagname.restype = ctypes.c_char_p
_tag_enum = _dll.gumbo_tag_enum
_tag_enum.argtypes = [ctypes.c_char_p]
_tag_enum.restype = Tag
__all__ = ['StringPiece', 'SourcePosition', 'AttributeNamespace', 'Attribute',
'Vector', 'AttributeVector', 'NodeVector', 'QuirksMode', 'Document',
'Namespace', 'Tag', 'Element', 'Text', 'NodeType', 'Node',
'Options', 'Output', 'parse']
| apache-2.0 | 2,796,645,549,917,822,500 | 27.159722 | 80 | 0.63469 | false |
T3ddy-Bear/PiChat | requests/models.py | 1 | 33804 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
import sys
from io import UnsupportedOperation
from ._internal_utils import to_native_string, unicode_is_ascii
from .auth import HTTPBasicAuth
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .hooks import default_hooks
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .status_codes import codes
from .structures import CaseInsensitiveDict
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
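# Illustrative behaviour of the helper above: passing {'a': 1, 'b': [2, 3]}
# yields a query string such as 'a=1&b=2&b=3' (the ordering of keys depends on
# the container that was passed in).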
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
try:
from .packages import idna
except ImportError:
# tolerate the possibility of downstream repackagers unvendoring `requests`
# For more information, read: packages/__init__.py
import idna
sys.modules['requests.packages.idna'] = idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, collections.Mapping))
])
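# Generators and other iterables that are not strings, lists, tuples or mappings
# are treated as streaming bodies; everything else falls through to the
# form-encoding / multipart handling further below.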
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
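    # Illustrative usage sketch, not part of requests itself; it assumes a
    # caller that issued the request with stream=True (URL and filename are
    # hypothetical):
    #
    #   resp = requests.get('https://example.com/big.iso', stream=True)
    #   with open('big.iso', 'wb') as fh:
    #       for chunk in resp.iter_content(chunk_size=8192):
    #           if chunk:  # skip keep-alive chunks
    #               fh.write(chunk)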
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
| apache-2.0 | 8,118,881,208,333,657,000 | 35.192719 | 119 | 0.582475 | false |
aldarionsevero/datalogger-ROS-rasp | sensors/botbook_mcp3002.py | 1 | 2047 | # botbook_mcp3002.py - read analog values from mcp3002
# (c) BotBook.com - Karvinen, Karvinen, Valtokari
# Installing spidev:
# sudo apt-get update
# sudo apt-get -y install git python-dev
# git clone https://github.com/doceme/py-spidev.git
# cd py-spidev/
# sudo python setup.py install
import spidev # installation help in botbook_mcp3002.py comments
import time
def readAnalog(device=0, channel=0):
assert device in (1, 0)
assert channel in (1, 0)
# open spi
spi = spidev.SpiDev()
spi.open(0, device)
"""
    Protocol start bit (S), sgl/diff (D), odd/sign (C), MSBF (M)
Use leading zero for more stable clock cycle
0000 000S DCM0 0000 0000 0000
Sending 3 8bit packages so xpi.xfer2 will return the same amount.
start bit = 1
    sgl/diff = 1 SINGLE ENDED MODE (2 channel mode)
odd/sign = channel 0/1
MSBF = 0
"""
command = [1, (2 + channel) << 6, 0]
# 2 + channel shifted 6 to left
# 10 or 11 << 6 = 1000 0000 or 1100 0000
reply = spi.xfer2(command)
"""
Parse right bits from 24 bit package (3*8bit)
    We only need the data from the last 2 bytes,
    and there we can discard the last two bits to get the 10-bit value,
    as the MCP3002 resolution is 10 bits.
Discard reply[0] byte and start from reply[1] where our data starts
"""
value = reply[1] & 31
# 31 = 0001 1111 with & operation makes sure that we have all data from
# XXXX DDDD and nothing more. 0001 is for signed in next operation.
value = value << 6 # Move to left to make room for next piece of data.
# 000D DDDD << 6 = 0DDD DD00 0000
# Now we get the last of data from reply[2]
value = value + (reply[2] >> 2)
    # Here we discard the last two bits
    # DDDD DDXX >> 2 = 00DD DDDD
# 0DDD DD00 0000 + 00DD DDDD = 0DDD DDDD DDDD
spi.close()
return value
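# Worked example of the bit arithmetic above (illustrative only, the reply
# bytes are hypothetical): suppose spi.xfer2(command) returned
# reply = [0x00, 0x09, 0x5C]. Then:
#   reply[1] & 31            -> 0x09 & 0b11111  = 0b01001     (upper bits of the sample)
#   0b01001 << 6             -> 0b01001000000                 (shifted to make room)
#   reply[2] >> 2            -> 0x5C >> 2       = 0b010111    (lower 6 bits, padding dropped)
#   0b01001000000 + 0b010111 =  0b01001010111   = 599         (value returned by readAnalog)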
def main():
# read channel 0 on device 0
value = readAnalog(0, 0)
print value
time.sleep(10)
if __name__ == "__main__":
main()
| mit | -913,414,466,716,447,900 | 30.492063 | 75 | 0.622374 | false |
kasahorow/kwl | kwl2text/parser_test.py | 1 | 15858 | #!/usr/bin/env python
# coding: utf-8
import kwl2text
import semantics as s
import unittest
class KWLTest(unittest.TestCase):
def setUp(self):
self.psr = kwl2text.kwl2textParser()
self.sem = s.Semantics()
self.maxDiff = None
self.adj = 'adj:red'
self.nom = 'nom:food'
def testToken(self):
alpha = 'abc'
number = '12'
sem_alpha = {'t': 'alpha', 'v': 'abc'}
sem_number = {'t': 'number', 'v': '12'}
self.assertEquals(number,
self.psr.parse(number, rule_name='token',
parseinfo=True))
self.assertEquals("abc",
self.psr.parse(alpha, rule_name='token',
parseinfo=True))
self.assertEquals(number,
self.psr.parse(number, rule_name='token',
parseinfo=True))
self.assertEquals("abc",
self.psr.parse(alpha, rule_name='token',
parseinfo=True))
# Test Semantics
self.assertEquals(sem_number,
self.psr.parse(number, rule_name='token',
semantics = self.sem,
parseinfo=True))
self.assertEquals(sem_alpha,
self.psr.parse(alpha, rule_name='token',
semantics = self.sem,
parseinfo=True))
def testWord(self):
noun = 'nom:dog'
verb = 'act:love'
pronoun = 'pro:we'
semantic_noun = {'t': 'nom', 'v': {'t': 'alpha', 'v': u'dog'} }
semantic_verb = {'t': 'act', 'v': {'t': 'alpha', 'v': u'love'} }
semantic_pronoun = {'t': 'pro', 'v': {'t': 'alpha', 'v': u'we'} }
self.assertEquals(semantic_noun,
self.psr.parse(noun, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_verb,
self.psr.parse(verb, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_pronoun,
self.psr.parse(pronoun, rule_name='expression',
semantics = self.sem,
parseinfo=True))
def testFormatting(self):
defn = 'defn(%s)' % self.adj
sample = 'sample(%s)' % self.nom
quote = 'quote(raw(1 2 3))'
semantic_defn = {'t': u'defn', 'v': {'t': u'adj', 'v': {'t': 'alpha', 'v': u'red'}}}
semantic_sample = {'t': u'sample', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}
semantic_quote = {'t': u'quote', 'v': {'t': u'raw', 'v': u'1 2 3'}}
self.assertEquals(semantic_defn,
self.psr.parse(defn, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_sample,
self.psr.parse(sample, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_quote,
self.psr.parse(quote, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
def testPhrase(self):
adj = self.adj
nom = self.nom
pre = 'pre:for'
act = 'act:love'
adj_nom = 'adj:good_nom:dog;'
pos_nom = 'pos:his_nom:country'
det_adj_nom = 'det:the(adj:good_nom:dog)'
conjugation = 'ydy(tu(act:walk))'
semantic_adj = {'t': u'adj', 'v': {'t': 'alpha', 'v': u'red'}}
semantic_nom = {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}
semantic_pre = {'t': u'pre', 'v': {'t': 'alpha', 'v': u'for'}}
semantic_act = {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}
semantic_adj_nom = {'t': 'adj_nom', 'v': [{'t': u'adj', 'v': {'t': 'alpha', 'v': u'good'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'dog'}}]}
semantic_pos_nom = {'t': 'pos_nom', 'v': [{'t': u'pos', 'v': {'t': 'alpha', 'v': u'his'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'country'}}]}
semantic_det_adj_nom = {'t': 'det_adj_nom',
'v': [
{'t': u'det', 'v': {'t': 'alpha', 'v': u'the'}},
{'t': 'adj_nom', 'v': [{'t': u'adj', 'v': {'t': 'alpha', 'v': u'good'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'dog'}}]}]}
semantic_conjugation = {'t': u'ydy',
'v': {'t': u'tu',
'v': {'t': u'act',
'v': {'t': 'alpha', 'v': u'walk'}}}}
self.assertEquals(semantic_adj,
self.psr.parse(adj, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_nom,
self.psr.parse(nom, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_pre,
self.psr.parse(pre, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_act,
self.psr.parse(act, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_adj_nom,
self.psr.parse(adj_nom, 'expression')) # as in "good dog"
self.assertEquals(semantic_pos_nom,
self.psr.parse(pos_nom, 'expression')) # as in "his country"
self.assertEquals(semantic_det_adj_nom,
self.psr.parse(det_adj_nom, 'expression')) # as in "the good dog"
self.assertEquals(semantic_conjugation,
self.psr.parse(conjugation, 'expression')) # as in "walked"
def testExpression(self):
subject_verb = 'pro:I act:love'
verb_object = 'act:love nom:food'
subject_verb_object = 'pro:I act:love nom:food'
semantic_s_v = {'t': 's_v',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}}]}
semantic_v_o = {'t': 'v_o',
'v': [{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}
semantic_s_v_o = {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}
self.assertEquals(semantic_s_v,
self.psr.parse(subject_verb, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_v_o,
self.psr.parse(verb_object, rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_s_v_o,
self.psr.parse(subject_verb_object, rule_name='expression',
semantics = self.sem,
parseinfo=True))
def testSentence(self):
statement = 'pro:I act:love nom:food.'
question = 'pro:I act:love nom:food?'
command = 'pro:I act:love nom:food!'
semantic_statement = {'t': 'statement', 'v': {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}}
semantic_question = {'t': 'question', 'v': {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}}
semantic_command = {'t': 'command', 'v': {'t': 's_v_o',
'v': [{'t': 'subject', 'v': {'t': u'pro', 'v': {'t': 'alpha', 'v': u'I'}}},
{'t': 'verb', 'v': {'t': u'act', 'v': {'t': 'alpha', 'v': u'love'}}},
{'t': 'object', 'v': {'t': u'nom', 'v': {'t': 'alpha', 'v': u'food'}}}]}}
self.assertEquals(semantic_statement,
{'t': 'statement', 'v':self.psr.parse(statement.replace('.', ''), rule_name='sentence',
semantics = self.sem,
parseinfo=True)})
self.assertEquals(semantic_statement,
self.psr.parse(statement, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_question,
self.psr.parse(question, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
self.assertEquals(semantic_command,
self.psr.parse(command, rule_name='sentence',
semantics = self.sem,
parseinfo=True))
def testKWL2Text(self):
story = open('test.kwl', 'r').read()
t = self.psr.parse(story, rule_name='kwl2text',
semantics = self.sem,
parseinfo=True)
self.assertEquals(len(story.split(';')), 1 + len(t['v'])) # Contains N KWL sentences
def testDate(self):
self.assertEquals({'t':'date', 'v': '2015-11-11'},
self.psr.parse("date(2015-11-11)", rule_name='raw',
semantics = self.sem,
parseinfo=True))
def testRaw(self):
self.assertEquals('raw(dog)',
self.psr.parse("raw(dog)", rule_name='raw',
parseinfo=True))
self.assertEquals('raw(Obuor)',
self.psr.parse("raw(Obuor)", rule_name='raw',
parseinfo=True))
self.assertEquals('raw(13:4)',
self.psr.parse("raw(13:4)", rule_name='raw',
parseinfo=True))
self.assertEquals({'t':'raw', 'v': '13:4'},
self.psr.parse("raw(13:4)", rule_name='raw',
semantics = self.sem,
parseinfo=True))
self.assertEquals({'t': 'raw', 'v': u'13:4'},
self.psr.parse("raw(13:4)", rule_name='expression',
semantics = self.sem,
parseinfo=True))
self.assertEquals({'t': 'nom_raw', 'v': [{'t': 'nom', 'v': {'t': 'alpha', 'v': u'dog'}}, {'t': u'raw', 'v': u'13:4'}]},
self.psr.parse("nom:dog_raw(13:4)", rule_name='expression',
semantics = self.sem,
parseinfo=True))
def testForcedGrouping(self):
kwl_text = 'nom:wealth'
p1 = self.psr.parse(kwl_text, rule_name='expression', semantics=self.sem)
kwl_text2 = '{ nom:wealth }'
p2 = self.psr.parse(kwl_text2, rule_name='expression', semantics=self.sem)
self.assertEquals(p1, p2)
kwl_text = 'nom:wealth and nom:happiness'
p1 = self.psr.parse(kwl_text, rule_name='expression', semantics=self.sem)
kwl_text2 = '{nom:wealth and nom:happiness}'
p2 = self.psr.parse(kwl_text2, rule_name='expression', semantics=self.sem)
self.assertEquals(p1, p2)
kwl_text = 'pro:it tdy(il(act:bring)) nom:wealth'
p1 = self.psr.parse(kwl_text, rule_name='expression', semantics=self.sem)
kwl_text2 = '{pro:it} tdy(il(act:bring)) nom:wealth'
p2 = self.psr.parse(kwl_text2, rule_name='expression', semantics=self.sem)
#print '\nP2 = ', p2
self.assertEquals(p1, p2)
kwl_text3 = 'pro:it {tdy(il(act:bring))} nom:wealth'
p3 = self.psr.parse(kwl_text3, rule_name='expression', semantics=self.sem)
#print '\nP3 = ', p3
self.assertEquals(p1, p3)
kwl_text4 = 'pro:it tdy(il(act:bring)) {nom:wealth}'
p4 = self.psr.parse(kwl_text4, rule_name='expression', semantics=self.sem)
#print '\nP4 = ', p4
self.assertEquals(p1, p4)
kwl_text5 = '{pro:it} {tdy(il(act:bring))} {nom:wealth}'
p5 = self.psr.parse(kwl_text5, rule_name='expression', semantics=self.sem)
#print '\nP5 = ', p5
self.assertEquals(p1, p5)
kwl_text6 = 'pro:you tdy(il(act:bring)) {nom:wealth and nom:happiness}'
p6 = self.psr.parse(kwl_text6, rule_name='expression', semantics=self.sem)
#print '\nP6 = ', kwl_text6, p6
kwl_text7 = 'pro:you {tdy(il(act:go)) and tdy(il(act:bring))} {nom:wealth and nom:happiness}'
p7 = self.psr.parse(kwl_text7, rule_name='expression', semantics=self.sem)
kwl_text8 = '{pro:you and pro:I} {tdy(nous(act:go)) and tdy(nous(act:bring))} {nom:wealth and nom:happiness}'
p8 = self.psr.parse(kwl_text8, rule_name='expression', semantics=self.sem)
kwl_text9 = 'pro:you tdy(il(act:bring)) {nom:wealth and nom:happiness}'
p9 = self.psr.parse(kwl_text9, rule_name='expression', semantics=self.sem)
self.assertEquals(
{'t': 's_v_o',
'v': [{'t': 'subject',
'v': {u't': u'pro', u'v': {'t': 'alpha', 'v': u'you'}}},
{'t': 'verb', 'v': {'t': u'tdy', 'v': {'t': u'il', 'v': {u't': u'act', u'v': {'t': 'alpha', 'v': u'bring'}}}}},
{'t': 'object', 'v': {'t': u'and', 'v': [{u't': u'nom', u'v': {'t': 'alpha', 'v': u'wealth'}}, {u't': u'nom', u'v': {'t': 'alpha', 'v': u'happiness'}}]}}]},
p6)
def testJoins(self):
and_nouns = 'nom:eagle and nom:bird;'
ast_and_nouns = [{u'v': u'eagle', u't': u'nom'}, u'and', {u'v': u'bird', u't': u'nom'}]
sem_and_nouns = {'t': u'and', 'v': [{'t': u'nom', 'v': {'t': 'alpha', 'v': u'eagle'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'bird'}}]}
or_nouns = 'nom:eagle or nom:bird;'
ast_or_nouns = [{u'v': u'eagle', u't': u'nom'}, u'or', {u'v': u'bird', u't': u'nom'}]
sem_or_nouns = {'t': u'or', 'v': [{'t': u'nom', 'v': {'t': 'alpha', 'v': u'eagle'}}, {'t': u'nom', 'v': {'t': 'alpha', 'v': u'bird'}}]}
ifthen_nouns = 'if nom:eagle then nom:bird'
ast_ifthen_nouns = [{u'v': u'eagle', u't': u'nom'}, u'then', {u'v': u'bird', u't': u'nom'}]
self.assertEquals(ast_and_nouns,
self.psr.parse(and_nouns, 'conjunction'))
self.assertEquals(ast_or_nouns,
self.psr.parse(or_nouns, 'conjunction'))
self.assertEquals(ast_ifthen_nouns,
self.psr.parse(ifthen_nouns, 'conjunction'))
sen = ' pos:his_nom:birthday tdy(elle(act:be)) date(1982-01-30)'
sen = 'pro:it tdy(i(act:use)) plural(adj:neural_nom:net), {inf(act:think)} like(det:a_nom:human)'
sen = '{inf(tu(act:think))} like(det:a_nom:human)'
sen = 'title({inf(tu(act:think))} like(det:a_nom:human))'
sen = 'quote(raw(1 2 3))'
sen = 'title(pro:you) act:have adj:three_plural(nom:part): nom:body and nom:mind and nom:spirit'
#print 'TEST_PARSE =', self.psr.parse(sen, rule_name='sentence', semantics=self.sem)
# Once semantics are turned on, all subsequent calls of the parser have semantics
self.assertEquals(sem_and_nouns,
self.psr.parse(and_nouns,
rule_name='conjunction', semantics=self.sem))
self.assertEquals(sem_or_nouns,
self.psr.parse(or_nouns,
rule_name='conjunction', semantics=self.sem))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -9,083,434,866,117,134,000 | 46.76506 | 185 | 0.480388 | false |
maferelo/saleor | saleor/graphql/webhook/resolvers.py | 1 | 1820 | import graphene
from graphql_jwt.exceptions import PermissionDenied
from ...core.permissions import WebhookPermissions
from ...webhook import models, payloads
from ...webhook.event_types import WebhookEventType
from ..utils import sort_queryset
from .sorters import WebhookSortField
from .types import Webhook, WebhookEvent
def resolve_webhooks(info, sort_by=None, **_kwargs):
service_account = info.context.service_account
if service_account:
qs = models.Webhook.objects.filter(service_account=service_account)
else:
user = info.context.user
if not user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
raise PermissionDenied()
qs = models.Webhook.objects.all()
return sort_queryset(qs, sort_by, WebhookSortField)
def resolve_webhook(info, webhook_id):
service_account = info.context.service_account
if service_account:
_, webhook_id = graphene.Node.from_global_id(webhook_id)
return service_account.webhooks.filter(id=webhook_id).first()
user = info.context.user
if user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
return graphene.Node.get_node_from_global_id(info, webhook_id, Webhook)
raise PermissionDenied()
def resolve_webhook_events():
return [
WebhookEvent(event_type=event_type[0])
for event_type in WebhookEventType.CHOICES
]
def resolve_sample_payload(info, event_name):
service_account = info.context.service_account
required_permission = WebhookEventType.PERMISSIONS.get(event_name)
if service_account and service_account.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
if info.context.user.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
raise PermissionDenied()
| bsd-3-clause | 3,332,239,013,513,664,000 | 36.142857 | 79 | 0.734066 | false |
mattmillr/utaka | src/rest/UtakaBucket.py | 1 | 7437 | #Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from mod_python import apache
import xml.dom.minidom
import utaka.src.core.BucketWithACPAndLog as Bucket
import utaka.src.accessControl.BucketACP as BucketACP
import utaka.src.accessControl.AcpXml as AcpXml
import utaka.src.exceptions.MethodNotAllowedException as MethodNotAllowedException
import utaka.src.exceptions.BadRequestException as BadRequestException
class UtakaBucket:
def __init__(self, utakaReq):
self.utakaReq = utakaReq
def handleRequest(self):
if 'torrent' in self.utakaReq.subresources:
raise BadRequestException.RequestTorrentOfBucketErrorException()
if 'acl' in self.utakaReq.subresources:
if self.utakaReq.req.method == 'GET':
operation = self.__getAclOperation
elif self.utakaReq.req.method == 'PUT':
operation = self.__putAclOperation
else:
raise MethodNotAllowedException.ACLMethodNotAllowedException(self.utakaReq.req.method)
elif 'logging' in self.utakaReq.subresources:
if self.utakaReq.req.method == 'GET':
operation = self.__getLoggingOperation
elif self.utakaReq.req.method == 'PUT':
raise MethodNotAllowedException.BucketLogginStatusMethodException
else:
raise MethodNotAllowedException.LoggingStatusMethodNotAllowedException(self.utakaReq.req.method)
elif self.utakaReq.req.method == 'GET':
operation = self.__getOperation
elif self.utakaReq.req.method == 'PUT':
operation = self.__putOperation
elif self.utakaReq.req.method == 'DELETE':
operation = self.__deleteOperation
elif self.utakaReq.req.method == 'POST':
operation = self.__postOperation
elif self.utakaReq.req.method == 'COPY':
operation = self.__copyOperation
else:
raise MethodNotAllowedException.BucketMethodNotAllowedException(self.utakaReq.req.method)
return operation()
def __copyOperation(self):
pass
def __postOperation(self):
pass
def __deleteOperation(self):
result = Bucket.destroyBucket(bucket=self.utakaReq.bucket, user=self.utakaReq.user)
self.utakaReq.req.status = 204
def __putOperation(self):
cannedACL = self.utakaReq.customHeaderTable.get('acl', 'private')
acp = {}
acp['owner'] = {'userid':self.utakaReq.user}
acl = [{'grantee':{'userid':self.utakaReq.user}, 'permission':'FULL_CONTROL'}]
if cannedACL == 'public-read':
acl.append({'grantee':{'userid':1}, 'permission':'read'})
elif cannedACL == 'public-read-write':
acl.append({'grantee':{'userid':1}, 'permission':'read'})
acl.append({'grantee':{'userid':1}, 'permission':'write'})
elif cannedACL == 'authenticated-read':
acl.append({'grantee':{'userid':2}, 'permission':'read'})
elif cannedACL != 'private':
'''throw error'''
acp['acl'] = acl
result = Bucket.setBucket(bucket = self.utakaReq.bucket, user = self.utakaReq.user, accessControlPolicy = acp)
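		# Illustrative sketch, not from the original code: for a PUT with the
		# canned ACL 'public-read' and a (hypothetical) requesting user id 42,
		# the accessControlPolicy passed to Bucket.setBucket above would be:
		#   {'owner': {'userid': 42},
		#    'acl': [{'grantee': {'userid': 42}, 'permission': 'FULL_CONTROL'},
		#            {'grantee': {'userid': 1}, 'permission': 'read'}]}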
def __getOperation(self):
getBucketParams = {'name':self.utakaReq.bucket}
for param in 'prefix', 'marker', 'max-keys', 'delimiter':
if param in self.utakaReq.subresources:
getBucketParams[param] = self.utakaReq.subresources[param][0]
if 'max-keys' not in getBucketParams:
getBucketParams['max-keys'] = 1000
res = Bucket.getBucket(bucket = self.utakaReq.bucket, user = self.utakaReq.user,
prefix = getBucketParams.get('prefix'), marker = getBucketParams.get('marker'),
maxKeys = getBucketParams.get('max-keys'), delimiter = getBucketParams.get('delimiter'))
getBucketParams['isTruncated'] = str(res[2])
self.utakaReq.req.content_type = 'application/xml'
self.utakaReq.write(self.__getXMLResponse(getBucketParams, res[0], res[1]))
def __putLoggingOperation(self):
pass
def __getLoggingOperation(self):
Bucket.getBucketLogStatus(user=self.utakaReq.user, bucket=self.utakaReq.bucket)
def __putAclOperation(self):
#READ BODY
acp = AcpXml.fromXML(self.utakaReq.req.read())
Bucket.setBucketACP(user=self.utakaReq.user, bucket=self.utakaReq.bucket, accessControlPolicy=acp)
pass
def __getAclOperation(self):
bucket_acp = Bucket.getBucketACP(bucket=self.utakaReq.bucket, user=self.utakaReq.user)
if len(bucket_acp) == 0:
'''bucket not found, throw error'''
else:
			self.utakaReq.req.content_type = 'application/xml'
self.utakaReq.write(AcpXml.toXML(bucket_acp))
def __getXMLResponse(self, bucketDictionary, contentDictionaryList, commonPrefixesList):
doc = xml.dom.minidom.Document()
listBucketEl = doc.createElement("ListBucketResult")
listBucketEl.setAttribute('xmlns', 'http://s3.amazonaws.com/doc/2006-03-01/')
nameEl = doc.createElement("Name")
nameEl.appendChild(doc.createTextNode(bucketDictionary.get('name')))
listBucketEl.appendChild(nameEl)
prefixEl = doc.createElement("Prefix")
prefixEl.appendChild(doc.createTextNode(bucketDictionary.get('prefix', '')))
listBucketEl.appendChild(prefixEl)
markerEl = doc.createElement("Marker")
markerEl.appendChild(doc.createTextNode(bucketDictionary.get('marker', '')))
listBucketEl.appendChild(markerEl)
maxkeysEl = doc.createElement("MaxKeys")
maxkeysEl.appendChild(doc.createTextNode(str(bucketDictionary.get('max-keys', ''))))
listBucketEl.appendChild(maxkeysEl)
truncatedEl= doc.createElement("IsTruncated")
truncatedEl.appendChild(doc.createTextNode(bucketDictionary.get('isTruncated', '')))
listBucketEl.appendChild(truncatedEl)
for val in contentDictionaryList:
contentsEl = doc.createElement("Contents")
keyEl = doc.createElement("Key")
keyEl.appendChild(doc.createTextNode(val['key']))
contentsEl.appendChild(keyEl)
lastModifiedEl = doc.createElement("LastModified")
lastModifiedEl.appendChild(doc.createTextNode(val['lastModified']))
contentsEl.appendChild(lastModifiedEl)
eTagEl = doc.createElement("ETag")
eTagEl.appendChild(doc.createTextNode(val['eTag']))
contentsEl.appendChild(eTagEl)
sizeEl = doc.createElement("Size")
sizeEl.appendChild(doc.createTextNode(str(val['size'])))
contentsEl.appendChild(sizeEl)
storageClassEl = doc.createElement("StorageClass")
storageClassEl.appendChild(doc.createTextNode("STANDARD"))
contentsEl.appendChild(storageClassEl)
ownerEl = doc.createElement("Owner")
ownerIdEl = doc.createElement("ID")
ownerIdEl.appendChild(doc.createTextNode(str(val['owner']['id'])))
ownerNameEl = doc.createElement("DisplayName")
ownerNameEl.appendChild(doc.createTextNode(val['owner']['name']))
ownerEl.appendChild(ownerIdEl)
ownerEl.appendChild(ownerNameEl)
contentsEl.appendChild(ownerEl)
listBucketEl.appendChild(contentsEl)
if commonPrefixesList:
commonPrefixesEl = doc.createElement("CommonPrefixes")
for val in commonPrefixesList:
commonPrefixEl = doc.createElement("Prefix")
commonPrefixEl.appendChild(doc.createTextNode(val))
commonPrefixesEl.appendChild(commonPrefixEl)
listBucketEl.appendChild(commonPrefixesEl)
doc.appendChild(listBucketEl)
return doc.toxml('utf-8') | apache-2.0 | -5,196,733,002,555,355,000 | 36.756345 | 112 | 0.751513 | false |
krisb78/django-redactorjs | testproject/settings.py | 1 | 5391 | import os
# Django settings for testproject project.
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'redactor_testproject', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(
PROJECT_PATH,
'media'
)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(
PROJECT_PATH,
'static'
)
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
(
'redactor-js',
os.path.join(
PROJECT_PATH,
'..',
'redactor-js'
)
),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '&6dx22n2y&2k^i+7j3*d+y-yf(hv6e8qi^8gq#5-jo(h7im@7u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'app',
'redactorjs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| bsd-3-clause | -4,929,814,843,324,972,000 | 31.672727 | 109 | 0.674643 | false |
auduny/chains | lib/chains/services/sonos/__init__.py | 1 | 7848 | from threading import Timer
import chains.service
from chains.common import log
import time, datetime, re, copy
from Queue import Empty
from soco import SoCo, discover
from soco.events import event_listener
class SonosService(chains.service.Service):
def onInit(self):
# Discover Sonos players
self.zones = list(discover())
# Set default player
self.defaultZone = None
if self.zones:
defaultZone = self.config.get('defaultzone')
if defaultZone:
self.defaultZone = self.getZone(defaultZone)
else:
self.defaultZone = self.zones[0]
self.interval = None
self.subscribers = []
self.registerForEvents()
def onStart(self):
while True:
self.checkForNewEvents()
def getZone(self, nameOrId):
for zone in self.zones:
if zone.uid == nameOrId:
return zone
if zone.player_name == nameOrId:
return zone
return self.defaultZone
def action_play(self, zone=None):
zone = self.getZone(zone)
zone.play()
def action_stop(self, zone=None):
zone = self.getZone(zone)
zone.stop()
def action_playUri(self, uri, zone=None, volume=None, playmode=None):
zone = self.getZone(zone)
if volume:
log.info('vol:%s'%volume)
zone.volume = int(volume)
if playmode:
log.info('mode:%s'%playmode)
zone.play_mode = playmode
log.info('zone: %s' % zone)
log.info('playUri: %s' % uri)
zone.play_uri(uri)
def action_setPlayMode(self, mode, zone=None):
zone = self.getZone(zone)
zone.play_mode = mode
    def action_getPlaylistNames(self, zone=None):
        zone = self.getZone(zone)
        playlists = zone.get_music_library_information('sonos_playlists')
result = []
for playlist in playlists:
result.append( playlist.title )
return result
    def action_getPlaylistDicts(self, zone=None):
        zone = self.getZone(zone)
        playlists = zone.get_music_library_information('sonos_playlists')
result = []
for playlist in playlists:
result.append( playlist.to_dict )
return result
    def action_getPlaylistTracks(self, playlist_name, zone=None):
        zone = self.getZone(zone)
        playlists = zone.get_music_library_information('sonos_playlists')
result = []
for playlist in playlists:
if playlist.title == playlist_name:
track_list = zone.browse(playlist)
for item in track_list:
result.append({
                        'title': item.title,
                        'album': item.album,
                        'artist': item.creator,
                        'uri': item.uri,
                        'art': item.album_art_uri
})
return result
return result
def action_playPlaylist(self, name, zone=None, volume=None, playmode=None):
zone = self.getZone(zone)
if volume:
log.info('vol:%s'%volume)
zone.volume = int(volume)
if playmode:
log.info('mode:%s'%playmode)
zone.play_mode = playmode
log.info('pls')
playlists = zone.get_sonos_playlists()
if not playlists:
return False
found = None
for playlist in playlists:
if playlist.title.lower().strip() == name.lower().strip():
found = playlist
break
if not found:
return False
zone.clear_queue()
zone.add_to_queue(playlist)
zone.play_from_queue(0)
def action_setVolume(self, volume, zone=None):
zone = self.getZone(zone)
zone.volume = int(volume)
def action_getVolume(self, zone=None):
zone = self.getZone(zone)
return zone.volume
def action_modifyVolume(self,amount, zone=None):
zone = self.getZone(zone)
amount = int(amount)
zone.volume += amount
def action_volumeUp(self, zone=None):
zone = self.getZone(zone)
zone.volume += 1
def action_volumeDown(self, zone=None):
zone = self.getZone(zone)
zone.volume -= 1
def action_getTrackInfo(self, zone=None):
zone = self.getZone(zone)
info = zone.get_current_track_info()
del info['metadata']
return info
def action_getTrackMetaData(self, zone=None):
zone = self.getZone(zone)
info = zone.get_current_track_info()
return info['metadata']
def action_getTrackUri(self, zone=None):
zone = self.getZone(zone)
info = zone.get_current_track_info()
if not info:
return None
return info.get('uri')
def action_clearQueue(self, zone=None):
zone = self.getZone(zone)
zone.clear_queue()
def action_join(self, slaveZone, masterZone=None):
slaveZone = self.getZone(slaveZone)
masterZone = self.getZone(masterZone)
slaveZone.join(masterZone)
def action_unjoin(self, slaveZone):
slaveZone = self.getZone(slaveZone)
slaveZone.unjoin()
def action_list(self):
result = []
for zone in self.zones:
#result.append({
# 'name': zone.player_name,
# 'id': zone.uid
#})
result.append(zone.get_speaker_info())
return result
def sendEventWrapper(self, property, zone, event):
name = zone.player_name
self.sendEvent(property, event, {
'device': name,
'type': 'speaker',
'location': name
})
def registerForEvent(self, zone=None):
if (zone == None): return
controlSubscriber = zone.renderingControl.subscribe()
soundSubscriber = zone.avTransport.subscribe()
self.subscribers.append({
'zone': zone,
'control': controlSubscriber,
'sound': soundSubscriber
})
def registerForEvents(self):
for zone in self.zones:
self.registerForEvent(zone)
def parseEvents(self, zone):
for subscriber in [zone['control'], zone['sound']]:
try:
event = subscriber.events.get(timeout=0.5)
if 'transport_state' in event.variables:
self.sendEventWrapper('state', zone['zone'], { 'transport': {
'value': event.variables['transport_state'],
'actions': ['play', 'stop']
}})
if 'volume' in event.variables and 'Master' in event.variables['volume']:
volume = int(event.variables['volume']['Master'])
self.sendEventWrapper('volume', zone['zone'], { 'volume': {
'value': volume,
'actions': ['volumeUp', 'volumeDown']
}})
if 'mute' in event.variables and 'Master' in event.variables['mute']:
self.sendEventWrapper('mute', zone['zone'], { 'mute': {
'value': int(event.variables['mute']['Master'])
}})
except Empty:
pass
def checkForNewEvents(self):
for zone in self.subscribers:
self.parseEvents(zone)
def deRegisterForEvent(self, zone):
zone['control'].unsubscribe()
zone['sound'].unsubscribe()
def deRegisterForEvents(self):
for zone in self.subscribers:
self.deRegisterForEvent(zone)
def onShutdown(self):
if self.interval:
self.interval.cancel()
self.deRegisterForEvents()
event_listener.stop()
| gpl-2.0 | -5,247,336,795,785,983,000 | 29.418605 | 89 | 0.55237 | false |
mtougeron/python-openstacksdk | examples/network/delete.py | 1 | 1721 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network examples
Destroy all the pieces of a working network.
To run:
python examples/network/delete.py
"""
import sys
from examples import common
from examples import connection
def delete(conn, name):
router = conn.network.find_router(name)
if router is not None:
print(str(router))
subnet = conn.network.find_subnet(name)
if subnet is not None:
print(str(subnet))
if router:
try:
conn.network.router_remove_interface(router, subnet.id)
except Exception:
pass
for port in conn.network.get_subnet_ports(subnet.id):
print(str(port))
conn.delete(port)
if router is not None:
conn.delete(router)
if subnet:
conn.delete(subnet)
network = conn.network.find_network(name)
if network is not None:
print(str(network))
conn.delete(network)
def run_network(opts):
name = opts.data.pop('name', 'netty')
conn = connection.make_connection(opts)
return(delete(conn, name))
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_network))
| apache-2.0 | -5,908,766,536,051,304,000 | 25.075758 | 75 | 0.662987 | false |
zaubermaerchen/imas_cg_api | api/skill/serializer.py | 1 | 1189 | # coding: utf-8
from rest_framework import serializers
from data.models import Skill, SkillValue
class ListSerializer(serializers.ModelSerializer):
skill_value_list = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Skill
fields = [
'skill_id',
'target_unit',
'target_member',
'target_type',
'target_num',
'target_param',
'skill_value_id',
'skill_value_list',
'comment'
]
@staticmethod
def get_skill_value_list(obj):
return SkillValue.get_value_list(obj.skill_value_id)
class Costar(object):
def __init__(self, name, count):
self.name = name
self.count = count
class CostarSerializer(serializers.Serializer):
name = serializers.CharField(max_length=255)
count = serializers.IntegerField()
def create(self, validated_data):
return Costar(**validated_data)
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.count = validated_data.get('count', instance.count)
return instance
| mit | -7,853,903,863,662,876,000 | 26.022727 | 72 | 0.619008 | false |
verloop/rasa_nlu | rasa_nlu/components.py | 1 | 16133 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import os
from collections import defaultdict
from builtins import object
import inspect
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from rasa_nlu.config import RasaNLUConfig
def load_component(component_clz, context, config):
# type: (Type[Component], Dict[Text, Any], Dict[Text, Any]) -> Optional[Component]
"""Calls a components load method to init it based on a previously persisted model."""
if component_clz is not None:
load_args = fill_args(component_clz.load_args(), context, config)
return component_clz.load(*load_args)
else:
return None
def create_component(component_clz, config):
# type: (Type[Component], Dict[Text, Any]) -> Optional[Component]
"""Calls a components load method to init it based on a previously persisted model."""
if component_clz is not None:
create_args = fill_args(component_clz.create_args(), context={}, config=config)
return component_clz.create(*create_args)
else:
return None
def fill_args(arguments, context, config):
# type: (List[Text], Dict[Text, Any], Dict[Text, Any]) -> List[Any]
"""Given a list of arguments, tries to look up these argument names in the config / context to fill the arguments"""
filled = []
for arg in arguments:
if arg in context:
filled.append(context[arg])
elif arg in config:
filled.append(config[arg])
else:
raise MissingArgumentError("Couldn't fill argument '{}' :(".format(arg))
return filled
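# Illustrative sketch with hypothetical names, not part of the module: given
#   arguments = ["fine_tune_spacy_ner", "spacy_nlp"]
#   context   = {"spacy_nlp": nlp}            # e.g. a loaded spacy Language object
#   config    = {"fine_tune_spacy_ner": True}
# fill_args looks each name up in context first, then config, and returns
# [True, nlp]; a name found in neither mapping raises MissingArgumentError.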
def _read_dev_requirements(file_name):
"""Reads the dev requirements and groups the pinned versions into sections indicated by comments in the file.
    The dev requirements should be grouped by preceding comments. The comment should start with `#` followed by
the name of the requirement, e.g. `# sklearn`. All following lines till the next line starting with `#` will be
required to be installed if the name `sklearn` is requested to be available."""
with open(file_name) as f:
req_lines = f.readlines()
requirements = defaultdict(list)
current_name = None
for req_line in req_lines:
if req_line.startswith("#"):
current_name = req_line[1:].strip(' \n')
elif current_name is not None:
requirements[current_name].append(req_line.strip(' \n'))
return requirements
def find_unavailable_packages(package_names):
# type: (List[Text]) -> Set[Text]
"""Tries to import all the package names and returns the packages where it failed."""
import importlib
failed_imports = set()
for package in package_names:
try:
importlib.import_module(package)
except ImportError:
failed_imports.add(package)
return failed_imports
def validate_requirements(component_names, dev_requirements_file="dev-requirements.txt"):
# type: (List[Text]) -> None
"""Ensures that all required python packages are installed to instantiate and used the passed components."""
from rasa_nlu import registry
# Validate that all required packages are installed
failed_imports = set()
for component_name in component_names:
component_class = registry.get_component_class(component_name)
failed_imports.update(find_unavailable_packages(component_class.required_packages()))
if failed_imports: # pragma: no cover
# if available, use the development file to figure out the correct version numbers for each requirement
if os.path.exists(dev_requirements_file):
all_requirements = _read_dev_requirements(dev_requirements_file)
missing_requirements = [r for i in failed_imports for r in all_requirements[i]]
raise Exception("Not all required packages are installed. To use this pipeline, run\n\t" +
"> pip install {}".format(" ".join(missing_requirements)))
else:
raise Exception("Not all required packages are installed. Please install {}".format(
" ".join(failed_imports)))
def validate_arguments(pipeline, config, allow_empty_pipeline=False):
# type: (List[Component], RasaNLUConfig, bool) -> None
"""Validates a pipeline before it is run. Ensures, that all arguments are present to train the pipeline."""
# Ensure the pipeline is not empty
if not allow_empty_pipeline and len(pipeline) == 0:
raise ValueError("Can not train an empty pipeline. " +
"Make sure to specify a proper pipeline in the configuration using the `pipeline` key." +
"The `backend` configuration key is NOT supported anymore.")
# Validate the init phase
context = {}
for component in pipeline:
try:
fill_args(component.pipeline_init_args(), context, config.as_dict())
updates = component.context_provides.get("pipeline_init", [])
for u in updates:
context[u] = None
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to validate component '{}'. {}".format(component.name, e))
after_init_context = context.copy()
context["training_data"] = None # Prepare context for testing the training phase
for component in pipeline:
try:
fill_args(component.train_args(), context, config.as_dict())
updates = component.context_provides.get("train", [])
for u in updates:
context[u] = None
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to validate at component '{}'. {}".format(component.name, e))
# Reset context to test processing phase and prepare for training phase
context = {"entities": [], "text": None}
context.update(after_init_context)
for component in pipeline:
try:
fill_args(component.process_args(), context, config.as_dict())
updates = component.context_provides.get("process", [])
for u in updates:
context[u] = None
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to validate at component '{}'. {}".format(component.name, e))
class MissingArgumentError(ValueError):
"""Raised when a function is called and not all parameters can be filled from the context / config.
Attributes:
message -- explanation of which parameter is missing
"""
def __init__(self, message):
# type: (Text) -> None
super(MissingArgumentError, self).__init__(message)
self.message = message
def __str__(self):
return self.message
class Component(object):
"""A component is a message processing unit in a pipeline.
Components are collected sequentially in a pipeline. Each component is called one after another. This holds for
initialization, training, persisting and loading the components. If a component comes first in a pipeline, its
methods will be called first.
E.g. to process an incoming message, the `process` method of each component will be called. During the processing
(as well as the training, persisting and initialization) components can pass information to other components.
The information is passed to other components by providing attributes to the so called pipeline context. The
pipeline context contains all the information of the previous components a component can use to do its own
processing. For example, a featurizer component can provide features that are used by another component down
the pipeline to do intent classification."""
# Name of the component to be used when integrating it in a pipeline. E.g. `[ComponentA, ComponentB]`
# will be a proper pipeline definition where `ComponentA` is the name of the first component of the pipeline.
name = ""
# Defines what attributes the pipeline component will provide when called. The different keys indicate the
# different functions (`pipeline_init`, `train`, `process`) that are able to update the pipelines context.
# (mostly used to check if the pipeline is valid)
context_provides = {
"pipeline_init": [],
"train": [],
"process": [],
}
# Defines which of the attributes the component provides should be added to the final output json at the end of the
# pipeline. Every attribute in `output_provides` should be part of the above `context_provides['process']`. As it
# wouldn't make much sense to keep an attribute in the output that is not generated. Every other attribute provided
# in the context during the process step will be removed from the output json.
output_provides = []
@classmethod
def required_packages(cls):
# type: () -> List[Text]
"""Specify which python packages need to be installed to use this component, e.g. `["spacy", "numpy"]`.
This list of requirements allows us to fail early during training if a required package is not installed."""
return []
@classmethod
def load(cls, *args):
# type: (*Any) -> Component
"""Load this component from file.
        After a component has been trained, it will be persisted by calling `persist`. When the pipeline gets loaded again,
this component needs to be able to restore itself. Components can rely on any context attributes that are
created by `pipeline_init` calls to components previous to this one."""
return cls(*args)
@classmethod
def create(cls, *args):
# type: (*Any) -> Component
"""Creates this component (e.g. before a training is started).
Method can access all configuration parameters."""
return cls(*args)
def pipeline_init(self, *args):
# type: (*Any) -> Optional[Dict[Text, Any]]
"""Initialize this component for a new pipeline
This function will be called before the training is started and before the first message is processed using
the interpreter. The component gets the opportunity to add information to the context that is passed through
the pipeline during training and message parsing. Most components do not need to implement this method.
It's mostly used to initialize framework environments like MITIE and spacy
(e.g. loading word vectors for the pipeline)."""
pass
def train(self, *args):
# type: (*Any) -> Optional[Dict[Text, Any]]
"""Train this component.
        This is the component's chance to train itself using the provided training data. The component can rely on
        any context attribute that is created by a call to `pipeline_init` of ANY component and
on any context attributes created by a call to `train` of components previous to this one."""
pass
def process(self, *args):
# type: (*Any) -> Optional[Dict[Text, Any]]
"""Process an incomming message.
This is the components chance to process an incommng message. The component can rely on
any context attribute to be present, that gets created by a call to `pipeline_init` of ANY component and
on any context attributes created by a call to `process` of components previous to this one."""
pass
def persist(self, model_dir):
# type: (Text) -> Optional[Dict[Text, Any]]
"""Persist this component to disk for future loading."""
pass
@classmethod
def cache_key(cls, model_metadata):
# type: (Metadata) -> Optional[Text]
"""This key is used to cache components.
If a component is unique to a model it should return None. Otherwise, an instantiation of the
component will be reused for all models where the metadata creates the same key."""
from rasa_nlu.model import Metadata
return None
def pipeline_init_args(self):
# type: () -> List[Text]
return [arg for arg in inspect.getargspec(self.pipeline_init).args if arg not in ["self"]]
@classmethod
def create_args(cls):
# type: () -> List[Text]
return [arg for arg in inspect.getargspec(cls.create).args if arg not in ["cls"]]
def train_args(self):
# type: () -> List[Text]
return [arg for arg in inspect.getargspec(self.train).args if arg not in ["self"]]
def process_args(self):
# type: () -> List[Text]
return [arg for arg in inspect.getargspec(self.process).args if arg not in ["self"]]
@classmethod
def load_args(cls):
# type: () -> List[Text]
return [arg for arg in inspect.getargspec(cls.load).args if arg not in ["cls"]]
def __eq__(self, other):
return self.__dict__ == other.__dict__
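# --- Illustrative sketch (added for illustration, not part of the original
# module). A minimal concrete component could look like the class below; the
# class name, the context key "example_nlp" and the use of spaCy are
# assumptions chosen purely as an example of the contract described above.
class _ExampleSpacyLoaderComponent(Component):
    name = "example_nlp_spacy"
    # `pipeline_init` promises to put "example_nlp" into the pipeline context
    # so that components later in the pipeline may consume it.
    context_provides = {
        "pipeline_init": ["example_nlp"],
        "train": [],
        "process": [],
    }
    @classmethod
    def required_packages(cls):
        return ["spacy"]
    def pipeline_init(self, language_name):
        # `language_name` is assumed to be filled from the context/config by
        # `fill_args`; the returned dict is merged into the pipeline context.
        import spacy
        return {"example_nlp": spacy.load(language_name)}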
class ComponentBuilder(object):
"""Creates trainers and interpreters based on configurations. Caches components for reuse."""
def __init__(self, use_cache=True):
self.use_cache = use_cache
# Reuse nlp and featurizers where possible to save memory,
# every component that implements a cache-key will be cached
self.component_cache = {}
def __get_cached_component(self, component_name, metadata):
# type: (Text, Metadata) -> Tuple[Optional[Component], Optional[Text]]
"""Load a component from the cache, if it exists. Returns the component, if found, and the cache key."""
from rasa_nlu import registry
from rasa_nlu.model import Metadata
component_class = registry.get_component_class(component_name)
cache_key = component_class.cache_key(metadata)
if cache_key is not None and self.use_cache and cache_key in self.component_cache:
return self.component_cache[cache_key], cache_key
else:
return None, cache_key
def __add_to_cache(self, component, cache_key):
# type: (Component, Text) -> None
"""Add a component to the cache."""
if cache_key is not None and self.use_cache:
self.component_cache[cache_key] = component
logging.info("Added '{}' to component cache. Key '{}'.".format(component.name, cache_key))
def load_component(self, component_name, context, model_config, meta):
# type: (Text, Dict[Text, Any], Dict[Text, Any], Metadata) -> Component
"""Tries to retrieve a component from the cache, calls `load` to create a new component."""
from rasa_nlu import registry
from rasa_nlu.model import Metadata
try:
component, cache_key = self.__get_cached_component(component_name, meta)
if component is None:
component = registry.load_component_by_name(component_name, context, model_config)
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to load component '{}'. {}".format(component_name, e))
def create_component(self, component_name, config):
# type: (Text, RasaNLUConfig) -> Component
"""Tries to retrieve a component from the cache, calls `create` to create a new component."""
from rasa_nlu import registry
from rasa_nlu.model import Metadata
try:
component, cache_key = self.__get_cached_component(component_name, Metadata(config.as_dict(), None))
if component is None:
component = registry.create_component_by_name(component_name, config.as_dict())
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e: # pragma: no cover
raise Exception("Failed to create component '{}'. {}".format(component_name, e))
| apache-2.0 | -8,919,818,506,342,547,000 | 42.602703 | 120 | 0.662431 | false |
vup1120/oq-risklib | openquake/commonlib/writers.py | 1 | 9803 | # Copyright (c) 2010-2014, GEM Foundation.
#
# NRML is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NRML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with NRML. If not, see <http://www.gnu.org/licenses/>.
import cStringIO
from contextlib import contextmanager
from xml.sax.saxutils import escape, quoteattr
import numpy # this is needed by the doctests, don't remove it
@contextmanager
def floatformat(fmt_string):
"""
Context manager to change the default format string for the
function :func:`openquake.commonlib.writers.scientificformat`.
:param fmt_string: the format to use; for instance '%13.9E'
"""
fmt_defaults = scientificformat.__defaults__
scientificformat.__defaults__ = (fmt_string,) + fmt_defaults[1:]
try:
yield
finally:
scientificformat.__defaults__ = fmt_defaults
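# Example usage (illustrative sketch, not part of the original module):
#
#     with floatformat('%6.2E'):
#         text = scientificformat(0.004)   # -> '4.00E-03'
#
# Outside the `with` block the default '%13.9E' format is restored.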
zeroset = set(['E', '-', '+', '.', '0'])
def scientificformat(value, fmt='%13.9E', sep=' ', sep2=':'):
"""
:param value: the value to convert into a string
:param fmt: the formatting string to use for float values
:param sep: separator to use for vector-like values
:param sep2: second separator to use for matrix-like values
Convert a float or an array into a string by using the scientific notation
and a fixed precision (by default 10 decimal digits). For instance:
>>> scientificformat(-0E0)
'0.000000000E+00'
>>> scientificformat(-0.004)
'-4.000000000E-03'
>>> scientificformat([0.004])
'4.000000000E-03'
>>> scientificformat([0.01, 0.02], '%10.6E')
'1.000000E-02 2.000000E-02'
>>> scientificformat([[0.1, 0.2], [0.3, 0.4]], '%4.1E')
'1.0E-01:2.0E-01 3.0E-01:4.0E-01'
"""
if isinstance(value, basestring):
return value
elif isinstance(value, (int, long)):
return str(value)
elif hasattr(value, '__len__'):
return sep.join((scientificformat(f, fmt, sep2) for f in value))
elif isinstance(value, float):
fmt_value = fmt % value
if set(fmt_value) <= zeroset:
            # '-0.0000000E+00' is converted into '0.0000000E+00'
fmt_value = fmt_value.replace('-', '')
return fmt_value
return str(value)
class StreamingXMLWriter(object):
"""
A stream-based XML writer. The typical usage is something like this::
with StreamingXMLWriter(output_file) as writer:
writer.start_tag('root')
for node in nodegenerator():
writer.serialize(node)
writer.end_tag('root')
"""
def __init__(self, stream, indent=4, encoding='utf-8', nsmap=None):
"""
:param stream: the stream or a file where to write the XML
:param int indent: the indentation to use in the XML (default 4 spaces)
"""
self.stream = stream
self.indent = indent
self.encoding = encoding
self.indentlevel = 0
self.nsmap = nsmap
def shorten(self, tag):
"""
Get the short representation of a fully qualified tag
:param str tag: a (fully qualified or not) XML tag
"""
if tag.startswith('{'):
ns, _tag = tag.rsplit('}')
tag = self.nsmap.get(ns[1:], '') + _tag
return tag
def _write(self, text):
"""Write text by respecting the current indentlevel"""
if not isinstance(text, str):
text = text.encode(self.encoding, 'xmlcharrefreplace')
spaces = ' ' * (self.indent * self.indentlevel)
self.stream.write(spaces + text.strip() + '\n')
def emptyElement(self, name, attrs):
"""Add an empty element (may have attributes)"""
attr = ' '.join('%s=%s' % (n, quoteattr(scientificformat(v)))
for n, v in sorted(attrs.iteritems()))
self._write('<%s %s/>' % (name, attr))
def start_tag(self, name, attrs=None):
"""Open an XML tag"""
if not attrs:
self._write('<%s>' % name)
else:
self._write('<' + name)
for (name, value) in sorted(attrs.items()):
self._write(
' %s=%s' % (name, quoteattr(scientificformat(value))))
self._write('>')
self.indentlevel += 1
def end_tag(self, name):
"""Close an XML tag"""
self.indentlevel -= 1
self._write('</%s>' % name)
def serialize(self, node):
"""Serialize a node object (typically an ElementTree object)"""
if self.nsmap is not None:
tag = self.shorten(node.tag)
else:
tag = node.tag
if not node and node.text is None:
self.emptyElement(tag, node.attrib)
return
self.start_tag(tag, node.attrib)
if node.text is not None:
self._write(escape(scientificformat(node.text).strip()))
for subnode in node:
self.serialize(subnode)
self.end_tag(tag)
def __enter__(self):
"""Write the XML declaration"""
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self.encoding)
return self
def __exit__(self, etype, exc, tb):
"""Close the XML document"""
pass
def tostring(node, indent=4):
"""
Convert a node into an XML string by using the StreamingXMLWriter.
This is useful for testing purposes.
:param node: a node object (typically an ElementTree object)
:param indent: the indentation to use in the XML (default 4 spaces)
"""
out = cStringIO.StringIO()
writer = StreamingXMLWriter(out, indent)
writer.serialize(node)
return out.getvalue()
def save_csv(dest, header_rows, sep=',', fmt='%12.8E', mode='wb'):
"""
:param dest: destination filename
:param header_rows: header + rows to save
:param sep: separator to use (default comma)
:param fmt: formatting string (default '%12.8E')
:param mode: file open mode (default 'wb')
"""
with open(dest, mode) as f:
for row in header_rows:
f.write(sep.join(scientificformat(col, fmt) for col in row) + '\n')
return dest
# recursive function used internally by build_header
def _build_header(dtype, root):
header = []
if dtype.fields is None:
if not root:
return []
return [root + (str(dtype), dtype.shape)]
for field in dtype.fields:
dt = dtype.fields[field][0]
if dt.subdtype is None: # nested
header.extend(_build_header(dt, root + (field,)))
else:
numpytype = str(dt.subdtype[0])
header.append(root + (field, numpytype, dt.shape))
return header
def build_header(dtype):
"""
Convert a numpy nested dtype into a list of strings suitable as header
of csv file.
>>> imt_dt = numpy.dtype([('PGA', float, 3), ('PGV', float, 4)])
>>> build_header(imt_dt)
['PGV:float64:4', 'PGA:float64:3']
>>> gmf_dt = numpy.dtype([('A', imt_dt), ('B', imt_dt),
... ('idx', numpy.uint32)])
>>> build_header(gmf_dt)
['A-PGV:float64:4', 'A-PGA:float64:3', 'B-PGV:float64:4', 'B-PGA:float64:3', 'idx:uint32:']
"""
header = _build_header(dtype, ())
h = []
for col in header:
name = '-'.join(col[:-2])
numpytype = col[-2]
shape = col[-1]
h.append(':'.join([name, numpytype, ':'.join(map(str, shape))]))
return h
def extract_from(data, fields):
"""
Extract data from numpy arrays with nested records.
>>> imt_dt = numpy.dtype([('PGA', float, 3), ('PGV', float, 4)])
>>> a = numpy.array([([1, 2, 3], [4, 5, 6, 7])], imt_dt)
>>> extract_from(a, ['PGA'])
array([[ 1., 2., 3.]])
>>> gmf_dt = numpy.dtype([('A', imt_dt), ('B', imt_dt),
... ('idx', numpy.uint32)])
>>> b = numpy.array([(([1, 2, 3], [4, 5, 6, 7]),
... ([1, 2, 4], [3, 5, 6, 7]), 8)], gmf_dt)
>>> extract_from(b, ['idx'])
array([8], dtype=uint32)
>>> extract_from(b, ['B', 'PGV'])
array([[ 3., 5., 6., 7.]])
"""
for f in fields:
data = data[f]
return data
def write_csv(dest, data, sep=',', fmt='%12.8E', header=None):
"""
:param dest: destination filename
:param data: array to save
:param sep: separator to use (default comma)
:param fmt: formatting string (default '%12.8E')
:param header:
optional list with the names of the columns to display
"""
try:
# see if data is a composite numpy array
data.dtype.fields
except AttributeError:
# not a composite array
header = header or []
else:
header = header or build_header(data.dtype)
with open(dest, 'wb') as f:
if header:
f.write(sep.join(header) + '\n')
all_fields = [col.split(':', 1)[0].split('-')
for col in header]
for record in data:
row = []
for fields in all_fields:
row.append(extract_from(record, fields))
f.write(sep.join(scientificformat(col, fmt)
for col in row) + '\n')
else:
for row in data:
f.write(sep.join(scientificformat(col, fmt)
for col in row) + '\n')
return dest
| agpl-3.0 | 344,428,272,819,919,700 | 32.803448 | 95 | 0.571254 | false |
tranlyvu/autonomous-vehicle-projects | Vehicle Detection/src/vehicle_detection.py | 1 | 14396 | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
from skimage.feature import hog
from skimage import color, exposure
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
import time
from moviepy.editor import VideoFileClip
from scipy.ndimage.measurements import label
from IPython.display import HTML
def load_data(my_list):
new_list = []
for image in my_list:
img = cv2.imread(image)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
new_list.append(img)
return new_list
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
if vis == True:
features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
visualise=True, feature_vector=False)
return features, hog_image
else:
features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
visualise=False, feature_vector=feature_vec)
return features
def bin_spatial(img, size=(32, 32)):
color1 = cv2.resize(img[:,:,0], size).ravel()
color2 = cv2.resize(img[:,:,1], size).ravel()
color3 = cv2.resize(img[:,:,2], size).ravel()
return np.hstack((color1, color2, color3))
def color_hist(img, nbins=32): #bins_range=(0, 256)
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins)
channel2_hist = np.histogram(img[:,:,1], bins=nbins)
channel3_hist = np.histogram(img[:,:,2], bins=nbins)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs,
color_space='RGB',
spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8,
cell_per_block=2,
hog_channel=0,
spatial_feat=True,
hist_feat=True,
hog_feat=True):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for image in imgs:
file_features = []
# Read in each one by one
#image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
file_features.append(spatial_features)
if hist_feat == True:
# Apply color_hist()
hist_features = color_hist(feature_image, nbins=hist_bins)
file_features.append(hist_features)
if hog_feat == True:
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
file_features.append(hog_features)
features.append(np.concatenate(file_features))
# Return list of feature vectors
return features
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image
return img
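# Illustrative end-to-end heat-map usage (sketch; `image` and `bbox_list` are
# assumed to come from a sliding-window vehicle search elsewhere):
#
#     heat = np.zeros_like(image[:, :, 0]).astype(np.float)
#     heat = add_heat(heat, bbox_list)
#     heat = apply_threshold(heat, 2)   # drop spurious single detections
#     labels = label(heat)              # scipy.ndimage.measurements.label
#     annotated = draw_labeled_bboxes(np.copy(image), labels)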
img_boxes = []
def convert_color(img, conv='RGB2YCrCb'):
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'BGR2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
if conv == 'RGB2LUV':
return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
#img = img.astype(np.float32)/255
heat_map = np.zeros_like(img[:,:,0]).astype(np.float)
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
nfeat_per_block = orient*cell_per_block**2
    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
#test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window*scale)
cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart),(0,0,255),6)
img_boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw, ytop_draw+win_draw+ystart)))
heat_map[ytop_draw+ystart:ytop_draw+win_draw+ystart, xbox_left:xbox_left+win_draw] +=1
return draw_img, heat_map
def process_image(img):
# Find final boxes from heatmap using label function
out_img, heatmap = find_cars(img,
ystart=YSTART,
ystop=YSTOP,
scale=SCALE,
svc = SVC,
X_scaler = X_scaler,
orient= ORIENTATION,
pix_per_cell = PIX_PER_CELL,
cell_per_block= CELL_PER_BLOCK,
spatial_size = SPATIAL_SIZE,
hist_bins = HIST_BINS)
labels = label(heatmap)
draw_img = draw_labeled_bboxes(np.copy(img), labels)
return draw_img
if __name__ == "__main__":
vehicles_images = glob.glob('../../../vehicles/vehicles/*/*.png')
non_vehicles_images = glob.glob('../../../non-vehicles/non-vehicles/*/*.png')
cars = load_data(vehicles_images)
non_cars = load_data(non_vehicles_images)
"""Parameters"""
COLOR_SPACE = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
ORIENTATION = 9 # HOG orientations
PIX_PER_CELL = 8 # HOG pixels per cell
CELL_PER_BLOCK = 2 # HOG cells per block
HOG_CHANNEL = "ALL" # Can be 0, 1, 2, or "ALL"
SPATIAL_SIZE = (16, 16) # Spatial binning dimensions
HIST_BINS = 16 # Number of histogram bins
IS_SPATIAL_FEAT = True # Spatial features on or off
IS_HIST_FEAT = True # Histogram features on or off
    IS_HOG_FEAT = True # HOG features on or off
    # The search-window parameters below are used by process_image()/find_cars()
    # but were not defined in the original script; the values are assumptions
    # added so the pipeline can run end to end.
    YSTART = 400  # top of the vertical search region (pixels)
    YSTOP = 656   # bottom of the vertical search region (pixels)
    SCALE = 1.5   # window scale used by find_cars()
t=time.time()
car_features = extract_features(cars,
color_space = COLOR_SPACE,
spatial_size= SPATIAL_SIZE,
hist_bins = HIST_BINS,
orient = ORIENTATION,
pix_per_cell = PIX_PER_CELL,
cell_per_block = CELL_PER_BLOCK,
hog_channel = HOG_CHANNEL,
spatial_feat = IS_SPATIAL_FEAT ,
hist_feat = IS_HIST_FEAT,
hog_feat = IS_HOG_FEAT)
notcar_features = extract_features(non_cars,
color_space = COLOR_SPACE,
spatial_size= SPATIAL_SIZE,
hist_bins = HIST_BINS,
orient = ORIENTATION,
pix_per_cell = PIX_PER_CELL,
cell_per_block = CELL_PER_BLOCK,
hog_channel = HOG_CHANNEL,
spatial_feat = IS_SPATIAL_FEAT ,
hist_feat = IS_HIST_FEAT,
hog_feat = IS_HOG_FEAT)
print(time.time()-t, 'Seconds to compute features...')
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# Define the labels vector
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2, random_state=rand_state)
    print('Using:', ORIENTATION, 'orientations', PIX_PER_CELL, 'pixels per cell and', CELL_PER_BLOCK, 'cells per block')
print('Feature vector length:', len(X_train[0]))
# Use a linear SVC
SVC = LinearSVC()
# Check the training time for the SVC
SVC.fit(X_train, y_train)
t2 = time.time()
print(round(t2-t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(SVC.score(X_test, y_test), 4))
clip1 = VideoFileClip('../project_video.mp4')
video_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
    video_output = '../output_videos/project_video.mp4'
    # Write the processed clip to disk (the original script stopped before this
    # step; write_videofile is the standard moviepy call for saving the result).
    video_clip.write_videofile(video_output, audio=False)
| apache-2.0 | -4,546,993,512,947,049,500 | 43.27044 | 128 | 0.566199 | false |
Poofjunior/dxf2gcode | core/shape.py | 1 | 23737 | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
# Jean-Paul Schouwstra
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
from math import radians, pi
from copy import deepcopy
import logging
import globals.globals as g
from core.point import Point
from core.linegeo import LineGeo
from core.arcgeo import ArcGeo
from core.holegeo import HoleGeo
from globals.six import text_type
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5 import QtCore
else:
from PyQt4 import QtCore
logger = logging.getLogger("Core.Shape")
class Shape(object):
"""
The Shape Class includes all plotting, GUI functionality and export functions
related to the Shapes.
"""
# only need default arguments here because of the change of usage with super in QGraphicsItem
def __init__(self, nr=-1, closed=True, parentEntity=None):
if nr == -1:
return
self.type = "Shape"
self.nr = nr
self.closed = closed
self.cut_cor = 40
self.parentEntity = parentEntity
self.parentLayer = None
self.geos = Geos([])
self.cw = True
self.stmove = None
self.topLeft = None
self.bottomRight = None
self.send_to_TSP = g.config.vars.Route_Optimisation['default_TSP']
self.selected = False
self.disabled = False
self.allowedToChange = True
# preset defaults
self.axis3_start_mill_depth = g.config.vars.Depth_Coordinates['axis3_start_mill_depth']
self.axis3_slice_depth = g.config.vars.Depth_Coordinates['axis3_slice_depth']
self.axis3_mill_depth = g.config.vars.Depth_Coordinates['axis3_mill_depth']
self.f_g1_plane = g.config.vars.Feed_Rates['f_g1_plane']
self.f_g1_depth = g.config.vars.Feed_Rates['f_g1_depth']
# Parameters for drag knife
self.drag_angle = radians(g.config.vars.Drag_Knife_Options['drag_angle'])
# Parameters for laser cutter
self.laser_power = g.config.vars.Laser_Cutter_Options['laser_power']
self.laser_pulses_per_mm = g.config.vars.Laser_Cutter_Options['laser_pulses_per_mm']
def __str__(self):
"""
Standard method to print the object
@return: A string
"""
return "\ntype: %s" % self.type +\
"\nnr: %i" % self.nr +\
"\nclosed: %i" % self.closed +\
"\ncut_cor: %s" % self.cut_cor +\
"\nlen(geos): %i" % len(self.geos) +\
"\ngeos: %s" % self.geos
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return text_type(QtCore.QCoreApplication.translate("Shape",
string_to_translate))
def setSelected(self, flag=False):
self.selected = flag
def isSelected(self):
return self.selected
def setDisable(self, flag=False):
self.disabled = flag
def isDisabled(self):
return self.disabled
def setToolPathOptimized(self, flag=False):
self.send_to_TSP = flag
def isToolPathOptimized(self):
return self.send_to_TSP
def isDirectionOfGeosCCW(self, geos):
# By calculating the area of the shape
start = geos.abs_el(0).get_start_end_points(True)
summe = 0.0
for geo in geos.abs_iter():
if isinstance(geo, LineGeo):
end = geo.get_start_end_points(False)
summe += (start.x + end.x) * (end.y - start.y)
start = end
elif isinstance(geo, ArcGeo):
segments = 10
for i in range(1, segments + 1):
end = geo.get_point_from_start(i, segments)
summe += (end.x + start.x) * (end.y - start.y)
start = end
if not self.closed:
# if shape is not closed... simply treat it as closed
end = geos.abs_el(0).get_start_end_points(True)
summe += (end.x + start.x) * (end.y - start.y)
if summe == 0: # inconclusive
logger.debug(self.tr("Shoelace method cannot (directly) be applied to this shape"))
# lets take it clock wise with relation to the workpiece zero
start = geos.abs_el(0).get_start_end_points(True)
# get the farthest end point with relation to the start
end = start
distance2 = 0
for geo in geos.abs_iter():
pos_end = geo.get_start_end_points(False)
pos_distance2 = (start - pos_end).length_squared()
if pos_distance2 > distance2:
end = pos_end
distance2 = pos_distance2
direction = start.to3D().cross_product(end.to3D()).z
if -1e-5 < direction < 1e-5: # start and end are aligned wrt to wp zero
direction = start.length_squared() - end.length_squared()
summe = direction
return summe > 0.0
def AnalyseAndOptimize(self):
self.setNearestStPoint(Point())
logger.debug(self.tr("Analysing the shape for CW direction Nr: %s" % self.nr))
if self.isDirectionOfGeosCCW(self.geos):
self.reverse()
logger.debug(self.tr("Had to reverse the shape to be CW"))
self.cw = True
def setNearestStPoint(self, stPoint):
if self.closed:
logger.debug(self.tr("Clicked Point: %s" % stPoint))
start = self.get_start_end_points(True)
logger.debug(self.tr("Old Start Point: %s" % start))
min_geo_nr, _ = min(enumerate(self.geos.abs_iter()),
key=lambda geo: geo[1].get_start_end_points(True).distance(stPoint))
# Overwrite the geometries in changed order.
self.geos = Geos(self.geos[min_geo_nr:] + self.geos[:min_geo_nr])
start = self.get_start_end_points(True)
logger.debug(self.tr("New Start Point: %s" % start))
def reverse(self, geos=None):
if not geos:
geos = self.geos
geos.reverse()
for geo in geos:
geo.reverse()
self.cw = not self.cw
def switch_cut_cor(self):
"""
Switches the cutter direction between 41 and 42.
G41 = Tool radius compensation left.
G42 = Tool radius compensation right
"""
if self.cut_cor == 41:
self.cut_cor = 42
elif self.cut_cor == 42:
self.cut_cor = 41
def append(self, geo):
geo.make_abs_geo(self.parentEntity)
self.geos.append(geo)
def get_start_end_points_physical(self, start_point=None, angles=None):
"""
With multiple slices end point could be start point.
e.g. useful for the optimal rout etc
"""
if start_point or self.closed:
return self.get_start_end_points(start_point, angles)
else:
max_slice = max(self.axis3_slice_depth, self.axis3_mill_depth - self.axis3_start_mill_depth)
if max_slice == 0:
end_should_be_start = True
else:
end_should_be_start = (self.axis3_start_mill_depth - self.axis3_mill_depth) // max_slice % 2 == 0
if not end_should_be_start:
return self.get_start_end_points(start_point, angles)
else:
start_stuff = self.get_start_end_points(True, angles)
if angles is False:
end_stuff = start_stuff[0], -start_stuff[1]
else:
end_stuff = start_stuff
if start_point is None:
return start_stuff, end_stuff
else:
return end_stuff
def get_start_end_points(self, start_point=None, angles=None):
if start_point is None:
return (self.geos.abs_el(0).get_start_end_points(True, angles),
self.geos.abs_el(-1).get_start_end_points(False, angles))
elif start_point:
return self.geos.abs_el(0).get_start_end_points(True, angles)
else:
return self.geos.abs_el(-1).get_start_end_points(False, angles)
def make_path(self, drawHorLine, drawVerLine):
for geo in self.geos.abs_iter():
drawVerLine(self, geo.get_start_end_points(True))
geo.make_path(self, drawHorLine)
if self.topLeft is None:
self.topLeft = deepcopy(geo.topLeft)
self.bottomRight = deepcopy(geo.bottomRight)
else:
self.topLeft.detTopLeft(geo.topLeft)
self.bottomRight.detBottomRight(geo.bottomRight)
if not self.closed:
drawVerLine(self, geo.get_start_end_points(False))
def isHit(self, xy, tol):
if self.topLeft.x - tol <= xy.x <= self.bottomRight.x + tol\
and self.bottomRight.y - tol <= xy.y <= self.topLeft.y + tol:
for geo in self.geos.abs_iter():
if geo.isHit(self, xy, tol):
return True
return False
def Write_GCode_for_geo(self, geo, PostPro):
# Used to remove zero length geos. If not, arcs can become a full circle
post_dec = PostPro.vars.Number_Format["post_decimals"]
if isinstance(geo, HoleGeo) or\
round(geo.Ps.x, post_dec) != round(geo.Pe.x, post_dec) or\
round(geo.Ps.y, post_dec) != round(geo.Pe.y, post_dec) or\
isinstance(geo, ArcGeo) and geo.length > 0.5 * 0.1 ** post_dec * pi:
return geo.Write_GCode(PostPro)
else:
return ""
def Write_GCode(self, PostPro):
"""
This method returns the string to be exported for this shape, including
the defined start and end move of the shape.
@param PostPro: this is the Postprocessor class including the methods
to export
"""
if g.config.machine_type == 'drag_knife':
return self.Write_GCode_Drag_Knife(PostPro)
elif g.config.machine_type == 'laser_cutter':
return self.Write_GCode_Laser_Cutter(PostPro)
prv_cut_cor = self.cut_cor
if self.cut_cor != 40 and not g.config.vars.Cutter_Compensation["done_by_machine"]:
self.cut_cor = 40
new_geos = Geos(self.stmove.geos[1:])
else:
new_geos = self.geos
new_geos = PostPro.breaks.getNewGeos(new_geos)
# initialisation of the string
exstr = ""
# Get the mill settings defined in the GUI
safe_retract_depth = self.parentLayer.axis3_retract
safe_margin = self.parentLayer.axis3_safe_margin
max_slice = self.axis3_slice_depth
workpiece_top_Z = self.axis3_start_mill_depth
# We want to mill the piece, even for the first pass, so remove one "slice"
initial_mill_depth = workpiece_top_Z - abs(max_slice)
depth = self.axis3_mill_depth
f_g1_plane = self.f_g1_plane
f_g1_depth = self.f_g1_depth
# Save the initial Cutter correction in a variable
has_reversed = False
# If the Output Format is DXF do not perform more then one cut.
if PostPro.vars.General["output_type"] == 'dxf':
depth = max_slice
if max_slice == 0:
logger.error(self.tr("ERROR: Z infeed depth is null!"))
if initial_mill_depth < depth:
logger.warning(self.tr(
"WARNING: initial mill depth (%i) is lower than end mill depth (%i). Using end mill depth as final depth.") % (
initial_mill_depth, depth))
# Do not cut below the depth.
initial_mill_depth = depth
mom_depth = initial_mill_depth
# Move the tool to the start.
exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)
# Add string to be added before the shape will be cut.
exstr += PostPro.write_pre_shape_cut()
# Cutter radius compensation when G41 or G42 is on, AND cutter compensation option is set to be done outside the piece
if self.cut_cor != 40 and PostPro.vars.General["cc_outside_the_piece"]:
exstr += PostPro.set_cut_cor(self.cut_cor)
exstr += PostPro.chg_feed_rate(f_g1_plane)
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
exstr += self.stmove.geos.abs_el(2).Write_GCode(PostPro)
exstr += PostPro.rap_pos_z(
workpiece_top_Z + abs(safe_margin)) # Compute the safe margin from the initial mill depth
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
# Cutter radius compensation when G41 or G42 is on, AND cutter compensation option is set to be done inside the piece
if self.cut_cor != 40 and not PostPro.vars.General["cc_outside_the_piece"]:
exstr += PostPro.set_cut_cor(self.cut_cor)
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
exstr += self.stmove.geos.abs_el(2).Write_GCode(PostPro)
# Write the geometries for the first cut
for geo in new_geos.abs_iter():
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Turning the cutter radius compensation
if self.cut_cor != 40 and PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# Numbers of loops
snr = 0
# Loops for the number of cuts
while mom_depth > depth and max_slice != 0.0:
snr += 1
mom_depth = mom_depth - abs(max_slice)
if mom_depth < depth:
mom_depth = depth
            # Plunge back down to the new slice depth
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
# If it is not a closed contour
if not self.closed:
self.reverse(new_geos)
self.switch_cut_cor()
has_reversed = not has_reversed # switch the "reversed" state (in order to restore it at the end)
# If cutter radius compensation is turned on. Turn it off - because some interpreters cannot handle
# a switch
if self.cut_cor != 40 and not PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# If cutter correction is enabled
if self.cut_cor != 40 and PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.set_cut_cor(self.cut_cor)
for geo in new_geos.abs_iter():
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Turning off the cutter radius compensation if needed
if self.cut_cor != 40 and PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# Do the tool retraction
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(workpiece_top_Z + abs(safe_margin))
exstr += PostPro.rap_pos_z(safe_retract_depth)
# If cutter radius compensation is turned on.
if self.cut_cor != 40 and not PostPro.vars.General["cancel_cc_for_depth"]:
exstr += PostPro.deactivate_cut_cor()
# Initial value of direction restored if necessary
if has_reversed:
self.reverse(new_geos)
self.switch_cut_cor()
self.cut_cor = prv_cut_cor
        # Add string to be added after the shape has been cut.
exstr += PostPro.write_post_shape_cut()
return exstr
def Write_GCode_Drag_Knife(self, PostPro):
"""
This method returns the string to be exported for this shape, including
the defined start and end move of the shape. This function is used for
Drag Knife cutting machine only.
@param PostPro: this is the Postprocessor class including the methods
to export
"""
# initialisation of the string
exstr = ""
# Get the mill settings defined in the GUI
safe_retract_depth = self.parentLayer.axis3_retract
safe_margin = self.parentLayer.axis3_safe_margin
workpiece_top_Z = self.axis3_start_mill_depth
f_g1_plane = self.f_g1_plane
f_g1_depth = self.f_g1_depth
"""
Cutting in slices is not supported for Swivel Knife tool. All is cut at once.
"""
mom_depth = self.axis3_mill_depth
drag_depth = self.axis3_slice_depth
# Move the tool to the start.
exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)
# Add string to be added before the shape will be cut.
exstr += PostPro.write_pre_shape_cut()
# Move into workpiece and start cutting into Z
exstr += PostPro.rap_pos_z(
workpiece_top_Z + abs(safe_margin)) # Compute the safe margin from the initial mill depth
exstr += PostPro.chg_feed_rate(f_g1_depth)
# Write the geometries for the first cut
if isinstance(self.stmove.geos.abs_el(1), ArcGeo):
if self.stmove.geos.abs_el(1).drag:
exstr += PostPro.lin_pol_z(drag_depth)
drag = True
else:
exstr += PostPro.lin_pol_z(mom_depth)
drag = False
else:
exstr += PostPro.lin_pol_z(mom_depth)
drag = False
exstr += PostPro.chg_feed_rate(f_g1_plane)
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
for geo in Geos(self.stmove.geos[2:]).abs_iter():
if isinstance(geo, ArcGeo):
if geo.drag:
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(drag_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
drag = True
elif drag:
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
drag = False
elif drag:
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(mom_depth)
exstr += PostPro.chg_feed_rate(f_g1_plane)
drag = False
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Do the tool retraction
exstr += PostPro.chg_feed_rate(f_g1_depth)
exstr += PostPro.lin_pol_z(workpiece_top_Z + abs(safe_margin))
exstr += PostPro.rap_pos_z(safe_retract_depth)
        # Add string to be added after the shape has been cut.
exstr += PostPro.write_post_shape_cut()
return exstr
def Write_GCode_Laser_Cutter(self, PostPro):
"""
This method returns the string to be exported for this shape, including
the defined start and end move of the shape.
@param PostPro: this is the Postprocessor class including the methods
to export
"""
# Save prior machine state.
prv_cut_cor = self.cut_cor
if self.cut_cor != 40 and not g.config.vars.Cutter_Compensation["done_by_machine"]:
new_geos = Geos(self.stmove.geos[1:])
else:
new_geos = self.geos
new_geos = PostPro.breaks.getNewGeos(new_geos)
# Initialize string to hold all the GCode.
exstr = ""
laser_disable_depth = 0
laser_enable_depth = -0.01
# Save the initial Cutter correction in a variable
has_reversed = False
# Move the tool to the start.
exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)
exstr += PostPro.rap_pos_z(laser_disable_depth)
# Add string to be added before the shape will be cut.
exstr += PostPro.write_pre_shape_cut()
        # Enable the laser by moving Z to a slightly negative value (laser_enable_depth)
exstr += PostPro.rap_pos_z(laser_enable_depth)
# Set the feed rate.
exstr += PostPro.chg_feed_rate(self.f_g1_plane)
if self.cut_cor != 40 and g.config.vars.Cutter_Compensation["done_by_machine"]:
# Enable Cutter Compensation at the start of all shapes.
exstr += PostPro.set_cut_cor(self.cut_cor)
# Apply Lead-In move for all shapes.
exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)
exstr += self.stmove.geos.abs_el(2).Write_GCode(PostPro)
# Set the desired laser power.
exstr += PostPro.chg_laser_power(self.laser_power)
# Set the desired laser pulses per mm.
exstr += PostPro.chg_laser_pulses_per_mm(self.laser_pulses_per_mm)
# Write the geometries for the cut.
for geo in new_geos.abs_iter():
exstr += self.Write_GCode_for_geo(geo, PostPro)
# Turn off the cutter radius compensation if enabled.
if self.cut_cor != 40 and g.config.vars.Cutter_Compensation["done_by_machine"]:
exstr += PostPro.deactivate_cut_cor()
        # Disable the laser by restoring Z to the non-negative value 0 (laser_disable_depth)
exstr += PostPro.rap_pos_z(laser_disable_depth)
# Initial value of direction restored if necessary
if has_reversed:
self.reverse(new_geos)
self.switch_cut_cor()
self.cut_cor = prv_cut_cor
        # Add string to be added after the shape has been cut.
exstr += PostPro.write_post_shape_cut()
return exstr
class Geos(list):
def __init__(self, *args):
list.__init__(self, *args)
def abs_iter(self):
for geo in list.__iter__(self):
yield geo.abs_geo if geo.abs_geo else geo
def abs_el(self, element):
return self[element].abs_geo if self[element].abs_geo else self[element]
| gpl-3.0 | 24,751,064,062,946,144 | 36.975369 | 127 | 0.56686 | false |
tomaaron/raiden | raiden/tests/utils/tester.py | 1 | 4931 | # -*- coding: utf-8 -*-
from ethereum import tester
from ethereum.utils import decode_hex
from raiden.blockchain.abi import (
CHANNEL_MANAGER_ABI,
NETTING_CHANNEL_ABI,
HUMAN_TOKEN_ABI,
REGISTRY_ABI,
)
from raiden.channel import Channel, ChannelEndState
from raiden.utils import privatekey_to_address
class InvalidKey(str):
    # Using an invalid key as the proxies' default_key forces the user to set
    # `sender` explicitly. The reason for this is that too many tests were mixing
    # up the wrong key. The alternative was to instantiate a proxy per key, which
    # would have added too much code bloat; by using an invalid key we effectively
    # disable the "feature" of the ABIContract to use a default key, making all
    # the calls explicit. This is intentional!
def __getitem__(self, key):
# please provide an explicit key while testing with tester
raise Exception('sender key was not set')
INVALID_KEY = InvalidKey('default_key_was_not_set')
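# Consequence of the invalid default key (illustrative sketch; the proxy and
# argument names below are placeholders, not definitions from this module):
#
#     token = create_tokenproxy(tester_state, token_address, log_listener)
#     token.transfer(receiver_address, amount, sender=our_private_key)
#
# Omitting `sender=` would fall back to INVALID_KEY and raise immediately.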
def create_tokenproxy(tester_state, tester_token_address, log_listener):
translator = tester.ContractTranslator(HUMAN_TOKEN_ABI)
token_abi = tester.ABIContract(
tester_state,
translator,
tester_token_address,
log_listener=log_listener,
default_key=INVALID_KEY,
)
return token_abi
def create_registryproxy(tester_state, tester_registry_address, log_listener):
translator = tester.ContractTranslator(REGISTRY_ABI)
registry_abi = tester.ABIContract(
tester_state,
translator,
tester_registry_address,
log_listener=log_listener,
default_key=INVALID_KEY,
)
return registry_abi
def create_channelmanager_proxy(tester_state, tester_channelmanager_address, log_listener):
translator = tester.ContractTranslator(CHANNEL_MANAGER_ABI)
channel_manager_abi = tester.ABIContract(
tester_state,
translator,
tester_channelmanager_address,
log_listener=log_listener,
default_key=INVALID_KEY,
)
return channel_manager_abi
def create_nettingchannel_proxy(tester_state, tester_nettingchannel_address, log_listener):
translator = tester.ContractTranslator(NETTING_CHANNEL_ABI)
netting_channel_abi = tester.ABIContract(
tester_state,
translator,
tester_nettingchannel_address,
log_listener=log_listener,
default_key=INVALID_KEY,
)
return netting_channel_abi
def channel_from_nettingcontract(our_key, netting_contract, external_state, reveal_timeout):
""" Create a `channel.Channel` for the `netting_contract`.
Use this to make sure that both implementations (the smart contract and the
python code) work in tandem.
"""
our_address = privatekey_to_address(our_key)
asset_address_hex = netting_contract.assetAddress(sender=our_key)
settle_timeout = netting_contract.settleTimeout(sender=our_key)
address_balance = netting_contract.addressAndBalance(sender=our_key)
address1_hex, balance1, address2_hex, balance2 = address_balance
asset_address = decode_hex(asset_address_hex)
address1 = decode_hex(address1_hex)
address2 = decode_hex(address2_hex)
if our_address == address1:
our_balance = balance1
partner_address = address2
partner_balance = balance2
else:
our_balance = balance2
partner_address = address1
partner_balance = balance1
our_state = ChannelEndState(
our_address,
our_balance,
external_state.opened_block,
)
partner_state = ChannelEndState(
partner_address,
partner_balance,
external_state.opened_block,
)
channel = Channel(
our_state,
partner_state,
external_state,
asset_address,
reveal_timeout,
settle_timeout,
)
return channel
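# Illustrative usage (sketch; `external_state` and `reveal_timeout` come from
# the surrounding test fixtures and are placeholders here):
#
#     nettingchannel = new_nettingcontract(our_key, partner_key, tester_state,
#                                          log_listener, channelmanager,
#                                          settle_timeout=30)
#     channel = channel_from_nettingcontract(our_key, nettingchannel,
#                                            external_state, reveal_timeout=5)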
def new_channelmanager(our_key, tester_state, log_listener, tester_registry, tester_token):
channel_manager_address = tester_registry.addAsset(
tester_token.address,
sender=our_key,
)
tester_state.mine(number_of_blocks=1)
channelmanager = create_channelmanager_proxy(
tester_state,
channel_manager_address,
log_listener,
)
return channelmanager
def new_nettingcontract(our_key, partner_key, tester_state, log_listener,
channelmanager, settle_timeout):
netting_channel_address0_hex = channelmanager.newChannel(
privatekey_to_address(partner_key),
settle_timeout,
sender=our_key,
)
tester_state.mine(number_of_blocks=1)
nettingchannel_translator = tester.ContractTranslator(NETTING_CHANNEL_ABI)
nettingchannel = tester.ABIContract(
tester_state,
nettingchannel_translator,
netting_channel_address0_hex,
log_listener=log_listener,
default_key=INVALID_KEY,
)
return nettingchannel
| mit | 5,570,702,080,133,079,000 | 29.251534 | 92 | 0.683634 | false |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/libsvm/ops/gen_libsvm_ops.py | 1 | 6994 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: libsvm_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
_decode_libsvm_outputs = ["label", "feature_indices", "feature_values",
"feature_shape"]
_DecodeLibsvmOutput = _collections.namedtuple(
"DecodeLibsvm", _decode_libsvm_outputs)
@tf_export('decode_libsvm')
def decode_libsvm(input, num_features, dtype=_dtypes.float32, label_dtype=_dtypes.int64, name=None):
r"""Convert LibSVM input to tensors. The output consists of
a label and a feature tensor. The shape of the label tensor
is the same as input and the shape of the feature tensor is
`[input_shape, num_features]`.
Args:
input: A `Tensor` of type `string`. Each string is a record in the LibSVM.
num_features: An `int` that is `>= 1`. The number of features.
dtype: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.float32`.
label_dtype: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (label, feature_indices, feature_values, feature_shape).
label: A `Tensor` of type `label_dtype`. A tensor of the same shape as input.
feature_indices: A `Tensor` of type `int64`. A 2-D int64 tensor of dense_shape [N, ndims].
feature_values: A `Tensor` of type `dtype`. A 1-D tensor of any type and dense_shape [N].
feature_shape: A `Tensor` of type `int64`. A 1-D int64 tensor of dense_shape [ndims].
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
num_features = _execute.make_int(num_features, "num_features")
if dtype is None:
dtype = _dtypes.float32
dtype = _execute.make_type(dtype, "dtype")
if label_dtype is None:
label_dtype = _dtypes.int64
label_dtype = _execute.make_type(label_dtype, "label_dtype")
_, _, _op = _op_def_lib._apply_op_helper(
"DecodeLibsvm", input=input, num_features=num_features, dtype=dtype,
label_dtype=label_dtype, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("dtype", _op.get_attr("dtype"), "label_dtype",
_op.get_attr("label_dtype"), "num_features",
_op.get_attr("num_features"))
_execute.record_gradient(
"DecodeLibsvm", _inputs_flat, _attrs, _result, name)
_result = _DecodeLibsvmOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "DecodeLibsvm",
name, _ctx._post_execution_callbacks, input, "dtype", dtype,
"label_dtype", label_dtype, "num_features", num_features)
_result = _DecodeLibsvmOutput._make(_result)
return _result
except _core._FallbackException:
return decode_libsvm_eager_fallback(
input, dtype=dtype, label_dtype=label_dtype,
num_features=num_features, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def decode_libsvm_eager_fallback(input, num_features, dtype=_dtypes.float32, label_dtype=_dtypes.int64, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function decode_libsvm
"""
_ctx = ctx if ctx else _context.context()
num_features = _execute.make_int(num_features, "num_features")
if dtype is None:
dtype = _dtypes.float32
dtype = _execute.make_type(dtype, "dtype")
if label_dtype is None:
label_dtype = _dtypes.int64
label_dtype = _execute.make_type(label_dtype, "label_dtype")
input = _ops.convert_to_tensor(input, _dtypes.string)
_inputs_flat = [input]
_attrs = ("dtype", dtype, "label_dtype", label_dtype, "num_features",
num_features)
_result = _execute.execute(b"DecodeLibsvm", 4, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"DecodeLibsvm", _inputs_flat, _attrs, _result, name)
_result = _DecodeLibsvmOutput._make(_result)
return _result
_ops.RegisterShape("DecodeLibsvm")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "DecodeLibsvm"
# input_arg {
# name: "input"
# type: DT_STRING
# }
# output_arg {
# name: "label"
# type_attr: "label_dtype"
# }
# output_arg {
# name: "feature_indices"
# type: DT_INT64
# }
# output_arg {
# name: "feature_values"
# type_attr: "dtype"
# }
# output_arg {
# name: "feature_shape"
# type: DT_INT64
# }
# attr {
# name: "dtype"
# type: "type"
# default_value {
# type: DT_FLOAT
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "label_dtype"
# type: "type"
# default_value {
# type: DT_INT64
# }
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT32
# type: DT_INT64
# }
# }
# }
# attr {
# name: "num_features"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\311\001\n\014DecodeLibsvm\022\t\n\005input\030\007\032\024\n\005label\"\013label_dtype\032\023\n\017feature_indices\030\t\032\027\n\016feature_values\"\005dtype\032\021\n\rfeature_shape\030\t\"\033\n\005dtype\022\004type\032\0020\001:\010\n\0062\004\001\002\003\t\"!\n\013label_dtype\022\004type\032\0020\t:\010\n\0062\004\001\002\003\t\"\027\n\014num_features\022\003int(\0010\001")
| mit | 5,052,128,959,870,387,000 | 36.005291 | 420 | 0.659708 | false |
demvher/pythondotorg | jobs/views.py | 1 | 8432 | from braces.views import LoginRequiredMixin, GroupRequiredMixin
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, TemplateView, View
from .forms import JobForm
from .models import Job, JobType, JobCategory
class JobBoardAdminRequiredMixin(GroupRequiredMixin):
group_required = "Job Board Admin"
class JobMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
active_locations = Job.objects.visible().distinct(
'location_slug'
).order_by(
'location_slug',
)
context.update({
'jobs_count': Job.objects.visible().count(),
'active_types': JobType.objects.with_active_jobs(),
'active_categories': JobCategory.objects.with_active_jobs(),
'active_locations': active_locations,
})
return context
class JobList(JobMixin, ListView):
model = Job
paginate_by = 25
job_list_view = True
def get_queryset(self):
return super().get_queryset().visible().select_related()
class JobListMine(JobMixin, ListView):
model = Job
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
if self.request.user.is_authenticated():
q = Q(creator=self.request.user)
else:
raise Http404
return queryset.filter(q)
class JobTypeMenu:
def job_type_view(self):
return True
class JobCategoryMenu:
def job_category_view(self):
return True
class JobLocationMenu:
def job_location_view(self):
return True
class JobListType(JobTypeMenu, JobList):
template_name = 'jobs/job_type_list.html'
def get_queryset(self):
return super().get_queryset().filter(job_types__slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_type'] = JobType.objects.get(slug=self.kwargs['slug'])
return context
class JobListCategory(JobCategoryMenu, JobList):
template_name = 'jobs/job_category_list.html'
def get_queryset(self):
return super().get_queryset().filter(category__slug=self.kwargs['slug'])
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_category'] = JobCategory.objects.get(slug=self.kwargs['slug'])
return context
class JobListLocation(JobLocationMenu, JobList):
template_name = 'jobs/job_location_list.html'
def get_queryset(self):
return super().get_queryset().filter(location_slug=self.kwargs['slug'])
class JobTypes(JobTypeMenu, JobMixin, ListView):
""" View to simply list JobType instances that have current jobs """
template_name = "jobs/job_types.html"
queryset = JobType.objects.with_active_jobs().order_by('name')
context_object_name = 'types'
class JobCategories(JobCategoryMenu, JobMixin, ListView):
""" View to simply list JobCategory instances that have current jobs """
template_name = "jobs/job_categories.html"
queryset = JobCategory.objects.with_active_jobs().order_by('name')
context_object_name = 'categories'
class JobLocations(JobLocationMenu, JobMixin, TemplateView):
""" View to simply list distinct Countries that have current jobs """
template_name = "jobs/job_locations.html"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['jobs'] = Job.objects.visible().distinct(
'country', 'city'
).order_by(
'country', 'city'
)
return context
class JobReview(LoginRequiredMixin, JobBoardAdminRequiredMixin, JobMixin, ListView):
template_name = 'jobs/job_review.html'
paginate_by = 20
def get_queryset(self):
return Job.objects.review()
def post(self, request):
try:
job = Job.objects.get(id=request.POST['job_id'])
action = request.POST['action']
except (KeyError, Job.DoesNotExist):
return redirect('jobs:job_review')
if action == 'approve':
job.approve(request.user)
messages.add_message(self.request, messages.SUCCESS, "'%s' approved." % job)
elif action == 'reject':
job.reject(request.user)
messages.add_message(self.request, messages.SUCCESS, "'%s' rejected." % job)
elif action == 'remove':
job.status = Job.STATUS_REMOVED
job.save()
messages.add_message(self.request, messages.SUCCESS, "'%s' removed." % job)
elif action == 'archive':
job.status = Job.STATUS_ARCHIVED
job.save()
messages.add_message(self.request, messages.SUCCESS, "'%s' removed." % job)
return redirect('jobs:job_review')
class JobDetail(JobMixin, DetailView):
model = Job
def get_queryset(self):
""" Show only approved jobs to the public, staff can see all jobs """
qs = Job.objects.select_related()
if self.request.user.is_staff:
return qs
else:
return qs.visible()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(
category_jobs=self.object.category.jobs.select_related('company__name')[:5],
user_can_edit=(self.object.creator == self.request.user)
)
ctx.update(kwargs)
return ctx
class JobDetailReview(LoginRequiredMixin, JobBoardAdminRequiredMixin, JobDetail):
def get_queryset(self):
""" Only staff and creator can review """
if self.request.user.is_staff:
return Job.objects.select_related()
else:
raise Http404()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(
user_can_edit=(
self.object.creator == self.request.user
or self.request.user.is_staff
),
under_review=True,
)
ctx.update(kwargs)
return ctx
class JobCreate(JobMixin, CreateView):
model = Job
form_class = JobForm
def get_success_url(self):
return reverse('jobs:job_thanks')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
if self.request.user.is_authenticated():
kwargs['initial'] = {'email': self.request.user.email}
return kwargs
def form_valid(self, form):
""" set the creator to the current user """
# Associate Job to user if they are logged in
if self.request.user.is_authenticated():
form.instance.creator = self.request.user
return super().form_valid(form)
class JobEdit(JobMixin, UpdateView):
model = Job
form_class = JobForm
def get_queryset(self):
if not self.request.user.is_authenticated():
raise Http404
if self.request.user.is_staff:
return super().get_queryset()
return self.request.user.jobs_job_creator.all()
def form_valid(self, form):
""" set last_modified_by to the current user """
form.instance.last_modified_by = self.request.user
return super().form_valid(form)
def get_context_data(self, **kwargs):
ctx = super().get_context_data(
form_action='update',
)
ctx.update(kwargs)
return ctx
class JobChangeStatus(LoginRequiredMixin, JobMixin, View):
"""
    Abstract class to change a job's status; see the concrete implementations below.
"""
def post(self, request, pk):
job = get_object_or_404(self.request.user.jobs_job_creator, pk=pk)
job.status = self.new_status
job.save()
messages.add_message(self.request, messages.SUCCESS, self.success_message)
return redirect('job_detail', job.id)
class JobPublish(JobChangeStatus):
new_status = Job.STATUS_APPROVED
success_message = 'Your job listing has been published.'
class JobArchive(JobChangeStatus):
new_status = Job.STATUS_ARCHIVED
success_message = 'Your job listing has been archived and is no longer public.'
| apache-2.0 | -7,756,300,799,939,174,000 | 29.330935 | 97 | 0.635674 | false |
rackerlabs/heat-pyrax | tests/unit/test_cloud_cdn.py | 1 | 2706 | import unittest
import mock
from pyrax.cloudcdn import CloudCDNClient
from pyrax.cloudcdn import CloudCDNFlavor
from pyrax.cloudcdn import CloudCDNFlavorManager
from pyrax.cloudcdn import CloudCDNService
from pyrax.cloudcdn import CloudCDNServiceManager
class CloudCDNTest(unittest.TestCase):
@mock.patch("pyrax.client.BaseClient.method_get")
def test_ping(self, mock_get):
sot = CloudCDNClient(mock.MagicMock())
sot.ping()
mock_get.assert_called_with("/ping")
@mock.patch("pyrax.cloudcdn.CloudCDNFlavorManager.list")
def test_list_flavors(self, mock_list):
sot = CloudCDNClient(mock.MagicMock())
sot.list_flavors()
mock_list.assert_called_once_with()
@mock.patch("pyrax.cloudcdn.CloudCDNFlavorManager.get")
def test_get_flavor(self, mock_get):
sot = CloudCDNClient(mock.MagicMock())
flavor = "flavor"
sot.get_flavor(flavor)
mock_get.assert_called_once_with(flavor)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.list")
def test_list_services(self, mock_list):
sot = CloudCDNClient(mock.MagicMock())
sot.list_services()
mock_list.assert_called_with(limit=None, marker=None)
kwargs = {"limit": 1, "marker": 2}
sot.list_services(**kwargs)
mock_list.assert_called_with(**kwargs)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.get")
def test_get_service(self, mock_get):
sot = CloudCDNClient(mock.MagicMock())
service = "service"
sot.get_service(service)
mock_get.assert_called_once_with(service)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.create")
def test_create_service(self, mock_create):
sot = CloudCDNClient(mock.MagicMock())
args = (1, 2, 3, 4, 5, 6, 7)
sot.create_service(*args)
mock_create.assert_called_once_with(*args)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.patch")
def test_patch_service(self, mock_patch):
sot = CloudCDNClient(mock.MagicMock())
args = (1, 2)
sot.patch_service(*args)
mock_patch.assert_called_once_with(*args)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.delete")
def test_delete_service(self, mock_delete):
sot = CloudCDNClient(mock.MagicMock())
service = "service"
sot.delete_service(service)
mock_delete.assert_called_once_with(service)
@mock.patch("pyrax.cloudcdn.CloudCDNServiceManager.delete_assets")
def test_delete_assets(self, mock_delete):
sot = CloudCDNClient(mock.MagicMock())
args = (1, 2, 3)
sot.delete_assets(*args)
mock_delete.assert_called_once_with(*args)
| apache-2.0 | -5,282,767,086,842,360,000 | 35.08 | 70 | 0.673688 | false |
Winnetou/ManuTironis | utils/ngram_getter.py | 1 | 1156 | # this one doesn't crawl the web, it just and only
# takes xml from
import os
import psycopg2
import uni_to_beta
import xml
import xmlrpc
connect_manu = psycopg2.connect("dbname=manu_tironis user=quellen password=quellen")
manu_cursor = connect_manu.cursor()
def get_raw_text(xml):
    # Stub: extracting the raw text from the XML document is not implemented yet.
    pass
def translate(raw_text):
    return uni_to_beta.st(raw_text)
def get_trigrams(translated):
"""
:param translated: str
:return:
"""
tokens = translated.split()
trigrams = []
for index, word in enumerate(tokens[:-2]):
trigram = " ".join(tokens[index], tokens[index + 1], tokens[index + 2])
trigrams.append(trigram)
    # and now save them with a single bulk insert
    # (assumes a single-column "trigrams" table, as implied by the original placeholder)
    manu_cursor.executemany('insert into trigrams values (%s)',
                            [(trigram,) for trigram in trigrams])
    connect_manu.commit()
def main():
    # Walk the corpus directory tree and process every Greek XML file.
    for root, dirs, files in os.walk('here'):
        for filename in files:
            if filename.endswith("_gk.xml"):
                with open(os.path.join(root, filename)) as source:
                    xml = source.read()
                raw_text = get_raw_text(xml)
                translated = translate(raw_text)
                get_trigrams(translated)
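# Guarded entry point (an assumption about intended use: the original file defines
# main() but never calls it), so the script can be run directly over the corpus.
if __name__ == '__main__':
    main()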
| cc0-1.0 | -7,046,498,592,810,975,000 | 24.130435 | 84 | 0.605536 | false |
docwalter/py3status | py3status/modules/uptime.py | 1 | 4335 | # -*- coding: utf-8 -*-
"""
Display system uptime.
Configuration parameters:
format: display format for this module
(default 'up {days} days {hours} hours {minutes} minutes')
Format placeholders:
{decades} decades
{years} years
{weeks} weeks
{days} days
{hours} hours
{minutes} minutes
{seconds} seconds
Note: If you don't use one of the placeholders, the value will be carried over
to the next unit. For example, given an uptime of 1h 30min:
If you use {minutes} as your only placeholder, then its value will be 90.
If you use {hours} and {minutes}, then its values will be 1 and 30, respectively.
Examples:
```
# show uptime without zeroes
uptime {
format = 'up [\?if=weeks {weeks} weeks ][\?if=days {days} days ]
[\?if=hours {hours} hours ][\?if=minutes {minutes} minutes ]'
}
# show uptime in multiple formats using group module
group uptime {
format = "up {output}"
uptime {
format = '[\?if=weeks {weeks} weeks ][\?if=days {days} days ]
[\?if=hours {hours} hours ][\?if=minutes {minutes} minutes]'
}
uptime {
format = '[\?if=weeks {weeks}w ][\?if=days {days}d ]
[\?if=hours {hours}h ][\?if=minutes {minutes}m]'
}
uptime {
format = '[\?if=days {days}, ][\?if=hours {hours}:]
[\?if=minutes {minutes:02d}]'
}
}
```
@author Alexis "Horgix" Chotard <[email protected]>, tobes, lasers
@license BSD
SAMPLE OUTPUT
{'full_text': 'up 1 days 18 hours 20 minutes'}
"""
from time import time
class Py3status:
"""
"""
# available configuration parameters
format = 'up {days} days {hours} hours {minutes} minutes'
def post_config_hook(self):
self._decades = self.py3.format_contains(self.format, 'decades')
self._years = self.py3.format_contains(self.format, 'years')
self._weeks = self.py3.format_contains(self.format, 'weeks')
self._days = self.py3.format_contains(self.format, 'days')
self._hours = self.py3.format_contains(self.format, 'hours')
self._minutes = self.py3.format_contains(self.format, 'minutes')
self._seconds = self.py3.format_contains(self.format, 'seconds')
def uptime(self):
# Units will be computed from bare seconds since timedelta only
        # provides .days and .seconds anyway. Getting rid of the fractional
        # part. Keeping the floating point part would make divmod return
# floats, and thus would require days/hours/minutes/seconds to be
# casted to int before formatting, which would be dirty to handle
# since we can't cast None to int.
with open('/proc/uptime', 'r') as f:
up = int(float(f.readline().split()[0]))
offset = time() - up
cache_timeout = decades = years = weeks = days = hours = minutes = seconds = 0
# Decades
if self._decades:
decades, up = divmod(up, 315360000) # 10 years -> decade
cache_timeout = 315360000
# Years
if self._years:
years, up = divmod(up, 31536000) # 365 days -> year
cache_timeout = 31536000
# Weeks
if self._weeks:
weeks, up = divmod(up, 604800) # 7 days -> week
cache_timeout = 604800
# Days
if self._days:
days, up = divmod(up, 86400) # 24 hours -> day
cache_timeout = 86400
# Hours
if self._hours:
hours, up = divmod(up, 3600) # 60 minutes -> hour
cache_timeout = 3600
# Minutes
if self._minutes:
minutes, up = divmod(up, 60) # 60 seconds -> minute
cache_timeout = 60
# Seconds
if self._seconds:
seconds = up # 1000000000 nanoseconds -> second
cache_timeout = 1
uptime = self.py3.safe_format(self.format, dict(
decades=decades, years=years, weeks=weeks, days=days,
hours=hours, minutes=minutes, seconds=seconds))
return {
'cached_until': self.py3.time_in(sync_to=cache_timeout, offset=offset),
'full_text': uptime
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | -947,488,740,994,104,300 | 32.091603 | 86 | 0.58985 | false |
miracle2k/onkyo-eiscp | setup.py | 1 | 1149 | #!/usr/bin/env python
# coding: utf8
from setuptools import setup, find_packages
# Get long_description from README
import os
here = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(here, 'README.rst'))
long_description = f.read().strip()
f.close()
setup(
name='onkyo-eiscp',
version='1.2.8',
url='https://github.com/miracle2k/onkyo-eiscp',
license='MIT',
author='Michael Elsdörfer',
author_email='[email protected]',
description='Control Onkyo receivers over ethernet.',
long_description=long_description,
packages = find_packages(exclude=('tests*',)),
entry_points="""[console_scripts]\nonkyo = eiscp.script:run\n""",
install_requires=['docopt>=0.4.1', 'netifaces', 'xmltodict>=0.12.0'],
platforms='any',
classifiers=[
'Topic :: System :: Networking',
'Topic :: Games/Entertainment',
'Topic :: Multimedia',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
| mit | 784,736,929,684,786,300 | 30.888889 | 73 | 0.641986 | false |
abztrakt/labtracker | Machine/migrations/0006_auto__add_field_item_unusable__chg_field_item_mac3__chg_field_item_mac.py | 1 | 12585 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Item.unusable'
db.add_column('Machine_item', 'unusable', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Changing field 'Item.mac3'
db.alter_column('Machine_item', 'mac3', self.gf('Machine.models.MacField')())
# Changing field 'Item.mac2'
db.alter_column('Machine_item', 'mac2', self.gf('Machine.models.MacField')())
# Changing field 'Item.mac1'
db.alter_column('Machine_item', 'mac1', self.gf('Machine.models.MacField')())
def backwards(self, orm):
# Deleting field 'Item.unusable'
db.delete_column('Machine_item', 'unusable')
# Changing field 'Item.mac3'
db.alter_column('Machine_item', 'mac3', self.gf('django.db.models.fields.CharField')(max_length=17))
# Changing field 'Item.mac2'
db.alter_column('Machine_item', 'mac2', self.gf('django.db.models.fields.CharField')(max_length=17))
# Changing field 'Item.mac1'
db.alter_column('Machine_item', 'mac1', self.gf('django.db.models.fields.CharField')(max_length=17))
models = {
'LabtrackerCore.group': {
'Meta': {'object_name': 'Group'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2616'}),
'group_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'it': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.InventoryType']", 'null': 'True', 'blank': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['LabtrackerCore.Item']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'LabtrackerCore.inventorytype': {
'Meta': {'object_name': 'InventoryType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2616'}),
'inv_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'namespace': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'LabtrackerCore.item': {
'Meta': {'object_name': 'Item'},
'it': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.InventoryType']"}),
'item_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'LabtrackerCore.labuser': {
'Meta': {'object_name': 'LabUser'},
'accesses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
},
'Machine.contact': {
'Meta': {'object_name': 'Contact'},
'contact_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'Machine.group': {
'Meta': {'object_name': 'Group', '_ormbases': ['LabtrackerCore.Group']},
'casting_server': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'core': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['LabtrackerCore.Group']", 'unique': 'True', 'primary_key': 'True'}),
'gateway': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_lab': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'Machine.history': {
'Meta': {'object_name': 'History'},
'login_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Item']"}),
'mh_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['Machine.Status']", 'null': 'True', 'blank': 'True'}),
'session_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.LabUser']"})
},
'Machine.item': {
'Meta': {'object_name': 'Item', '_ormbases': ['LabtrackerCore.Item']},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'core': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['LabtrackerCore.Item']", 'unique': 'True', 'primary_key': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Location']"}),
'mac1': ('Machine.models.MacField', [], {}),
'mac2': ('Machine.models.MacField', [], {'blank': 'True'}),
'mac3': ('Machine.models.MacField', [], {'blank': 'True'}),
'manu_tag': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'machine_status'", 'symmetrical': 'False', 'to': "orm['Machine.Status']"}),
'stf_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Type']"}),
'unusable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uw_tag': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wall_port': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'warranty_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'Machine.location': {
'Meta': {'object_name': 'Location'},
'building': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '600'}),
'floor': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ml_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'room': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'usable_threshold': ('django.db.models.fields.IntegerField', [], {'default': '95'})
},
'Machine.platform': {
'Meta': {'object_name': 'Platform'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'platform_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'Machine.status': {
'Meta': {'unique_together': "(('ms_id', 'name'),)", 'object_name': 'Status'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'ms_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'Machine.type': {
'Meta': {'object_name': 'Type'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'mt_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Platform']"}),
'specs': ('django.db.models.fields.TextField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Machine']
| apache-2.0 | 8,021,627,425,522,244,000 | 68.916667 | 182 | 0.548669 | false |
opennode/waldur-mastermind | src/waldur_mastermind/marketplace_checklist/admin.py | 1 | 1428 | from django.contrib import admin
from import_export import admin as import_export_admin
from modeltranslation import admin as modeltranslation_admin
from . import models
from .import_export_resources import ChecklistResource
class CategoryAdmin(import_export_admin.ImportExportModelAdmin):
fields = ('icon', 'name', 'description')
class QuestionInline(modeltranslation_admin.TranslationStackedInline):
model = models.Question
fields = ('order', 'description', 'solution', 'correct_answer', 'category', 'image')
class ChecklistCustomerRoleInline(admin.StackedInline):
model = models.ChecklistCustomerRole
fields = ('role',)
class ChecklistProjectRoleInline(admin.StackedInline):
model = models.ChecklistProjectRole
fields = ('role',)
class ChecklistAdmin(
import_export_admin.ImportExportMixin, modeltranslation_admin.TranslationAdmin
):
inlines = [QuestionInline, ChecklistCustomerRoleInline, ChecklistProjectRoleInline]
list_display = ('name', 'description', 'category', 'uuid')
list_filter = ('category',)
fields = ('name', 'description', 'category')
resource_class = ChecklistResource
class AnswerAdmin(admin.ModelAdmin):
list_display = ('user', 'question', 'value')
list_filter = ('question',)
admin.site.register(models.Checklist, ChecklistAdmin)
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Answer, AnswerAdmin)
| mit | 3,977,542,510,639,444,000 | 30.043478 | 88 | 0.757003 | false |
cylc/cylc | cylc/flow/wallclock.py | 1 | 10726 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Wall clock related utilities."""
from calendar import timegm
from datetime import datetime, timedelta
from metomi.isodatetime.timezone import (
get_local_time_zone_format, get_local_time_zone, TimeZoneFormatMode)
DATE_TIME_FORMAT_BASIC = "%Y%m%dT%H%M%S"
DATE_TIME_FORMAT_BASIC_SUB_SECOND = "%Y%m%dT%H%M%S.%f"
DATE_TIME_FORMAT_EXTENDED = "%Y-%m-%dT%H:%M:%S"
DATE_TIME_FORMAT_EXTENDED_SUB_SECOND = "%Y-%m-%dT%H:%M:%S.%f"
_FLAGS = {r'utc_mode': False}
RE_DATE_TIME_FORMAT_EXTENDED = (
r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:Z|[+-][\d:]+)?")
TIME_FORMAT_BASIC = "%H%M%S"
TIME_FORMAT_BASIC_SUB_SECOND = "%H%M%S.%f"
TIME_FORMAT_EXTENDED = "%H:%M:%S"
TIME_FORMAT_EXTENDED_SUB_SECOND = "%H:%M:%S.%f"
TIME_ZONE_STRING_LOCAL_BASIC = get_local_time_zone_format(
TimeZoneFormatMode.reduced)
TIME_ZONE_STRING_LOCAL_EXTENDED = get_local_time_zone_format(
TimeZoneFormatMode.extended)
TIME_ZONE_STRING_UTC = "Z"
TIME_ZONE_UTC_UTC_OFFSET = (0, 0)
TIME_ZONE_LOCAL_UTC_OFFSET = get_local_time_zone()
TIME_ZONE_LOCAL_UTC_OFFSET_HOURS = TIME_ZONE_LOCAL_UTC_OFFSET[0]
TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES = TIME_ZONE_LOCAL_UTC_OFFSET[1]
TIME_ZONE_LOCAL_INFO = {
"hours": TIME_ZONE_LOCAL_UTC_OFFSET[0],
"minutes": TIME_ZONE_LOCAL_UTC_OFFSET[1],
"string_basic": TIME_ZONE_STRING_LOCAL_BASIC,
"string_extended": TIME_ZONE_STRING_LOCAL_EXTENDED
}
TIME_ZONE_UTC_INFO = {
"hours": TIME_ZONE_UTC_UTC_OFFSET[0],
"minutes": TIME_ZONE_UTC_UTC_OFFSET[1],
"string_basic": TIME_ZONE_STRING_UTC,
"string_extended": TIME_ZONE_STRING_UTC
}
PARSER = None
def get_utc_mode():
"""Return value of UTC mode."""
return _FLAGS['utc_mode']
def set_utc_mode(mode):
"""Set value of UTC mode."""
_FLAGS['utc_mode'] = bool(mode)
def now(override_use_utc=None):
"""Return a current-time datetime.datetime and a UTC timezone flag.
Keyword arguments:
override_use_utc (default None) - a boolean (or None) that, if
True, gives the date and time in UTC. If False, it gives the date
and time in the local time zone. If None, the _FLAGS['utc_mode'] boolean is
used.
"""
if override_use_utc or (override_use_utc is None and _FLAGS['utc_mode']):
return datetime.utcnow(), False
else:
return datetime.now(), True
def get_current_time_string(display_sub_seconds=False, override_use_utc=None,
use_basic_format=False):
"""Return a string representing the current system time.
Keyword arguments:
display_sub_seconds (default False) - a boolean that, if True,
switches on microsecond reporting
override_use_utc (default None) - a boolean (or None) that, if
True, switches on utc time zone reporting. If False, it switches
off utc time zone reporting (even if _FLAGS['utc_mode'] is True). If None,
the _FLAGS['utc_mode'] boolean is used.
use_basic_format (default False) - a boolean that, if True,
represents the date/time without "-" or ":" delimiters. This is
most useful for filenames where ":" may cause problems.
"""
date_time, date_time_is_local = now(override_use_utc=override_use_utc)
return get_time_string(date_time, display_sub_seconds=display_sub_seconds,
override_use_utc=override_use_utc,
date_time_is_local=date_time_is_local,
use_basic_format=use_basic_format)
def get_time_string(date_time, display_sub_seconds=False,
override_use_utc=None, use_basic_format=False,
date_time_is_local=False, custom_time_zone_info=None):
"""Return a string representing the current system time.
Arguments:
date_time - a datetime.datetime object.
Keyword arguments:
display_sub_seconds (default False) - a boolean that, if True,
switches on microsecond reporting
override_use_utc (default None) - a boolean (or None) that, if
True, switches on utc time zone reporting. If False, it switches
off utc time zone reporting (even if _FLAGS['utc_mode'] is True). If None,
the _FLAGS['utc_mode'] boolean is used.
use_basic_format (default False) - a boolean that, if True,
represents the date/time without "-" or ":" delimiters. This is
most useful for filenames where ":" may cause problems.
date_time_is_local - a boolean that, if True, indicates that
the date_time argument object is in the local time zone, not UTC.
custom_time_zone_info (default None) - a dictionary that enforces
a particular time zone. It looks like {"hours": _hours,
"minutes": _minutes, "string": _string} where _hours and _minutes
are the hours and minutes offset from UTC and _string is the string
to use as the time zone designator.
"""
time_zone_string = None
if custom_time_zone_info is not None:
custom_hours = custom_time_zone_info["hours"]
custom_minutes = custom_time_zone_info["minutes"]
if use_basic_format:
custom_string = custom_time_zone_info["string_basic"]
else:
custom_string = custom_time_zone_info["string_extended"]
if date_time_is_local:
date_time_hours = TIME_ZONE_LOCAL_UTC_OFFSET_HOURS
date_time_minutes = TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES
else:
date_time_hours, date_time_minutes = (0, 0)
diff_hours = custom_hours - date_time_hours
diff_minutes = custom_minutes - date_time_minutes
date_time = date_time + timedelta(
hours=diff_hours, minutes=diff_minutes)
time_zone_string = custom_string
elif override_use_utc or (override_use_utc is None and _FLAGS['utc_mode']):
time_zone_string = TIME_ZONE_STRING_UTC
if date_time_is_local:
date_time = date_time - timedelta(
hours=TIME_ZONE_LOCAL_UTC_OFFSET_HOURS,
minutes=TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES
)
else:
if use_basic_format:
time_zone_string = TIME_ZONE_STRING_LOCAL_BASIC
else:
time_zone_string = TIME_ZONE_STRING_LOCAL_EXTENDED
if not date_time_is_local:
diff_hours = TIME_ZONE_LOCAL_UTC_OFFSET_HOURS
diff_minutes = TIME_ZONE_LOCAL_UTC_OFFSET_MINUTES
date_time = date_time + timedelta(
hours=diff_hours, minutes=diff_minutes)
if use_basic_format:
date_time_format_string = DATE_TIME_FORMAT_BASIC
if display_sub_seconds:
date_time_format_string = DATE_TIME_FORMAT_BASIC_SUB_SECOND
else:
date_time_format_string = DATE_TIME_FORMAT_EXTENDED
if display_sub_seconds:
date_time_format_string = DATE_TIME_FORMAT_EXTENDED_SUB_SECOND
date_time_string = date_time.strftime(date_time_format_string)
return date_time_string + time_zone_string
def get_time_string_from_unix_time(unix_time, display_sub_seconds=False,
use_basic_format=False,
custom_time_zone_info=None):
"""Convert a unix timestamp into a local time zone datetime.datetime.
Arguments:
unix_time - an integer or float number of seconds since the Unix
epoch.
Keyword arguments:
display_sub_seconds (default False) - a boolean that, if True,
switches on microsecond reporting
use_basic_format (default False) - a boolean that, if True,
represents the date/time without "-" or ":" delimiters. This is
most useful for filenames where ":" may cause problems.
custom_time_zone_info (default None) - a dictionary that enforces
a particular time zone. It looks like {"hours": _hours,
"minutes": _minutes, "string": _string} where _hours and _minutes
are the hours and minutes offset from UTC and _string is the string
to use as the time zone designator.
"""
date_time = datetime.utcfromtimestamp(unix_time)
return get_time_string(date_time,
display_sub_seconds=display_sub_seconds,
use_basic_format=use_basic_format,
override_use_utc=None,
date_time_is_local=False,
custom_time_zone_info=custom_time_zone_info)
def get_unix_time_from_time_string(datetime_string):
"""Convert a datetime string into a unix timestamp.
The datetime_string must match DATE_TIME_FORMAT_EXTENDED above,
which is the extended ISO 8601 year-month-dayThour:minute:second format,
plus a valid ISO 8601 time zone. For example, 2016-09-07T11:21:00+01:00,
2016-12-25T06:00:00Z, or 2016-12-25T06:00:00+13.
isodatetime is not used to do the whole parsing, partly for performance,
but mostly because the calendar may be in non-Gregorian mode.
"""
try:
date_time_utc = datetime.strptime(
datetime_string, DATE_TIME_FORMAT_EXTENDED + "Z")
except ValueError:
global PARSER
if PARSER is None:
from metomi.isodatetime.parsers import TimePointParser
PARSER = TimePointParser()
time_zone_info = PARSER.get_info(datetime_string)[1]
time_zone_hour = int(time_zone_info["time_zone_hour"])
time_zone_minute = int(time_zone_info.get("time_zone_minute", 0))
offset_seconds = 3600 * time_zone_hour + 60 * time_zone_minute
if "+" in datetime_string:
datetime_string = datetime_string.split("+")[0]
else:
datetime_string = datetime_string.rsplit("-", 1)[0]
date_time = datetime.strptime(
datetime_string, DATE_TIME_FORMAT_EXTENDED)
date_time_utc = date_time - timedelta(seconds=offset_seconds)
return timegm(date_time_utc.timetuple())
def get_seconds_as_interval_string(seconds):
"""Convert a number of seconds into an ISO 8601 duration string."""
from metomi.isodatetime.data import Duration
return str(Duration(seconds=seconds, standardize=True))
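# Minimal usage sketch of the helpers documented above (not part of cylc's public
# entry points); the exact output depends on the local clock and time zone.
if __name__ == "__main__":
    print(get_current_time_string())
    print(get_current_time_string(display_sub_seconds=True, use_basic_format=True))
    # Force UTC output via the custom_time_zone_info dictionary accepted by get_time_string().
    print(get_time_string(datetime.utcnow(), custom_time_zone_info=TIME_ZONE_UTC_INFO))
    print(get_unix_time_from_time_string("2016-09-07T11:21:00+01:00"))
    print(get_seconds_as_interval_string(3661))  # e.g. PT1H1M1S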
| gpl-3.0 | -7,570,600,737,711,770,000 | 40.573643 | 79 | 0.658214 | false |
openweave/happy | happy/HappyProcessStart.py | 1 | 15877 | #!/usr/bin/env python3
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyProcessStart class that stars process within virtual nodes.
#
# Process runs a command in a virtual node, which itself
# is a logical representation of a network namespace.
#
from __future__ import absolute_import
import os
import subprocess
import sys
import time
import psutil
import warnings
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.HappyNode import HappyNode
from happy.HappyProcess import HappyProcess
import happy.HappyProcessStop
options = {}
options["quiet"] = False
options["node_id"] = None
options["tag"] = None
options["command"] = None
options["strace"] = False
options["env"] = {}
options["sync_on_output"] = None
options["rootMode"] = False
def option():
return options.copy()
class HappyProcessStart(HappyNode, HappyProcess):
"""
Starts a happy process.
happy-process-start [-h --help] [-q --quiet] [-i --id <NODE_NAME>]
[-t --tag <DAEMON_NAME>] [-s --strace]
[-e --env <ENVIRONMENT>] <COMMAND>
-i --id Optional. Node on which to run the process. Find using
happy-node-list or happy-state.
-t --tag Required. Name of the process.
-s --strace Optional. Enable strace output for the process.
-e --env Optional. An environment variable to pass to the node
for use by the process.
<COMMAND> Required. The command to run as process <DAEMON_NAME>.
Example:
$ happy-process-start BorderRouter ContinuousPing ping 127.0.0.1
Starts a process within the BorderRouter node called ContinuousPing
that runs "ping 127.0.0.1" continuously.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
HappyNode.__init__(self)
HappyProcess.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tag = opts["tag"]
self.command = opts["command"]
self.strace = opts["strace"]
self.env = opts["env"]
self.sync_on_output = opts["sync_on_output"]
self.output_fileput_suffix = ".out"
self.strace_suffix = ".strace"
self.rootMode = opts["rootMode"]
def __stopProcess(self):
emsg = "Process %s stops itself." % (self.tag)
self.logger.debug("[%s] daemon [%s]: %s" % (self.node_id, self.tag, emsg))
options = happy.HappyProcessStop.option()
options["node_id"] = self.node_id
options["tag"] = self.tag
options["quiet"] = self.quiet
stopProcess = happy.HappyProcessStop.HappyProcessStop(options)
stopProcess.run()
self.readState()
def __pre_check(self):
# Check if the new process is given
if not self.tag:
emsg = "Missing name of the new process to start."
self.logger.error("[localhost] HappyProcessStart: %s" % (emsg))
self.exit()
# Check if the name of new process is not a duplicate (that it does not already exists).
if self.processExists(self.tag):
emsg = "virtual process %s already exist." % (self.tag)
self.logger.info("[%s] HappyProcessStart: %s" % (self.node_id, emsg))
self.__stopProcess()
# Check if the process command is given
if not self.command:
emsg = "Missing process command."
self.logger.error("[localhost] HappyProcessStart: %s" % (emsg))
self.exit()
timeStamp = "%010.6f" % time.time()
pid = "%06d" % os.getpid()
emsg = "Tag: %s PID: %s timeStamp : %s" % (self.tag, pid, timeStamp)
self.logger.debug("[%s] HappyProcessStart: %s" % (self.node_id, emsg))
self.output_file = self.process_log_prefix + pid + \
"_" + timeStamp + "_" + self.tag + self.output_fileput_suffix
self.strace_file = self.process_log_prefix + pid + \
"_" + timeStamp + "_" + self.tag + self.strace_suffix
def __poll_for_output(self):
poll_interval_sec = 0.01
max_poll_time_sec = 180
time_slept = 0
tail = open(self.output_file, "r")
self.logger.debug("[%s] HappyProcessStart: polling for output: %s" % (self.node_id, self.sync_on_output))
while (True):
line = tail.readline()
if not line:
time.sleep(poll_interval_sec)
time_slept += poll_interval_sec
poll_interval_sec *= 2
if (time_slept > max_poll_time_sec):
self.logger.debug("[%s] HappyProcessStart: can't find the output requested: %s" %
(self.node_id, self.sync_on_output))
raise RuntimeError("Can't find the output requested")
elif self.sync_on_output in line:
self.logger.debug("[%s] HappyProcessStart: found output: %s in %s secs" %
(self.node_id, self.sync_on_output, str(time_slept)))
break
else:
continue
tail.close()
return
def __start_daemon(self):
cmd = self.command
# We need to support 8 combinations:
# Who: user or root
# strace: yes or not
# env: yes or not
# Given this script called sayhello.sh:
# #!/bin/bash
# echo Hello ${USER}!
# echo You passed the following opts $1, $2, $3
# echo MYENVVAR is $MYENVVAR
# a successful run with an environment variable prints:
# Hello andreello!
# You passed the following opts a, b, c
# MYENVVAR is hello
# The goal is to use the simples command line possible; in particular, we don't
# want to call sudo unless strictly necessary (for performance reasons).
# Here is how the CLI looks like if you use "ip netns exec" directly:
# user without env:
# sudo ip netns exec happy000 sudo -u andreello ./sayhello.sh a b c
# user with env:
# sudo ip netns exec happy000 sudo -u andreello MYENVVAR=hello ./sayhello.sh a b c
# root without env:
# ip netns exec happy000 ./sayhello.sh a b c
# root with env
# ip netns exec happy000 bash -c 'MYENVVAR=hello ./sayhello.sh a b c'
# user with strace, without env
# sudo ip netns exec happy000 sudo -u andreello strace -tt -o strace.out ./sayhello.sh a b c
# user with strace, with env
# sudo ip netns exec happy000 sudo -u andreello strace -tt -o strace.out -E MYENVVAR=hello ./sayhello.sh a b c
# root with strace, without env
# ip netns exec happy000 strace -tt -o strace.out ./sayhello.sh a b c
# root with strace, with env
# ip netns exec happy000 strace -tt -o strace.out -E MYENVVAR=hello ./sayhello.sh a b c
# Highlights:
# - to pass environment variables, either 'strace -E' or 'bash -c'
# - but, 'bash -c' requires the command to be in one string, while 'strace -E' requires the opposite
# - the examples above show the argument to 'bash -c' in quotes, but they are not necessary when passing
# the list of strings to Popen()
# - also, the examples above show only one env var; if passing more than one to strace, they need to have
# a '-E' each
# In summary, it's easier to build the cmd as a full string, and then split it the right way depending
# on strace vs bash.
# Here are a few examples of how the string is split into a list:
#
# user without env:
# ./bin/happy-process-start.py -i node01 -t HELLO ./sayhello.sh a b c
# [u'sudo', u'ip', u'netns', u'exec', u'happy000', u'sudo', u'-u', u'andreello', u'./sayhello.sh', u'a', u'b', u'c']
#
# user with env:
# ./bin/happy-process-start.py -i node01 -e "MYENVVAR=hello" -t HELLO ./sayhello.sh a b c
# [u'sudo', u'ip', u'netns', u'exec', u'happy000', u'sudo', u'-u', u'andreello',
# u'MYENVVAR=hello', u'./sayhello.sh', u'a', u'b', u'c']
#
# root without env:
# sudo ./bin/happy-process-start.py -i node01 -t HELLO ./sayhello.sh a b c
# [u'ip', u'netns', u'exec', u'happy000', u'./sayhello.sh', u'a', u'b', u'c']
#
# user with env and strace:
# ./bin/happy-process-start.py -i node01 -e "MYENVVAR=hello" -s -t HELLO ./sayhello.sh a b c
# [u'sudo', u'ip', u'netns', u'exec', u'happy000', u'sudo', u'-u', u'andreello', u'strace', u'-tt', u'-o',
# u'/tmp/happy_..._HELLO.strace', u'-E', u'MYENVVAR=hello', u'./sayhello.sh', u'a', u'b', u'c']
#
# root with env:
# [u'ip', u'netns', u'exec', u'happy000', 'bash', '-c', u' MYENVVAR=hello ./sayhello.sh a b c']
#
# root with strace no env:
# sudo ./bin/happy-process-start.py -i node01 -s -t HELLO ./sayhello.sh a b c
#
# root with strace and env:
# [u'ip', u'netns', u'exec', u'happy000', u'strace', u'-tt', u'-o', u'/tmp/happy_..._HELLO.strace',
# u'-E', u'MYENVVAR=hello', u'./sayhello.sh', u'a', u'b', u'c']
need_internal_sudo = False
if os.getuid() != 0:
need_internal_sudo = True
if "sudo" in cmd.split():
# The command already has the inner sudo; typical case is that
# a normal user started Happy, and the script needs to run
# a command in a node as root. If sudo is for root, remove it.
# TODO: properly support "sudo -u" with strace
cmd = self.stripRunAsRoot(cmd)
need_internal_sudo = False
env_vars_list = []
cmd_list_prefix = []
need_bash = False
if "bash -c" in cmd:
tmp = cmd.split("bash -c")
need_bash = True
cmd_list_prefix = tmp[0].split()
cmd = tmp[1]
for key, value in self.env.items():
tmp = ""
try:
tmp = "" + key + "=" + value
env_vars_list.append(tmp)
except:
self.logger.error("Failed to serialize environment variable %s" % (key));
self.logger.debug("HappyProcessStart with env: > %s" % (env_vars_list))
if self.strace:
cmd_list_prefix = ["strace", "-tt", "-o", self.strace_file] + cmd_list_prefix
tmp = []
for i in env_vars_list:
tmp.append("-E")
tmp.append(i)
env_vars_list = tmp
elif need_internal_sudo:
pass
elif len(env_vars_list):
need_bash = True
if need_internal_sudo:
if self.rootMode:
tmp = self.getRunAsRootPrefixList()
else:
tmp = self.getRunAsUserPrefixList()
cmd_list_prefix = tmp + cmd_list_prefix
if self.node_id:
cmd_list_prefix = ["ip", "netns", "exec", self.uniquePrefix(self.node_id)] + cmd_list_prefix
cmd_list_prefix = self.getRunAsRootPrefixList() + cmd_list_prefix
try:
self.fout = open(self.output_file, "wb", 0)
except Exception:
emsg = "Failed to open file %s." % (self.output_file)
self.logger.error("[%s] HappyProcessStart: %s." % (self.node_id, emsg))
self.exit()
self.logger.debug("HappyProcessStart: > %s" % (cmd))
popen = None
try:
cmd_list = []
if need_bash:
env_vars_list = []
for key, value in self.env.items():
tmp = ""
try:
tmp = "" + key + '="' + value.replace('\\','\\\\').replace('"','\\"') +'"'
env_vars_list.append(tmp)
except:
self.logger.error("Failed to serialize environment variable %s" % (key));
cmd = " ".join(env_vars_list) + ' ' + cmd
cmd_list = cmd_list_prefix + ["bash", "-c", cmd]
else:
cmd_list = cmd_list_prefix + env_vars_list + cmd.split()
self.logger.debug("[%s] HappyProcessStart: executing command list %s" % (self.node_id, cmd_list))
popen = subprocess.Popen(cmd_list, stdin=subprocess.PIPE, stdout=self.fout)
self.child_pid = popen.pid
emsg = "running daemon %s (PID %d)" % (self.tag, self.child_pid)
self.logger.debug("[%s] HappyProcessStart: %s" % (self.node_id, emsg))
# The following is guaranteed to fetch info about the right process (i.e. the PID has
# no chance of being reused) because even if the child process terminates right away, it'll stay
# around in <defunct> until the popen object has been destroyed or popen.poll() has
# been called.
p = psutil.Process(self.child_pid)
# At python.psutil 2.0.0, create_time changed from a data
# member to a member function. Try to access the modern member
# function first. If that throws, try the old data member.
try:
self.create_time = p.create_time()
except Exception:
self.create_time = p.create_time
emsg = "Create time: " + str(self.create_time)
self.logger.debug("[%s] HappyProcessStart: %s." % (self.node_id, emsg))
if self.sync_on_output:
self.__poll_for_output()
except Exception as e:
if popen:
# We need to kill the process tree; if popen succeeded,
# we assume we were also able to get the create_time
self.TerminateProcessTree(popen.pid, self.create_time)
emsg = "Starting process with command %s FAILED with %s." % (cmd, str(e))
self.logger.error("[%s] HappyProcessStart: %s." % (self.node_id, emsg))
self.exit()
def __post_check(self):
pass
def __update_state(self):
emsg = "Update State with tag %s running command: %s" % \
(self.tag, self.command)
self.logger.debug("[%s] HappyProcessStart: %s ." % (self.node_id, emsg))
new_process = {}
new_process["pid"] = self.child_pid
new_process["out"] = self.output_file
new_process["strace"] = self.strace_file
new_process["command"] = self.command
new_process["create_time"] = self.create_time
self.setNodeProcess(new_process, self.tag, self.node_id)
self.writeState()
def run(self):
with self.getStateLockManager():
self.readState()
self.__pre_check()
self.__start_daemon()
self.__update_state()
self.__post_check()
return ReturnMsg(0)
| apache-2.0 | 3,041,653,095,024,345,000 | 38.009828 | 124 | 0.552749 | false |
TaskEvolution/Task-Coach-Evolution | taskcoach/tests/unittests/RenderTest.py | 1 | 9601 | # -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import test
from taskcoachlib import render
from taskcoachlib.i18n import _
from taskcoachlib.domain import date
class RenderDateTime(test.TestCase):
def assertRenderedDateTime(self, expectedDateTime, *dateTimeArgs):
renderedDateTime = render.dateTime(date.DateTime(*dateTimeArgs))
if expectedDateTime:
renderedParts = renderedDateTime.split(' ', 1)
if len(renderedParts) > 1:
renderedDate, renderedTime = renderedParts
expectedDate, expectedTime = expectedDateTime.split(' ', 1)
self.assertEqual(expectedTime, renderedTime)
else:
expectedDate, renderedDate = expectedDateTime, renderedDateTime
self.assertEqual(expectedDate, renderedDate)
else:
self.assertEqual(expectedDateTime, renderedDateTime)
@staticmethod
def expectedDateTime(*dateTimeArgs):
return render.dateTimeFunc(date.DateTime(*dateTimeArgs))
@staticmethod
def expectedDate(*dateTimeArgs):
return render.dateFunc(date.DateTime(*dateTimeArgs))
def testSomeRandomDateTime(self):
expectedDateTime = self.expectedDateTime(2010, 4, 5, 12, 54)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5, 12, 54, 42)
def testInfiniteDateTime(self):
self.assertRenderedDateTime('')
def testStartOfDay(self):
expectedDateTime = self.expectedDate(2010, 4, 5)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5)
def testEndOfDay(self):
expectedDateTime = self.expectedDate(2010, 4, 5)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5, 23, 59, 59)
def testEndOfDayWithoutSeconds(self):
expectedDateTime = self.expectedDate(2010, 4, 5)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5, 23, 59)
def testAlmostStartOfDay(self):
expectedDateTime = self.expectedDateTime(2010, 4, 5, 0, 1)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5, 0, 1, 0)
def testAlmostEndOfDay(self):
expectedDateTime = self.expectedDateTime(2010, 4, 5, 23, 58)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5, 23, 58, 59)
def testElevenOClock(self):
expectedDateTime = self.expectedDateTime(2010, 4, 5, 23, 0)
self.assertRenderedDateTime(expectedDateTime, 2010, 4, 5, 23, 0, 0)
def testDateBefore1900(self):
# Don't check for '1801' since the year may be formatted on only 2
# digits.
result = render.dateTime(date.DateTime(1801, 4, 5, 23, 0, 0))
self.failUnless('01' in result, result)
class RenderDate(test.TestCase):
def testRenderDateWithDateTime(self):
self.assertEqual(render.date(date.DateTime(2000, 1, 1)),
render.date(date.DateTime(2000, 1, 1, 10, 11, 12)))
class RenderTimeLeftTest(test.TestCase):
def testNoTimeLeftWhenActive(self):
timeLeft = date.TimeDelta()
self.assertEqual('0:00', render.timeLeft(timeLeft, False))
def testNoTimeLeftWhenCompleted(self):
self.assertEqual('', render.timeLeft(date.TimeDelta(), True))
def testNoTimeLeftWhenNoDueDate(self):
self.assertEqual('', render.timeLeft(date.TimeDelta.max, False))
def testInfiniteTimeLeftWhenCompleted(self):
self.assertEqual('', render.timeLeft(date.TimeDelta.max, True))
def testOneDayLeftWhenActive(self):
timeLeft = date.TimeDelta(days=1)
self.assertEqual('1 day, 0:00', render.timeLeft(timeLeft, False))
def testOneDayLeftWhenCompleted(self):
timeLeft = date.TimeDelta(days=1)
self.assertEqual('', render.timeLeft(timeLeft, True))
def testTwoDaysLeftWhenActive(self):
timeLeft = date.TimeDelta(days=2)
self.assertEqual('2 days, 0:00', render.timeLeft(timeLeft, False))
def testTwoDaysLeftWhenCompleted(self):
timeLeft = date.TimeDelta(days=2)
self.assertEqual('', render.timeLeft(timeLeft, True))
def testOneDayLateWhenActive(self):
timeLeft = date.TimeDelta(days=-1)
self.assertEqual('-1 day, 0:00', render.timeLeft(timeLeft, False))
def testOneDayLateWhenCompleted(self):
timeLeft = date.TimeDelta(days=-1)
self.assertEqual('', render.timeLeft(timeLeft, True))
def testOneHourLateWhenActive(self):
timeLeft = -date.ONE_HOUR
self.assertEqual('-1:00', render.timeLeft(timeLeft, False))
def testOneDayHourWhenCompleted(self):
timeLeft = -date.ONE_HOUR
self.assertEqual('', render.timeLeft(timeLeft, True))
class RenderTimeSpentTest(test.TestCase):
def testZeroTime(self):
self.assertEqual('', render.timeSpent(date.TimeDelta()))
def testOneSecond(self):
self.assertEqual('0:00:01', render.timeSpent(date.ONE_SECOND))
def testTenHours(self):
self.assertEqual('10:00:00',
render.timeSpent(date.TimeDelta(hours=10)))
def testNegativeHours(self):
self.assertEqual('-1:00:00',
render.timeSpent(date.TimeDelta(hours=-1)))
def testNegativeSeconds(self):
self.assertEqual('-0:00:01',
render.timeSpent(date.TimeDelta(seconds=-1)))
class RenderWeekNumberTest(test.TestCase):
def testWeek1(self):
self.assertEqual('2005-1',
render.weekNumber(date.DateTime(2005, 1, 3)))
def testWeek53(self):
self.assertEqual('2004-53',
render.weekNumber(date.DateTime(2004, 12, 31)))
class RenderRecurrenceTest(test.TestCase):
def testNoRecurrence(self):
self.assertEqual('', render.recurrence(date.Recurrence()))
def testDailyRecurrence(self):
self.assertEqual(_('Daily'),
render.recurrence(date.Recurrence('daily')))
def testWeeklyRecurrence(self):
self.assertEqual(_('Weekly'),
render.recurrence(date.Recurrence('weekly')))
def testMonthlyRecurrence(self):
self.assertEqual(_('Monthly'),
render.recurrence(date.Recurrence('monthly')))
def testYearlyRecurrence(self):
self.assertEqual(_('Yearly'),
render.recurrence(date.Recurrence('yearly')))
def testEveryOtherDay(self):
self.assertEqual(_('Every other day'),
render.recurrence(date.Recurrence('daily', amount=2)))
def testEveryOtherWeek(self):
self.assertEqual(_('Every other week'),
render.recurrence(date.Recurrence('weekly', amount=2)))
def testEveryOtherMonth(self):
self.assertEqual(_('Every other month'),
render.recurrence(date.Recurrence('monthly', amount=2)))
def testEveryOtherYear(self):
self.assertEqual(_('Every other year'),
render.recurrence(date.Recurrence('yearly', amount=2)))
def testThreeDaily(self):
self.assertEqual('Every 3 days',
render.recurrence(date.Recurrence('daily', amount=3)))
def testThreeWeekly(self):
self.assertEqual('Every 3 weeks',
render.recurrence(date.Recurrence('weekly', amount=3)))
def testThreeMonthly(self):
self.assertEqual('Every 3 months',
render.recurrence(date.Recurrence('monthly', 3)))
def testThreeYearly(self):
self.assertEqual('Every 3 years',
render.recurrence(date.Recurrence('yearly', 3)))
class RenderException(test.TestCase):
def testRenderException(self):
instance = Exception()
self.assertEqual(unicode(instance),
render.exception(Exception, instance))
def testRenderUnicodeDecodeError(self):
try:
'abc'.encode('utf-16').decode('utf-8')
except UnicodeDecodeError, instance:
self.assertEqual(unicode(instance),
render.exception(UnicodeDecodeError, instance))
def testExceptionThatCannotBePrinted(self):
"""win32all exceptions may contain localized error
messages. But Exception.__str__ does not handle non-ASCII
characters in the args instance variable; calling
unicode(instance) is just like calling str(instance) and
raises an UnicodeEncodeError."""
e = Exception(u'é')
try:
render.exception(Exception, e)
except UnicodeEncodeError: # pragma: no cover
self.fail()
| gpl-3.0 | 5,923,536,410,789,911,000 | 37.709677 | 81 | 0.636042 | false |
recombinators/worker | models.py | 1 | 9335 | import os
import transaction
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, UnicodeText, Boolean, DateTime
from datetime import datetime
import requests
mailgun_key = os.environ['MAILGUN_KEY']
mailgun_url = os.environ['MAILGUN_URL']
DBSession = scoped_session(
sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
engine = create_engine(os.environ.get('DATABASE_URL'))
DBSession.configure(bind=engine)
Base.metadata.bind = engine
class WorkerLog(Base):
"""Model for the worker log."""
__tablename__ = 'worker_log'
id = Column(Integer, primary_key=True)
instanceid = Column(UnicodeText)
date_time = Column(DateTime)
statement = Column(UnicodeText)
value = Column(UnicodeText)
activitytype = Column(UnicodeText)
@classmethod
def log_entry(cls, instanceid, statement, value, activity_type):
current_time = datetime.utcnow()
entry = WorkerLog(instanceid=instanceid,
date_time=current_time,
statement=statement,
value=value,
activitytype=activity_type)
DBSession.add(entry)
transaction.commit()
class RenderCache_Model(Base):
"""
Model for the already rendered files.
"""
__tablename__ = 'render_cache'
id = Column(Integer, primary_key=True)
jobid = Column(Integer)
entityid = Column(UnicodeText)
band1 = Column(Integer)
band2 = Column(Integer)
band3 = Column(Integer)
previewurl = Column(UnicodeText)
renderurl = Column(UnicodeText)
rendercount = Column(Integer, default=0)
currentlyrend = Column(Boolean)
@classmethod
def add(cls, jobid, currentlyrend):
"""
Method adds entry into db given jobid and optional url.
"""
jobQuery = DBSession.query(UserJob_Model).get(jobid)
job = RenderCache_Model(entityid=jobQuery.entityid,
jobid=jobid,
band1=jobQuery.band1,
band2=jobQuery.band2,
band3=jobQuery.band3,
currentlyrend=currentlyrend)
DBSession.add(job)
transaction.commit()
@classmethod
def update(cls, jobid, currentlyrend, renderurl):
"""
Method updates entry into db given jobid and optional url.
"""
try:
DBSession.query(cls).filter(cls.jobid == jobid).update({
"currentlyrend": currentlyrend, "renderurl": renderurl})
transaction.commit()
except:
print 'Could not update database.'
@classmethod
def update_p_url(cls, scene, band1, band2, band3, previewurl):
"""
Method updates entry into db with preview url.
"""
# Convert parameters into correct type
band1, band2, band3 = int(band1), int(band2), int(band3)
previewurl = u'{}'.format(previewurl)
try:
entry = DBSession.query(cls).filter(cls.entityid == scene,
cls.band1 == band1,
cls.band2 == band2,
cls.band3 == band3).first()
# update entry if already exists,
# if there is no existing entry, add it.
if entry:
entry.update({"previewurl": previewurl})
transaction.commit()
else:
new = RenderCache_Model(entityid=scene,
band1=band1,
band2=band2,
band3=band3,
previewurl=previewurl
)
DBSession.add(new)
transaction.commit()
except:
print 'Could not add the preview URL to the database.'
class UserJob_Model(Base):
"""
Model for the user job queue. Possible job statuses:
status_key = {
0: "In queue",
1: "Downloading",
2: "Processing",
3: "Compressing",
4: "Uploading to server",
5: "Done",
10: "Failed"}
"""
__tablename__ = 'user_job'
jobid = Column(Integer, primary_key=True)
entityid = Column(UnicodeText)
userip = Column(UnicodeText)
email = Column(UnicodeText)
band1 = Column(Integer)
band2 = Column(Integer)
band3 = Column(Integer)
jobstatus = Column(Integer, nullable=False)
starttime = Column(DateTime, nullable=False)
lastmodified = Column(DateTime, nullable=False)
status1time = Column(DateTime)
status2time = Column(DateTime)
status3time = Column(DateTime)
status4time = Column(DateTime)
status5time = Column(DateTime)
status10time = Column(DateTime)
rendertype = Column(UnicodeText)
workerinstanceid = Column(UnicodeText)
@classmethod
def new_job(cls,
entityid=entityid,
band1=4,
band2=3,
band3=2,
jobstatus=0,
starttime=datetime.utcnow(),
rendertype=None
):
"""
Create a new job in the database.
"""
try:
session = DBSession
current_time = datetime.utcnow()
job = UserJob_Model(entityid=entityid,
band1=band1,
band2=band2,
band3=band3,
jobstatus=0,
starttime=current_time,
lastmodified=current_time,
rendertype=rendertype
)
session.add(job)
session.flush()
session.refresh(job)
pk = job.jobid
transaction.commit()
            # could do this or a subtransaction, i.e. open a transaction at the
            # beginning of this method.
transaction.begin()
except:
return None
try:
RenderCache_Model.add(pk, True)
except:
print 'Could not add job to rendered db'
return pk
@classmethod
def set_job_status(cls, jobid, status, url=None):
"""
Set jobstatus for jobid passed in.
"""
table_key = {1: "status1time",
2: "status2time",
3: "status3time",
4: "status4time",
5: "status5time",
10: "status10time"}
try:
current_time = datetime.utcnow()
DBSession.query(cls).filter(cls.jobid == int(jobid)).update(
{"jobstatus": status,
table_key[int(status)]: current_time,
"lastmodified": current_time
})
transaction.commit()
except:
print 'Database write failed.'
# Tell render_cache db we have this image now
if int(status) == 5:
try:
RenderCache_Model.update(jobid, False, url)
except:
print 'Could not update Rendered db'
try:
cls.email_user(jobid)
except:
print 'Email failed'
@classmethod
def email_user(cls, jobid):
"""
If request contains email_address, send email to user with a link to
the full render zip file.
"""
job = DBSession.query(cls).filter(cls.jobid == int(jobid)).first()
email_address = job.email
if email_address:
bands = str(job.band1) + str(job.band2) + str(job.band3)
scene = job.entityid
full_render = ("http://snapsatcompositesjoel.s3.amazonaws.com/{}_bands"
"_{}.zip").format(scene, bands)
scene_url = 'http://snapsat.org/scene/{}#{}'.format(scene, bands)
request_url = 'https://api.mailgun.net/v2/{0}/messages'.format(
mailgun_url)
requests.post(request_url, auth=('api', mailgun_key),
data={
'from': '[email protected]',
'to': email_address,
'subject': 'Snapsat has rendered your request',
'text': ("Thank you for using Snapsat.\nYour full composite is"
" available here:\n{}\nScene data can be found here:"
"\n{}\n\n-Snapsat.org").format(full_render, scene_url)
})
@classmethod
def set_worker_instance_id(cls, jobid, worker_instance_id):
"""
Set worker instance id for requested job to track which worker is doing
the job.
"""
try:
DBSession.query(cls).filter(cls.jobid == int(jobid)).update(
{"workerinstanceid": worker_instance_id})
transaction.commit()
except:
print 'database write failed'
| mit | 3,469,872,206,858,366,500 | 34.359848 | 83 | 0.532191 | false |
belokop-an/agenda-tools | code/MaKaC/webinterface/rh/errors.py | 1 | 2685 | import smtplib
import MaKaC.webinterface.pages.errors as errors
from MaKaC.webinterface.rh.base import RH
from MaKaC.common import Config
class RHErrorReporting(RH):
"""Handles the reporting of errors to the Agenda support.
This handler is quite special as it has to handle the reporting of
generic errors to the support of the application; any error can happen
    which means that even the DB might not be available, so it has to use
the minimal system resources possible in order to always be able to
report errors.
"""
def _checkParams( self, params ):
self._sendIt = params.has_key( "confirm" )
self._comments = ""
if self._sendIt:
self._comments = params.get("comments", "")
self._userMail = params.get("userEmail", "")
self._msg = params.get("reportMsg", "")
def _sendReport( self ):
        #cannot use the standard indico mailer as it has to use minimal features
# and not use the DB connection
cfg = Config.getInstance()
fromAddr = self._userMail
toAddr = cfg.getSupportEmail()
subject = "[agenda@%s] Error report"%cfg.getBaseURL()
body = ["-"*20, "User Comments\n", "%s\n\n"%self._comments, "-"*20, \
"Error details\n", self._msg, "-"*20 ]
msg = "Content-Type: text/plain; charset=\"utf-8\"\r\nFrom: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n%s"%( fromAddr, toAddr,\
subject, "\n".join( body ) )
s = smtplib.SMTP( cfg.getSmtpServer() )
if Config.getInstance().getSmtpUseTLS():
s.ehlo()
(code, errormsg) = s.starttls()
if code != 220:
from MaKaC.errors import MaKaCError
raise MaKaCError("Can't start secure connection to SMTP server: %d, %s"%(code, errormsg))
if Config.getInstance().getSmtpLogin():
login = Config.getInstance().getSmtpLogin()
password = Config.getInstance().getSmtpPassword()
(code, errormsg) = s.login(login, password)
if code != 235:
from MaKaC.errors import MaKaCError
raise MaKaCError("Can't login on SMTP server: %d, %s"%(code, errormsg))
s.sendmail( fromAddr, toAddr, msg)
def process( self, params ):
self._checkParams( params )
if self._sendIt:
self._sendReport()
p = errors.WPReportErrorSummary( self )
return p.display()
else:
p = errors.WPReportError( self )
return p.display( userEmail = self._userMail, msg = self._msg )
| gpl-2.0 | -3,915,548,017,531,616,000 | 40.953125 | 127 | 0.580261 | false |
bnrubin/userv | tests/test_encyclopedia.py | 1 | 2665 | from flask import request, url_for
from userv.encyclopedia.models import Fact
from pprint import pprint
from datetime import datetime, timezone
import arrow
import json
def test_factoids_all_one(session, db,app, client):
now = datetime.now(timezone.utc)
anow = arrow.get(now)
f = Fact(id=1, name='foo', author='Ben Franklin', popularity=42,
value="don't panic", added=now)
session.add(f)
#session.commit()
print(client.get('/api/v1/factoids/all').get_data())
response = client.get(url_for('encyclopedia.factoidsall')).json
expected = [
{'id': 1,
'name': 'foo',
'author': 'Ben Franklin',
'popularity': 42,
'value': "don't panic",
'added': str(anow)
}
]
assert response == expected
def test_factoids_all_many(session, db,app, client):
now = datetime.now(timezone.utc)
anow = arrow.get(now)
f = Fact(id=1, name='foo', author='Ben Franklin', popularity=42,
value="don't panic", added=now)
session.add(f)
f = Fact(id=2, name='bar', author='Alexander Hamilton', popularity=-1,
value='I am not giving away my shot', added=now)
session.add(f)
print(client.get('/api/v1/factoids/all').get_data())
response = client.get(url_for('encyclopedia.factoidsall')).json
expected = [
{'id': 1,
'name': 'foo',
'author': 'Ben Franklin',
'popularity': 42,
'value': "don't panic",
'added': str(anow)
},
{'id': 2,
'name': 'bar',
'author': 'Alexander Hamilton',
'popularity': -1,
'value': 'I am not giving away my shot',
'added': str(anow)
}
]
assert response == expected
def test_factoid_one(session, client):
now = datetime.now(timezone.utc)
anow = arrow.get(now)
f = Fact(id=1, name='foo', author='Ben Franklin', popularity=42,
value="don't panic", added=now)
session.add(f)
#session.commit()
print(client.get('/api/v1/factoids/fact/foo').get_data())
response = client.get(url_for('encyclopedia.factoidbyname', name='foo')).json
expected = {'id': 1,
'name': 'foo',
'author': 'Ben Franklin',
'popularity': 42,
'value': "don't panic",
'added': str(anow)
}
assert response == expected
| mit | 1,062,741,826,242,034,800 | 27.351064 | 81 | 0.512195 | false |
MKLab-ITI/category-based-classification | OobFusion_2D.py | 1 | 19633 | '''
OobFusion_2D.py
This code uses two different types of feature vectors (word2vec and n-grams) on the training set to train two random forests.
Probabilities of these two models are then fused by weighted averaging to calculate the final predictions.
'''
import nltk, re, os, pickle, time, sys
import numpy as np
from gensim.models import Word2Vec
from nltk.corpus import stopwords
from nltk import PorterStemmer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import StratifiedShuffleSplit
from scipy.sparse import hstack
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
def review_to_wordlist( review, remove_stopwords=False ):
# Function to convert a document to a sequence of words,
# optionally removing stop words. Returns a list of words.
#
# 1. Remove non-letters
review_text = re.sub("[^a-zA-Z]"," ", review)
#
# 2. Convert words to lower case and split them
words = review_text.lower().split()
#
# 3. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
#
# 4. Return a list of words
return(words)
# Define a function to split a review into parsed sentences
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
# Function to split a review into parsed sentences. Returns a
# list of sentences, where each sentence is a list of words
#
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.strip())
#
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
# If a sentence is empty, skip it
if len(raw_sentence) > 0:
# Otherwise, call review_to_wordlist to get a list of words
sentences.append( review_to_wordlist( raw_sentence, \
remove_stopwords ))
#
# Return the list of sentences (each sentence is a list of words,
# so this returns a list of lists
return sentences
def trainWord2Vec(dimensionality, context_window, corpus):
# train word2vec model
#
allSentences= pickle.load( open( corpus, "rb" ) )
# calculate and return model
model = Word2Vec(allSentences, size=dimensionality, window=context_window, min_count=1, workers=4)
return model
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,),dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.index2word)
#
# Loop over each word in the review and, if it is in the model's
# vocaublary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1.
featureVec = np.add(featureVec,model[word])
#
# Divide the result by the number of words to get the average
featureVec = np.divide(featureVec,nwords)
return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
# Given a set of reviews (each one a list of words), calculate
# the average feature vector for each one and return a 2D numpy array
#
# Initialize a counter
counter = 0.
#
# Preallocate a 2D numpy array, for speed
reviewFeatureVecs = np.zeros((len(reviews),num_features),dtype="float32")
#
# Loop through the reviews
for review in reviews:
# print(review[0])
#
# Print a status message every 1000th review
if counter%1000. == 0.:
print ("Document %d of %d" % (counter, len(reviews)))
#
# Call the function (defined above) that makes average feature vectors
reviewFeatureVecs[counter] = makeFeatureVec(review, model, \
num_features)
#
# Increment the counter
counter = counter + 1.
return reviewFeatureVecs
def create_docs_and_labels_variables(datasetDir = "SIMMO/"):
# GET DOCUMENTS AND CONVERT THEM TO W2V FORMAT
allDocuments_w2v=[]
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
print("Loading sentence words for word2vec...")
#list category folders of simmo dataset
categories = os.listdir(datasetDir)
for category in categories:
# list files for each category
files = os.listdir(datasetDir+category)
for file in files:
f = open(datasetDir+category+"/"+file, encoding='utf8')
text=''
aggregated = list()
for line in f:
text += line
sentences = review_to_sentences(text,tokenizer,True)
            # convert the list of sentences (where each sentence is a list of words) into a single flat list of document words
for sentence in sentences:
aggregated.extend(sentence)
# add document as a whole to the documents' list, we use append to add the whole list (document) and not only the values (words)
allDocuments_w2v.append(aggregated)
# GET DOCUMENTS AND CONVERT THEM TO NGRAM FORMAT
allDocuments_ngrams=[]
print("Loading sentence words for ngrams...")
for category in categories:
# list files for each category
files = os.listdir(datasetDir+category)
for file in files:
f = open(datasetDir+category+"/"+file, encoding='utf8')
text=''
aggregated = list()
for line in f:
text += line
text = re.sub("[^a-zA-Z]"," ", text)
allDocuments_ngrams.append(text)
# count documents for each category
categoryCount=dict()
for category in categories:
files = os.listdir(datasetDir+category)
categoryCount[category]=len(files)
#create labels Vector
print("Creating labels...")
filesum = categoryCount['Economy']
economy = filesum
filesum += categoryCount['Health']
health = filesum
filesum += categoryCount['Lifestyle']
lifestyle = filesum
filesum += categoryCount['Nature']
nature = filesum
filesum += categoryCount['Politics']
politics = filesum
filesum += categoryCount['Science']
science = filesum
labels=[0 for x in range(science)]
for count in range(science):
if count<economy:
labels[count] = "Economy, Business & Finance"
elif count<health:
labels[count] = "Health"
elif count<lifestyle:
labels[count] = "Lifestyle & Leisure"
elif count<nature:
labels[count] = "Nature & Environment"
elif count<politics:
labels[count] = "Politics"
elif count<science:
labels[count] = "Science & Technology"
return allDocuments_w2v, allDocuments_ngrams, labels
def mytokenizer(x):
# Tokenize sentence and return stemmed words
#
stemmed_list = list()
for y in x.split():
y_s = PorterStemmer().stem_word(y)
if len(y_s) > 2:
stemmed_list.append(y_s)
return stemmed_list
def readstopwords(file):
# Read list of stopwords from file (one stopword per line)
#
stopwords = list()
fin = open(file,"r")
for line in fin:
stopwords.append(PorterStemmer().stem_word(line.strip()))
return stopwords
def calculate_metrics(conf_matrix):
# calculate precision, recall and f-score per class
#
length = len(conf_matrix)
sum_rows = list()
sum_cols = list()
precisions = list()
recalls = list()
f_scores = list()
# calculate row sums
for row in conf_matrix:
sum_r = sum(row)
sum_rows.append(sum_r)
# calculate column sums, i = column, j = row
for i in range(length):
sum_c=0
for j in range(length):
sum_c+= conf_matrix[j][i]
sum_cols.append(sum_c)
# calculate precisions, recalls and f_scores
for i in range(length):
correct = conf_matrix[i][i]
precision = correct / sum_cols[i]
precisions.append(precision)
recall = correct / sum_rows[i]
recalls.append(recall)
f_score = 2 * precision * recall / (precision + recall)
f_scores.append(f_score)
macro_precision = np.mean(precisions)
macro_recall = np.mean(recalls)
macro_f_score = np.mean(f_scores)
print("Precisions:", precisions, "Marco average:", macro_precision)
print("Recalls:", recalls, "Marco average:", macro_recall)
print("F_scores:", f_scores, "Marco average:", macro_f_score)
def calculate_weights(conf_w2v,conf_ngrams,freqs):
# find late fusion model weights by out-of-the-bag accuracy weighting
#
length = len(conf_w2v)
weights_w2v = list()
for i in range(length):
accuracy_w2v = conf_w2v[i][i] / freqs[i]
accuracy_ngrams = conf_ngrams[i][i] / freqs[i]
weight = accuracy_w2v / (accuracy_w2v + accuracy_ngrams)
weights_w2v.append(weight)
return weights_w2v
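# Illustrative sketch (not part of the original pipeline; the numbers are made up)
# of how the out-of-the-bag accuracy weighting above combines with the
# probability fusion performed further below. The function is never called.
def _fusion_weighting_example():
    # per-class out-of-the-bag accuracies of the two models for a single class
    acc_w2v, acc_ngrams = 0.80, 0.60
    # weight given to the word2vec model for this class
    weight_w2v = acc_w2v / (acc_w2v + acc_ngrams)      # ~0.571
    weight_ngrams = 1 - weight_w2v                     # ~0.429
    # class probabilities of one document under each model
    p_w2v, p_ngrams = 0.9, 0.4
    # the fused probability is the weighted average of the two model probabilities
    return weight_w2v * p_w2v + weight_ngrams * p_ngrams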
# start timer
program_start = time.time()
if(len(sys.argv) == 3):
# Pickle file where a list of sentences variable is stored to train the word2vec model. Each sentence is a list of words, so a list of lists must be provided.
corpusFile = sys.argv[1]
# Directory of dataset (relative path example: "SIMMO/")
datasetDirectory = sys.argv[2]
else:
print("You must provide exactly two arguments: First is the corpus pickle file and second is the dataset directory.")
print("Exiting...")
exit()
# parse documents into two formats: 1) word2vec format, each document is a list of sentences 2) n-gram format, full text per document with numbers removed
print("Loading documents and labels...")
allDocuments_w2v, allDocuments_ngrams, labels = create_docs_and_labels_variables(datasetDirectory)
# balanced random split to dataset and labels
print("Generating stratified random split..")
sss = StratifiedShuffleSplit(labels, 1, test_size=0.3)
for train_index, test_index in sss:
print("TRAIN indices:", train_index, "TEST indices:", test_index)
    # plain Python lists do not support numpy fancy indexing, so pick the items explicitly
    X_train_w2v, X_test_w2v = [allDocuments_w2v[i] for i in train_index], [allDocuments_w2v[i] for i in test_index]
    X_train_ngrams, X_test_ngrams = [allDocuments_ngrams[i] for i in train_index], [allDocuments_ngrams[i] for i in test_index]
    y_train, y_test = [labels[i] for i in train_index], [labels[i] for i in test_index]
print(len(X_train_w2v),len(X_test_w2v),len(X_train_ngrams),len(X_test_ngrams),len(y_train),len(y_test))
######################### WORD2VEC #########################
print()
print("WORD2VEC")
# extract w2v probabilities on training and test set
word2vecModel = trainWord2Vec(200, 12, corpusFile)
print("\nCalculating training set w2v vectors...")
w2v_train = getAvgFeatureVecs(X_train_w2v, word2vecModel, 200)
print("Calculating test set w2v vectors...")
w2v_test = getAvgFeatureVecs(X_test_w2v, word2vecModel, 200)
# Fit a random forest to the training data, using 1000 trees
forest_w2v = RandomForestClassifier( n_estimators = 1000 , oob_score=True , random_state=1)
print ("Fitting a random forest to labeled training data...")
forest_w2v = forest_w2v.fit( w2v_train, y_train )
print ("Oob Score:" + str(forest_w2v.oob_score_) )
# get out-of-the-bag predictions
oob_probabilities_w2v = forest_w2v.oob_decision_function_
# variable to access index of classes
classes_w2v = forest_w2v.classes_
oob_predictions_w2v = list()
#for each document
for item in oob_probabilities_w2v:
# get index of max probability
max_value = max(item)
index = np.where(item==max_value)[0][0]
# get predicted label
predicted = classes_w2v[index]
oob_predictions_w2v.append(predicted)
# extract confusion matrix
confusion_matrix_oob_w2v = confusion_matrix(y_train, oob_predictions_w2v, labels=["Economy, Business & Finance", "Health", "Lifestyle & Leisure", "Nature & Environment", "Politics", "Science & Technology"])
print("\nOut-of-the-bag confusion matrix:")
print(confusion_matrix_oob_w2v)
#get class frequencies in training set
class_freqs = list()
print("\nClass frequencies on training set")
for cl in classes_w2v:
print(cl,":",list(y_train).count(cl))
class_freqs.append(list(y_train).count(cl))
# get model probabilities, predictions and confusion matrix on test_set
probabilities_w2v = forest_w2v.predict_proba(w2v_test)
predictions_w2v = forest_w2v.predict(w2v_test)
confusion_matrix_w2v_test = confusion_matrix(y_test, predictions_w2v, labels=["Economy, Business & Finance", "Health", "Lifestyle & Leisure", "Nature & Environment", "Politics", "Science & Technology"])
print("\nTest set confusion matrix:")
print(confusion_matrix_w2v_test)
######################### N-GRAMS #########################
print()
print("N-GRAMS")
print()
# read stopwords
stopwords = readstopwords("files/stopwords.txt")
# get N-gram counts (unigrams, bigrams, trigrams and four-grams)
#unigrams
#train
vectorizer1 = CountVectorizer(tokenizer=mytokenizer, stop_words=stopwords , min_df=0.05)
unigrams = vectorizer1.fit_transform(X_train_ngrams)
tf_unigrams = TfidfTransformer(norm='l1', use_idf=False, smooth_idf = False).fit_transform(unigrams)
tf_total = tf_unigrams
print("Unigrams shape:", np.shape(tf_unigrams))
#test
unigrams_test = vectorizer1.transform(X_test_ngrams)
tf_unigrams_test = TfidfTransformer(norm='l1', use_idf=False, smooth_idf = False).fit_transform(unigrams_test)
tf_total_test = tf_unigrams_test
print("Unigrams shape (test set):", np.shape(tf_unigrams_test))
#bigrams
try:
#train
vectorizer2 = CountVectorizer(tokenizer=mytokenizer, stop_words=stopwords , ngram_range=(2,2) , min_df=0.02)
bigrams = vectorizer2.fit_transform(X_train_ngrams)
tf_bigrams = TfidfTransformer(norm='l1',use_idf=False, smooth_idf = False).fit_transform(bigrams)
tf_total = hstack([tf_total,tf_bigrams]).toarray()
print("Bigrams shape:", np.shape(tf_bigrams))
#test
bigrams_test = vectorizer2.transform(X_test_ngrams)
tf_bigrams_test = TfidfTransformer(norm='l1',use_idf=False, smooth_idf = False).fit_transform(bigrams_test)
tf_total_test = hstack([tf_total_test,tf_bigrams_test]).toarray()
print("Bigrams shape (test set):", np.shape(tf_bigrams_test))
except ValueError:
print("No bigrams are extracted")
#trigrams
try:
#train
vectorizer3 = CountVectorizer(tokenizer=mytokenizer, stop_words=stopwords , ngram_range=(3,3) , min_df=0.02)
trigrams = vectorizer3.fit_transform(X_train_ngrams)
tf_trigrams = TfidfTransformer(norm='l1', use_idf=False, smooth_idf = False).fit_transform(trigrams)
tf_total = hstack([tf_total,tf_trigrams]).toarray()
print("Trigrams shape:", np.shape(tf_trigrams))
#test
trigrams_test = vectorizer3.transform(X_test_ngrams)
tf_trigrams_test = TfidfTransformer(norm='l1', use_idf=False, smooth_idf = False).fit_transform(trigrams_test)
tf_total_test = hstack([tf_total_test,tf_trigrams_test]).toarray()
print("Trigrams shape (test set):", np.shape(tf_trigrams_test))
except ValueError as v:
print("No trigrams are extracted")
#four-grams
try:
#train
vectorizer4 = CountVectorizer(tokenizer=mytokenizer, stop_words=stopwords , ngram_range=(4,4) , min_df=0.01)
fourgrams = vectorizer4.fit_transform(X_train_ngrams)
tf_fourgrams = TfidfTransformer(norm='l1',use_idf=False, smooth_idf = False).fit_transform(fourgrams)
tf_total = hstack([tf_total,tf_fourgrams]).toarray()
print("Fourgrams shape:", np.shape(tf_fourgrams))
#test
fourgrams_test = vectorizer4.transform(X_test_ngrams)
tf_fourgrams_test = TfidfTransformer(norm='l1',use_idf=False, smooth_idf = False).fit_transform(fourgrams_test)
tf_total_test = hstack([tf_total_test,tf_fourgrams_test]).toarray()
print("Fourgrams shape (test set):", np.shape(tf_fourgrams_test))
except ValueError:
print("No fourgrams are extracted")
print("Total vectors shape:",np.shape(tf_total))
print("Total vectors shape (test set):",np.shape(tf_total_test))
print()
# Fit a random forest to the training data, using 1000 trees
forest_ngrams = RandomForestClassifier( n_estimators = 1000 ,oob_score=True, random_state=1)
print ("Fitting a random forest to labeled training data...")
forest_ngrams = forest_ngrams.fit( tf_total, y_train )
print("Out-of-the-bag score:",forest_ngrams.oob_score_)
# get out-of-the-bag predictions
oob_probabilities_ngrams = forest_ngrams.oob_decision_function_
classes_ngrams = forest_ngrams.classes_
oob_predictions_ngrams = list()
for item in oob_probabilities_ngrams:
# get index of max probability
max_value = max(item)
index = np.where(item==max_value)[0][0]
# get predicted label
predicted = classes_ngrams[index]
oob_predictions_ngrams.append(predicted)
# extract confusion matrix
confusion_matrix_oob_ngrams = confusion_matrix(y_train, oob_predictions_ngrams, labels=["Economy, Business & Finance", "Health", "Lifestyle & Leisure", "Nature & Environment", "Politics", "Science & Technology"])
print("\nOut-of-the-bag confusion matrix:")
print(confusion_matrix_oob_ngrams)
# calculate test set predictions and confusion matrix
probabilities_ngrams = forest_ngrams.predict_proba(tf_total_test)
predictions_ngrams = forest_ngrams.predict(tf_total_test)
confusion_matrix_ngrams_test = confusion_matrix(y_test, predictions_ngrams, labels=["Economy, Business & Finance", "Health", "Lifestyle & Leisure", "Nature & Environment", "Politics", "Science & Technology"])
print("\nTest set confusion matrix:")
print(confusion_matrix_ngrams_test)
# AGGREGATE PROBABILITIES OF W2V AND N-GRAM MODELS TO GET FINAL PREDICTIONS
print()
print("Fusing probabilities...")
print()
# set averaging weights
w2v_weights = calculate_weights(confusion_matrix_oob_w2v, confusion_matrix_oob_ngrams , class_freqs)
ngram_weights = [1-w for w in w2v_weights]
# get weighted average probabilities
probabilities_final = list()
for idx, w2v_doc in enumerate(probabilities_w2v):
ngram_doc = probabilities_ngrams[idx]
final_doc = list()
for idx2, w2v_val in enumerate(w2v_doc):
ngram_val = ngram_doc[idx2]
final_val = w2v_val * w2v_weights[idx2] + ngram_val * ngram_weights[idx2]
final_doc.append(final_val)
probabilities_final.append(final_doc)
probabilities_final = np.array(probabilities_final)
print("\nFused test set probabilities matrix:")
print(probabilities_final)
# get final fused predictions
classes = ["Economy, Business & Finance", "Health", "Lifestyle & Leisure", "Nature & Environment", "Politics", "Science & Technology"]
final_predictions = list()
for item in probabilities_final:
# get index of max probability
max_value = max(item)
index = np.where(item==max_value)[0][0]
# get predicted label
predicted = classes[index]
final_predictions.append(predicted)
# output final fused confusion matrix
confusion_matrix_final = confusion_matrix(y_test, final_predictions, labels=["Economy, Business & Finance", "Health", "Lifestyle & Leisure", "Nature & Environment", "Politics", "Science & Technology"])
print("\nFused test set confusion matrix:")
print(confusion_matrix_final)
# print elapsed time
program_elapsed = time.time() - program_start
print()
print("Elapsed time (seconds):", program_elapsed)
| apache-2.0 | -1,476,432,783,024,438,000 | 37.877228 | 212 | 0.680538 | false |
RihardsT/forgettables | Languages_Programming/Python/python.py | 1 | 8335 | Simple Server
python3 -m http.server # 8000 # --bind 127.0.0.1
python2 -m SimpleHTTPServer # 8000
In Python, indentation matters.
#Single line comment
""" Multiline comment.
Apostrophes delimit strings, just like double quotes. The \ character lets you escape them.
There\'s a snake. is understandable to Python.
"""
Operators: = - * / ** % // #// floor divide
Comparators > < >= <= == !=
Assignment operators: += -= *= /=
Bool operators not and or #evaluated in this order. # 2<3 and 5<6 => True
Bitwise Operators: >> /Right shift << /Left shift & /Bitwise AND
| /Bitwise OR ^ /Bitwise XOR ~ /Bitwise NOT # & | return int, convert to binary with bin()
To write number in binary start with 0b #0b10 = 2, 0b11 = 3
#Python 2.* style
print "Life"+"of"+"Brian"+str(2) # +
name = "Name"
print "Hello %s" %(name) # %s and %(var) formatting operator.
print "String", var + 1 #var=0, prints String 1
print char, # the trailing , means it will print without \n
print a, b
#Python 3.* style
print('{0} and {1}'.format('var1', 'var2'))
# Python 3.6 String interpolation
f'can_put_text_here {variable}'
f'variable in brackets {{ {variable} }}'
# print array contents without brackets
print(*array, sep=', ')
Variables:
name = value #value can be anything: int, float, bool, array, string, obj
String: a= "string"[0] #Access by index.
String methods: len(variable) string.lower() .upper() str(var_to_string)
.isalpha() #Checks whether the string contains only letters
.split() #returns list with words
" ".join(list)
name = raw_input("Question") #Console input.
name = input('Enter something')
list = [var1, var2] # Array
list[0] = changeVal
list[1:9:2] #list slicing [start:stop:step] [3:] [:2] [::2] / [::-1] #reverse #string slice, split
list.append(var) .insert(1,var) #.insert(position, var)
.sort() .index(var) #animals.index("bat") => returns index of bat
.pop(index) #Removes the item from the list and returns its value
.remove(item) #Removes the element if it is found.
del(list[index]) #like .pop, but does not return the value
evens_to_50 = [i for i in range(51) if i % 2 == 0] #generate list
dictionary = {'key':value, 'key2':value} # Hash in ruby
dictionary[key] = newValue
del dictionary[key]
# dictionary.remove(key) # Python 2 ?
.items() #returns key/value pairs, not ordered
.keys()
.values()
.clear()
.replace() # replace char in string
if/elif/else
if True:
#Do code
pass #does nothing
elif True:
#Else if code
else:
#Code
if var not in list:
    #add var to the list only if it is not already in it.
if True: #code
'Yes' if fruit == 'Apple' else 'No' #value_when_true if condition else value_when_false
####for, for/else // while, while/else
for var in list_name:
    #code #Iterating over the list this way, you cannot change its values
else:
    #else runs only if the for loop completes normally, i.e. without a break
for key in dictionary:
    print dictionary[key]
for i in range(0, 5): #for: from, to. Counts i. A typical for loop
    n[i] = n[i] * 2
    #This iterates with indices, so list values can be changed.
for index, item in enumerate(choices): #enumerate provides the index
    print index+1, item
while True: #break can be used, effectively creating a do-while loop
#code
if True:
break
while True:
#code
else:
#Else condition
Built in functions:
range(stop) // range(start, stop) // range(start, stop, step)
max(1,2,3) min()
abs(-3) #absolute distance from 0, i.e. -3 => 3
sum()
type(var) #returns the type of var: int, float, str
len(var)
str(var_to_string)
float(var_to_float) # int to float
int(to_int)
zip(list_1, list_2) #zip pairs up the elements of two or more lists (see the short examples below)
filter(function_what_to_filter, object_to_filter) #See lambda
bin(1) #returns binary representation of int #or vice versa ?
oct()
hex()
int("number_in_string", base_of_that_number) #returns value of that in base 10
set(list_in_here) # Returns unique elements
map()
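# A few quick, illustrative examples of the built-ins above (values chosen arbitrarily):
zip([1, 2, 3], ['a', 'b', 'c'])                # pairs: (1, 'a'), (2, 'b'), (3, 'c') (a list in Python 2, an iterator in Python 3)
list(filter(lambda x: x % 2 == 0, range(10)))  # [0, 2, 4, 6, 8]
list(map(str, [1, 2, 3]))                      # ['1', '2', '3']
set([1, 1, 2, 3])                              # {1, 2, 3}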
Functions:
def function_name(params):
#code
function_name(params) #Call function
Anonymous function
lambda x: x % 3 == 0
#same as:
def by_three(x):
return x % 3 == 0
languages = ["HTML", "JavaScript", "Python", "Ruby"]
print filter(lambda x: x == "Python" ,languages)
Classes:
class ClassName(object):
    member_variable = True #Available to any object of this class
    #Interestingly, the default value can be changed after an object has been defined.
    def __init__(self, name):
        self.name = name #Instance variables. Each object can access only its own values
    def method_name(self): #self indicates that the method is available only to an individual object.
pass
def __repr__(self):
return "(%d, %d, %d)" %(self.x, self.y, self.z)
        #__repr__() determines how the object will be displayed. print my_object
class_object = ClassName("Name") #Creating an object
print class_object.name #Object variables can be accessed with a dot
class_object.member_variable = False #changes the default value.
#This does not change member_variable for the other objects. They still keep the default.
Inheritance:
class ChildClass(ParentClass):
    #ParentClass functions etc. are available
    def method_name(self): #Override. Redefine it with the same name as the ParentClass method.
        return super(Derived, self).method_name() #super gives access to the ParentClass ...
#CodeAcademy/Python/Introduction to Classes/14
class Employee(object):
def __init__(self, employee_name):
self.employee_name = employee_name
def calculate_wage(self, hours):
self.hours = hours
return hours * 20.00
class PartTimeEmployee(Employee):
def calculate_wage(self, hours):
self.hours = hours
return hours*12.00
def full_time_wage(self, hours):
return super(PartTimeEmployee, self).calculate_wage(hours)
milton = PartTimeEmployee("Milton")
print milton.full_time_wage(10)
############## FileInput/Output
### with is the prefered way how to deal with files. This takes care of open/close
# read line by line
with open("output.txt", "r") as f:
contents = f.read()
for line in f:
pass
### Open multiple files
with open('file1', 'w') as file1, open('file2', 'w') as file2:
pass
f = open("output.txt", "w")
#modes: "w" write only, "r" read only, "r+" read and write, "a" append
f.write("Data to be written")
print(f.read()) #Prints everything
print(f.readline()) #First call: the first line
print(f.readline()) #Second call: the second line
f.close() #Must close the file.
f.closed #returns True False. Tells whether the file is open or closed.
#You always need to close your files after you're done writing to them.
#During the I/O process, data is buffered: it is held in a temporary buffer before being written to the file.
#Python doesn't flush the buffer, i.e. write the data out to the file, until it's sure you're done writing.
#If you write to a file without closing, the data won't make it to the target file.
with open("file", "mode") as variable:
# Read or write to the file
with open("text.txt", "w") as textfile:
textfile.write("Success!")
import math #generic import. You must write math. before each of its functions: math.sqrt(9)
from module import function #function import
from module import * #universal imports. No need to write math. before each function
#Universal imports can cause problems if you define your own function with the same name.
#E.g. your own sqrt function would cause problems if you used from math import sqrt.
#With import math, sqrt would call your own, while math.sqrt would call the one from math.
import math # Imports the math module
everything = dir(math) # Sets everything to a list of things from math
print everything # Prints 'em all!
from datetime import datetime
print(datetime.now())
now = datetime.now()
print('{0}-{1}-{2}'.format(now.year, now.month, now.day))
from random import randint #Random int
import random
random.random() # float in range [0.0 1.0]
### string to date
import datetime
datetime.datetime.strptime(date_string, format)
# Format reference: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
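# Example (the date string and format below are made up for illustration):
datetime.datetime.strptime('2017-03-05 14:30', '%Y-%m-%d %H:%M') # datetime.datetime(2017, 3, 5, 14, 30)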
# Get date month ago
time_now = datetime.datetime.utcnow()
time_30_days_ago = time_now - datetime.timedelta(days=30)
### compare dates. Replace tzinfo with None, if getting error:
# TypeError: can't compare offset-naive and offset-aware datetimes
some_date.replace(tzinfo=None) < time_30_days_ago
######
### Run system command
# https://docs.python.org/3/library/subprocess.html
import subprocess
subprocess.run(["COMMAND", "ARGUMENT"])
| unlicense | -7,686,488,076,607,635,000 | 33.894068 | 99 | 0.708682 | false |
ninegrid/dotfiles-vim | bundle/vim-orgmode/ftplugin/orgmode/plugins/EditCheckbox.py | 1 | 7316 | # -*- coding: utf-8 -*-
import vim
from orgmode._vim import echo, echom, echoe, ORGMODE, apply_count, repeat, insert_at_cursor, indent_orgmode
from orgmode.menu import Submenu, Separator, ActionEntry, add_cmd_mapping_menu
from orgmode.keybinding import Keybinding, Plug, Command
from orgmode.liborgmode.checkboxes import Checkbox
from orgmode.liborgmode.dom_obj import OrderListType
class EditCheckbox(object):
u"""
Checkbox plugin.
"""
def __init__(self):
u""" Initialize plugin """
object.__init__(self)
# menu entries this plugin should create
self.menu = ORGMODE.orgmenu + Submenu(u'Edit Checkbox')
# key bindings for this plugin
# key bindings are also registered through the menu so only additional
# bindings should be put in this variable
self.keybindings = []
# commands for this plugin
self.commands = []
@classmethod
def new_checkbox(cls, below=None):
d = ORGMODE.get_document()
h = d.current_heading()
if h is None:
return
# init checkboxes for current heading
h.init_checkboxes()
c = h.current_checkbox()
nc = Checkbox()
nc._heading = h
# default checkbox level
level = h.level
start = vim.current.window.cursor[0] - 1
# if no checkbox is found, insert at current line with indent level=1
if c is None:
if h.checkboxes:
level = h.first_checkbox.level
h.checkboxes.append(nc)
else:
l = c.get_parent_list()
idx = c.get_index_in_parent_list()
if l is not None and idx is not None:
l.insert(idx + (1 if below else 0), nc)
# workaround for broken associations, Issue #165
nc._parent = c.parent
if below:
if c.next_sibling:
c.next_sibling._previous_sibling = nc
nc._next_sibling = c.next_sibling
c._next_sibling = nc
nc._previous_sibling = c
else:
if c.previous_sibling:
c.previous_sibling._next_sibling = nc
nc._next_sibling = c
nc._previous_sibling = c.previous_sibling
c._previous_sibling = nc
t = c.type
# increase key for ordered lists
if t[-1] in OrderListType:
try:
num = int(t[:-1]) + (1 if below else -1)
t = '%d%s' % (num, t[-1])
except ValueError:
try:
char = ord(t[:-1]) + (1 if below else -1)
t = '%s%s' % (chr(char), t[-1])
except ValueError:
pass
nc.type = t
if not c.status:
nc.status = None
level = c.level
if below:
start = c.end_of_last_child
else:
start = c.start
nc.level = level
vim.current.window.cursor = (start + 1, 0)
if below:
vim.command("normal o")
else:
vim.command("normal O")
insert_at_cursor(str(nc))
vim.command("call feedkeys('a')")
@classmethod
def toggle(cls, checkbox=None):
u"""
Toggle the checkbox given in the parameter.
If the checkbox is not given, it will toggle the current checkbox.
"""
d = ORGMODE.get_document()
current_heading = d.current_heading()
# init checkboxes for current heading
if current_heading is None:
return
current_heading = current_heading.init_checkboxes()
if checkbox is None:
# get current_checkbox
c = current_heading.current_checkbox()
# no checkbox found
if c is None:
cls.update_checkboxes_status()
return
else:
c = checkbox
if c.status == Checkbox.STATUS_OFF:
# set checkbox status on if all children are on
if not c.children or c.are_children_all(Checkbox.STATUS_ON):
c.toggle()
d.write_checkbox(c)
elif c.status == Checkbox.STATUS_ON:
if not c.children or c.is_child_one(Checkbox.STATUS_OFF):
c.toggle()
d.write_checkbox(c)
elif c.status == Checkbox.STATUS_INT:
# can't toggle intermediate state directly according to emacs orgmode
pass
# update checkboxes status
cls.update_checkboxes_status()
@classmethod
def _update_subtasks(cls):
d = ORGMODE.get_document()
h = d.current_heading()
# init checkboxes for current heading
h.init_checkboxes()
# update heading subtask info
c = h.first_checkbox
if c is None:
return
total, on = c.all_siblings_status()
h.update_subtasks(total, on)
# update all checkboxes under current heading
cls._update_checkboxes_subtasks(c)
@classmethod
def _update_checkboxes_subtasks(cls, checkbox):
# update checkboxes
for c in checkbox.all_siblings():
if c.children:
total, on = c.first_child.all_siblings_status()
c.update_subtasks(total, on)
cls._update_checkboxes_subtasks(c.first_child)
@classmethod
def update_checkboxes_status(cls):
d = ORGMODE.get_document()
h = d.current_heading()
# init checkboxes for current heading
h.init_checkboxes()
cls._update_checkboxes_status(h.first_checkbox)
cls._update_subtasks()
@classmethod
def _update_checkboxes_status(cls, checkbox=None):
u""" helper function for update checkboxes status
:checkbox: The first checkbox of this indent level
:return: The status of the parent checkbox
"""
if checkbox is None:
return
status_off, status_on, status_int, total = 0, 0, 0, 0
# update all top level checkboxes' status
for c in checkbox.all_siblings():
current_status = c.status
			# if this checkbox is not a leaf, its status should be determined by all its children
if c.children:
current_status = cls._update_checkboxes_status(c.first_child)
# don't update status if the checkbox has no status
if c.status is None:
current_status = None
# the checkbox needs to have status
else:
total += 1
# count number of status in this checkbox level
if current_status == Checkbox.STATUS_OFF:
status_off += 1
elif current_status == Checkbox.STATUS_ON:
status_on += 1
elif current_status == Checkbox.STATUS_INT:
status_int += 1
# write status if any update
if current_status is not None and c.status != current_status:
c.status = current_status
d = ORGMODE.get_document()
d.write_checkbox(c)
parent_status = Checkbox.STATUS_INT
		# all sibling checkboxes are off status
if status_off == total:
parent_status = Checkbox.STATUS_OFF
		# all sibling checkboxes are on status
elif status_on == total:
parent_status = Checkbox.STATUS_ON
		# one sibling checkbox is on or int status
elif status_on != 0 or status_int != 0:
parent_status = Checkbox.STATUS_INT
# other cases
else:
parent_status = None
return parent_status
def register(self):
u"""
Registration of the plugin.
Key bindings and other initialization should be done here.
"""
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxNewAbove',
function=u':py ORGMODE.plugins[u"EditCheckbox"].new_checkbox()<CR>',
key_mapping=u'<localleader>cN',
menu_desrc=u'New CheckBox Above'
)
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxNewBelow',
function=u':py ORGMODE.plugins[u"EditCheckbox"].new_checkbox(below=True)<CR>',
key_mapping=u'<localleader>cn',
menu_desrc=u'New CheckBox Below'
)
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxToggle',
function=u':silent! py ORGMODE.plugins[u"EditCheckbox"].toggle()<CR>',
key_mapping=u'<localleader>cc',
menu_desrc=u'Toggle Checkbox'
)
add_cmd_mapping_menu(
self,
name=u'OrgCheckBoxUpdate',
function=u':silent! py ORGMODE.plugins[u"EditCheckbox"].update_checkboxes_status()<CR>',
key_mapping=u'<localleader>c#',
menu_desrc=u'Update Subtasks'
)
# vim: set noexpandtab:
| unlicense | -3,455,821,195,089,854,000 | 26.400749 | 107 | 0.6807 | false |
juergspaak/EF-at-invariant-richness | plot_figS5.py | 1 | 1315 | """
@author: J.W. Spaak
This program plots Fig. S5
"""
from plot_functions import bars
import community_construction_repl as repl
#compute DeltaEF/EF for the different communities and different cases
EF_data = {key: repl.delta_EF_lin(*repl.para[key]) for key in repl.para.keys()}
# species have different f
EF_data_dif = {key: repl.delta_EF_lin(*repl.para[key], sim_f = False)
for key in repl.para.keys()}
# plot results
keys = ['0.95', '0.75', '0.50','0.25', '0.05']
cols = ['#006400','#CF0000', '#90FB90', '#800000']
# plot the delta EF communities with same f
fig, ax, ind = bars(EF_data, keys, col = cols[:2])
# add the deltaEF of the different f communities
fig, ax, ind = bars(EF_data_dif, keys, fig, ax, col = cols[2:])
# adjust axis
ax.set_xlim([-0.2,ind[-1]+0.7])
ax.set_xticks(ind+0.15)
ax.set_xticklabels(keys)
ax.set_xlabel("Proportion p of species present at both sites", fontsize = 16)
ax.set_ylim([-80,140]) # add enough space for the legend
# add legend
legend = {col: ax.bar(0,0,width = 0 ,color = 'white'
,edgecolor=col,linewidth=1.5) for col in cols}
lab = ['e<0, same f', 'e>0, same f', 'e<0, diff f', 'e>0, diff f']
ax.legend([legend[col] for col in cols],lab, loc = 'upper left')
# save figure
fig.savefig("Figure S5, diff f.pdf")
| mit | -6,361,027,837,631,320,000 | 32.717949 | 79 | 0.644867 | false |
autosportlabs/RaceCapture_App | autosportlabs/racecapture/views/dashboard/widgets/digitalgauge.py | 1 | 2538 | #
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.uix.boxlayout import BoxLayout
from kivy.app import Builder
from kivy.metrics import dp
from utils import kvFind, kvquery
from fieldlabel import AutoShrinkFieldLabel
from kivy.properties import NumericProperty, ObjectProperty
from autosportlabs.racecapture.views.dashboard.widgets.gauge import CustomizableGauge
DEFAULT_BACKGROUND_COLOR = [0, 0, 0, 0]
class DigitalGauge(CustomizableGauge):
Builder.load_string("""
<DigitalGauge>:
anchor_x: 'center'
anchor_y: 'center'
title_size: self.height * 0.5
value_size: self.height * 0.7
BoxLayout:
orientation: 'horizontal'
spacing: self.height * 0.1
AutoShrinkFieldLabel:
id: title
text: 'channel'
font_size: root.title_size
halign: 'right'
AutoShrinkFieldLabel:
canvas.before:
Color:
rgba: root.alert_background_color
Rectangle:
pos: self.pos
size: self.size
id: value
text: '---'
font_size: root.value_size
halign: 'center'
""")
alert_background_color = ObjectProperty(DEFAULT_BACKGROUND_COLOR)
def __init__(self, **kwargs):
super(DigitalGauge, self).__init__(**kwargs)
self.normal_color = DEFAULT_BACKGROUND_COLOR
def update_title(self, channel, channel_meta):
try:
self.title = channel if channel else ''
except Exception as e:
print('Failed to update digital gauge title ' + str(e))
def update_colors(self):
alert_color = self.select_alert_color()
self.alert_background_color = DEFAULT_BACKGROUND_COLOR if alert_color is None else alert_color
| gpl-3.0 | 1,654,545,606,196,182,300 | 32.394737 | 102 | 0.657998 | false |
jehine-MSFT/azure-storage-python | azure/storage/sharedaccesssignature.py | 1 | 35228 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from datetime import date
from ._common_conversion import (
_sign_string,
_to_str,
)
from ._serialization import (
url_quote,
_to_utc_datetime,
)
from ._constants import X_MS_VERSION
class SharedAccessSignature(object):
'''
Provides a factory for creating blob, queue, table, and file shares access
signature tokens with a common account name and account key. Users can either
use the factory or can construct the appropriate service and use the
generate_*_shared_access_signature method directly.
'''
def __init__(self, account_name, account_key):
'''
:param str account_name:
The storage account name used to generate the shared access signatures.
:param str account_key:
            The access key used to generate the shared access signatures.
'''
self.account_name = account_name
self.account_key = account_key
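    # Illustrative usage sketch (not part of the library itself; the account name,
    # key and table name below are placeholders, and TablePermissions/TableService
    # are assumed to be imported from azure.storage.table, with datetime/timedelta
    # from the standard datetime module):
    #
    #   sas = SharedAccessSignature('myaccount', 'mykey')
    #   token = sas.generate_table('mytable',
    #                              permission=TablePermissions.QUERY,
    #                              expiry=datetime.utcnow() + timedelta(hours=1))
    #   table_service = TableService('myaccount', sas_token=token)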
def generate_table(self, table_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None,
start_pk=None, start_rk=None,
end_pk=None, end_rk=None):
'''
Generates a shared access signature for the table.
Use the returned signature with the sas_token parameter of TableService.
:param str table_name:
Name of table.
:param TablePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str start_pk:
The minimum partition key accessible with this shared access
signature. startpk must accompany startrk. Key values are inclusive.
If omitted, there is no lower bound on the table entities that can
be accessed.
:param str start_rk:
The minimum row key accessible with this shared access signature.
startpk must accompany startrk. Key values are inclusive. If
omitted, there is no lower bound on the table entities that can be
accessed.
:param str end_pk:
The maximum partition key accessible with this shared access
signature. endpk must accompany endrk. Key values are inclusive. If
omitted, there is no upper bound on the table entities that can be
accessed.
:param str end_rk:
The maximum row key accessible with this shared access signature.
endpk must accompany endrk. Key values are inclusive. If omitted,
there is no upper bound on the table entities that can be accessed.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk)
sas.add_resource_signature(self.account_name, self.account_key, 'table', table_name)
return sas.get_token()
def generate_queue(self, queue_name, permission=None,
expiry=None, start=None, id=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the queue.
Use the returned signature with the sas_token parameter of QueueService.
:param str queue_name:
Name of queue.
:param QueuePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, add, update, process.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name)
return sas.get_token()
def generate_blob(self, container_name, blob_name, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = container_name + '/' + blob_name
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('b')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
return sas.get_token()
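    # Illustrative usage sketch (assumptions: `signer` is an instance of this
    # class constructed with a valid account_name/account_key pair, and
    # BlobPermissions, datetime and timedelta are imported by the caller):
    #
    #     token = signer.generate_blob(
    #         'mycontainer', 'myblob.txt',
    #         permission=BlobPermissions(read=True),
    #         expiry=datetime.utcnow() + timedelta(hours=1))
    #     signed_url = blob_url + '?' + token
    #
    # The return value is only the query string; append it to the blob URL or
    # pass it as the sas_token parameter of a BlobService.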
def generate_container(self, container_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the container.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param ContainerPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('c')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name)
return sas.get_token()
def generate_file(self, share_name, directory_name=None, file_name=None,
permission=None, expiry=None, start=None, id=None,
ip=None, protocol=None, cache_control=None,
content_disposition=None, content_encoding=None,
content_language=None, content_type=None):
'''
Generates a shared access signature for the file.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param str directory_name:
Name of directory. SAS tokens cannot be created for directories, so
this parameter should only be present if file_name is provided.
:param str file_name:
Name of file.
:param FilePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = share_name
if directory_name is not None:
resource_path += '/' + _to_str(directory_name)
resource_path += '/' + _to_str(file_name)
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('f')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path)
return sas.get_token()
def generate_share(self, share_name, permission=None, expiry=None,
start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the share.
Use the returned signature with the sas_token parameter of FileService.
:param str share_name:
Name of share.
:param SharePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, create, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_file_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('s')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name)
return sas.get_token()
def generate_account(self, services, resource_types, permission, expiry, start=None,
ip=None, protocol=None):
'''
Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
'''
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_account(services, resource_types)
sas.add_account_signature(self.account_name, self.account_key)
return sas.get_token()
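    # Illustrative account-level SAS sketch (same assumption: `signer` is an
    # instance of this class; the constructor arguments of the models below
    # are indicative, see azure.storage.models for the exact signatures):
    #
    #     token = signer.generate_account(
    #         services=Services(blob=True, file=True),
    #         resource_types=ResourceTypes(container=True, object=True),
    #         permission=AccountPermissions(read=True, list=True),
    #         expiry=datetime.utcnow() + timedelta(hours=1))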
class _QueryStringConstants(object):
SIGNED_SIGNATURE = 'sig'
SIGNED_PERMISSION = 'sp'
SIGNED_START = 'st'
SIGNED_EXPIRY = 'se'
SIGNED_RESOURCE = 'sr'
SIGNED_IDENTIFIER = 'si'
SIGNED_IP = 'sip'
SIGNED_PROTOCOL = 'spr'
SIGNED_VERSION = 'sv'
SIGNED_CACHE_CONTROL = 'rscc'
SIGNED_CONTENT_DISPOSITION = 'rscd'
SIGNED_CONTENT_ENCODING = 'rsce'
SIGNED_CONTENT_LANGUAGE = 'rscl'
SIGNED_CONTENT_TYPE = 'rsct'
TABLE_NAME = 'tn'
START_PK = 'spk'
START_RK = 'srk'
END_PK = 'epk'
END_RK = 'erk'
SIGNED_RESOURCE_TYPES = 'srt'
SIGNED_SERVICES = 'ss'
class _SharedAccessHelper(object):
def __init__(self):
self.query_dict = {}
def _add_query(self, name, val):
if val:
self.query_dict[name] = _to_str(val)
def add_base(self, permission, expiry, start, ip, protocol):
if isinstance(start, date):
start = _to_utc_datetime(start)
if isinstance(expiry, date):
expiry = _to_utc_datetime(expiry)
self._add_query(_QueryStringConstants.SIGNED_START, start)
self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry)
self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission)
self._add_query(_QueryStringConstants.SIGNED_IP, ip)
self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol)
self._add_query(_QueryStringConstants.SIGNED_VERSION, X_MS_VERSION)
def add_resource(self, resource):
self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource)
def add_id(self, id):
self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id)
def add_account(self, services, resource_types):
self._add_query(_QueryStringConstants.SIGNED_SERVICES, services)
self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
def add_table_access_ranges(self, table_name, start_pk, start_rk,
end_pk, end_rk):
self._add_query(_QueryStringConstants.TABLE_NAME, table_name)
self._add_query(_QueryStringConstants.START_PK, start_pk)
self._add_query(_QueryStringConstants.START_RK, start_rk)
self._add_query(_QueryStringConstants.END_PK, end_pk)
self._add_query(_QueryStringConstants.END_RK, end_rk)
def add_override_response_headers(self, cache_control,
content_disposition,
content_encoding,
content_language,
content_type):
self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
def add_resource_signature(self, account_name, account_key, service, path):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/' + service + '/' + account_name + path + '\n'
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
canonicalized_resource +
get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
if service == 'blob' or service == 'file':
string_to_sign += \
(get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE))
if service == 'table':
string_to_sign += \
(get_value_to_append(_QueryStringConstants.START_PK) +
get_value_to_append(_QueryStringConstants.START_RK) +
get_value_to_append(_QueryStringConstants.END_PK) +
get_value_to_append(_QueryStringConstants.END_RK))
# remove the trailing newline
if string_to_sign[-1] == '\n':
string_to_sign = string_to_sign[:-1]
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
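    # For reference, for a blob or file SAS the string-to-sign assembled above
    # is the newline-separated sequence
    #
    #     sp \n st \n se \n /<service>/<account><path> \n si \n sip \n spr \n
    #     sv \n rscc \n rscd \n rsce \n rscl \n rsct
    #
    # with empty values contributing empty lines and the trailing newline
    # stripped; it is then signed with the account key (see _sign_string) to
    # produce the `sig` query parameter.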
def add_account_signature(self, account_name, account_key):
def get_value_to_append(query):
return_value = self.query_dict.get(query) or ''
return return_value + '\n'
string_to_sign = \
(account_name + '\n' +
get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) +
get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) +
get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) +
get_value_to_append(_QueryStringConstants.SIGNED_START) +
get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) +
get_value_to_append(_QueryStringConstants.SIGNED_IP) +
get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) +
get_value_to_append(_QueryStringConstants.SIGNED_VERSION))
self._add_query(_QueryStringConstants.SIGNED_SIGNATURE,
_sign_string(account_key, string_to_sign))
def get_token(self):
return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) | apache-2.0 | -6,093,724,331,806,510,000 | 51.73503 | 113 | 0.634105 | false |
bswartz/manila | manila/api/v2/share_networks.py | 1 | 14622 | # Copyright 2014 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The shares api."""
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_utils import timeutils
import six
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.openstack import wsgi
from manila.api.views import share_networks as share_networks_views
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
from manila import policy
from manila import quota
from manila.share import rpcapi as share_rpcapi
RESOURCE_NAME = 'share_network'
RESOURCES_NAME = 'share_networks'
LOG = log.getLogger(__name__)
QUOTAS = quota.QUOTAS
class ShareNetworkController(wsgi.Controller):
"""The Share Network API controller for the OpenStack API."""
_view_builder_class = share_networks_views.ViewBuilder
def __init__(self):
super(ShareNetworkController, self).__init__()
self.share_rpcapi = share_rpcapi.ShareAPI()
def show(self, req, id):
"""Return data about the requested network info."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'show')
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def delete(self, req, id):
"""Delete specified share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'delete')
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
share_instances = (
db_api.share_instances_get_all_by_share_network(context, id)
)
if share_instances:
msg = _("Can not delete share network %(id)s, it has "
"%(len)s share(s).") % {'id': id,
'len': len(share_instances)}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
# NOTE(ameade): Do not allow deletion of share network used by share
# group
sg_count = db_api.count_share_groups_in_share_network(context, id)
if sg_count:
msg = _("Can not delete share network %(id)s, it has %(len)s "
"share group(s).") % {'id': id, 'len': sg_count}
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
for share_server in share_network['share_servers']:
self.share_rpcapi.delete_share_server(context, share_server)
db_api.share_network_delete(context, id)
try:
reservations = QUOTAS.reserve(
context, project_id=share_network['project_id'],
share_networks=-1, user_id=share_network['user_id'])
except Exception:
LOG.exception("Failed to update usages deleting "
"share-network.")
else:
QUOTAS.commit(context, reservations,
project_id=share_network['project_id'],
user_id=share_network['user_id'])
return webob.Response(status_int=202)
def _get_share_networks(self, req, is_detail=True):
"""Returns a list of share networks."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
if 'security_service_id' in search_opts:
networks = db_api.share_network_get_all_by_security_service(
context, search_opts['security_service_id'])
elif context.is_admin and 'project_id' in search_opts:
networks = db_api.share_network_get_all_by_project(
context, search_opts['project_id'])
elif context.is_admin and 'all_tenants' in search_opts:
networks = db_api.share_network_get_all(context)
else:
networks = db_api.share_network_get_all_by_project(
context,
context.project_id)
date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.'''
if 'created_since' in search_opts:
try:
created_since = timeutils.parse_strtime(
search_opts['created_since'],
fmt="%Y-%m-%d")
except ValueError:
msg = date_parsing_error_msg % search_opts['created_since']
raise exc.HTTPBadRequest(explanation=msg)
networks = [network for network in networks
if network['created_at'] >= created_since]
if 'created_before' in search_opts:
try:
created_before = timeutils.parse_strtime(
search_opts['created_before'],
fmt="%Y-%m-%d")
except ValueError:
msg = date_parsing_error_msg % search_opts['created_before']
raise exc.HTTPBadRequest(explanation=msg)
networks = [network for network in networks
if network['created_at'] <= created_before]
opts_to_remove = [
'all_tenants',
'created_since',
'created_before',
'limit',
'offset',
'security_service_id',
'project_id'
]
for opt in opts_to_remove:
search_opts.pop(opt, None)
if search_opts:
for key, value in search_opts.items():
if key in ['ip_version', 'segmentation_id']:
value = int(value)
if (req.api_version_request >=
api_version.APIVersionRequest("2.36")):
networks = [network for network in networks
if network.get(key) == value or
(value in network.get(key.rstrip('~'))
if key.endswith('~') and
network.get(key.rstrip('~')) else ())]
else:
networks = [network for network in networks
if network.get(key) == value]
limited_list = common.limited(networks, req)
return self._view_builder.build_share_networks(
req, limited_list, is_detail)
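    # Example queries handled by the filtering above (illustrative; the URL
    # prefix depends on the deployment):
    #
    #     GET /share-networks/detail?created_since=2016-01-01
    #     GET /share-networks/detail?all_tenants=1          (admin only)
    #     GET /share-networks/detail?name~=prod             (API >= 2.36,
    #                                                        substring match)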
def index(self, req):
"""Returns a summary list of share networks."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'index')
return self._get_share_networks(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of share networks."""
policy.check_policy(req.environ['manila.context'], RESOURCE_NAME,
'detail')
return self._get_share_networks(req)
def update(self, req, id, body):
"""Update specified share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'update')
if not body or RESOURCE_NAME not in body:
raise exc.HTTPUnprocessableEntity()
try:
share_network = db_api.share_network_get(context, id)
except exception.ShareNetworkNotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
update_values = body[RESOURCE_NAME]
if 'nova_net_id' in update_values:
msg = _("nova networking is not supported starting in Ocata.")
raise exc.HTTPBadRequest(explanation=msg)
if share_network['share_servers']:
for value in update_values:
if value not in ['name', 'description']:
msg = (_("Cannot update share network %s. It is used by "
"share servers. Only 'name' and 'description' "
"fields are available for update") %
share_network['id'])
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_update(context,
id,
update_values)
except db_exception.DBError:
msg = "Could not save supplied data due to database error"
raise exc.HTTPBadRequest(explanation=msg)
return self._view_builder.build_share_network(req, share_network)
def create(self, req, body):
"""Creates a new share network."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'create')
if not body or RESOURCE_NAME not in body:
raise exc.HTTPUnprocessableEntity()
values = body[RESOURCE_NAME]
values['project_id'] = context.project_id
values['user_id'] = context.user_id
if 'nova_net_id' in values:
msg = _("nova networking is not supported starting in Ocata.")
raise exc.HTTPBadRequest(explanation=msg)
try:
reservations = QUOTAS.reserve(context, share_networks=1)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
if 'share_networks' in overs:
LOG.warning("Quota exceeded for %(s_pid)s, "
"tried to create "
"share-network (%(d_consumed)d of %(d_quota)d "
"already consumed).", {
's_pid': context.project_id,
'd_consumed': _consumed('share_networks'),
'd_quota': quotas['share_networks']})
raise exception.ShareNetworksLimitExceeded(
allowed=quotas['share_networks'])
else:
try:
share_network = db_api.share_network_create(context, values)
except db_exception.DBError:
msg = "Could not save supplied data due to database error"
raise exc.HTTPBadRequest(explanation=msg)
QUOTAS.commit(context, reservations)
return self._view_builder.build_share_network(req, share_network)
def action(self, req, id, body):
_actions = {
'add_security_service': self._add_security_service,
'remove_security_service': self._remove_security_service
}
for action, data in body.items():
try:
return _actions[action](req, id, data)
except KeyError:
                msg = _("Share network does not have a %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
def _add_security_service(self, req, id, data):
"""Associate share network with a given security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'add_security_service')
share_network = db_api.share_network_get(context, id)
if share_network['share_servers']:
msg = _("Cannot add security services. Share network is used.")
raise exc.HTTPForbidden(explanation=msg)
security_service = db_api.security_service_get(
context, data['security_service_id'])
for attached_service in share_network['security_services']:
if attached_service['type'] == security_service['type']:
msg = _("Cannot add security service to share network. "
"Security service with '%(ss_type)s' type already "
"added to '%(sn_id)s' share network") % {
'ss_type': security_service['type'],
'sn_id': share_network['id']}
raise exc.HTTPConflict(explanation=msg)
try:
share_network = db_api.share_network_add_security_service(
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
except exception.ShareNetworkSecurityServiceAssociationError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def _remove_security_service(self, req, id, data):
"""Dissociate share network from a given security service."""
context = req.environ['manila.context']
policy.check_policy(context, RESOURCE_NAME, 'remove_security_service')
share_network = db_api.share_network_get(context, id)
if share_network['share_servers']:
msg = _("Cannot remove security services. Share network is used.")
raise exc.HTTPForbidden(explanation=msg)
try:
share_network = db_api.share_network_remove_security_service(
context,
id,
data['security_service_id'])
except KeyError:
msg = "Malformed request body"
raise exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=six.text_type(e))
except exception.ShareNetworkSecurityServiceDissociationError as e:
raise exc.HTTPBadRequest(explanation=six.text_type(e))
return self._view_builder.build_share_network(req, share_network)
def create_resource():
return wsgi.Resource(ShareNetworkController())
| apache-2.0 | -7,893,771,970,282,250,000 | 41.382609 | 78 | 0.576323 | false |
tsvstar/vk_downloader | pytube/api.py | 1 | 16387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from models import Video
from utils import safe_filename
from utils import MultipleObjectsReturned, YouTubeError, CipherError
try:
from urllib2 import urlopen
from urlparse import urlparse, parse_qs, unquote
except ImportError:
from urllib.parse import urlparse, parse_qs, unquote
from urllib.request import urlopen
import re
import json
YT_BASE_URL = 'http://www.youtube.com/get_video_info'
# YouTube quality and codecs id map.
# source: http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs
YT_ENCODING = {
# Flash Video
5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],
# 3GP
36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
13: ["3gp", "NA", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],
# MPEG-4
18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],
# WebM
43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}
# The keys corresponding to the quality/codec map above.
YT_ENCODING_KEYS = (
'extension',
'resolution',
'video_codec',
'profile',
'video_bitrate',
'audio_codec',
'audio_bitrate'
)
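# How the two tables above combine (illustrative):
#
#     dict(zip(YT_ENCODING_KEYS, YT_ENCODING[22]))
#     # -> {'extension': 'mp4', 'resolution': '720p', 'video_codec': 'H.264',
#     #     'profile': 'High', 'video_bitrate': '2-2.9', 'audio_codec': 'AAC',
#     #     'audio_bitrate': '192'}
#
# This is the same mapping that _extract_fmt() builds for each stream url.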
class BaseProvider(object):
# TODO: just cause you CAN do this, doesn't mean you should. `hasattr` is
# much cleaner.
_filename = None
_fmt_values = []
_video_url = None
_js_code = False
_precompiled = False
title = None
videos = []
# fmt was an undocumented URL parameter that allowed selecting
# YouTube quality mode without using player user interface.
@property
def url(self):
return self._video_url
@url.setter
def url(self, url):
self._video_url = url
self._filename = None
self._get_video_info()
@property
def filename(self):
if not self._filename:
self._filename = safe_filename(self.title)
return self._filename
@filename.setter
def filename(self, filename):
self._filename = filename
if self.videos:
for video in self.videos:
video.filename = filename
    def get(self, extension=None, resolution=None, profile="High"):
        """Return a single video given an extension and resolution.
        :params extension: The desired file extension (e.g.: mp4).
:params resolution: The desired video broadcasting standard.
:params profile: The desired quality profile.
"""
result = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
elif profile and v.profile != profile:
continue
else:
result.append(v)
if not len(result):
return
        elif len(result) == 1:
return result[0]
else:
raise MultipleObjectsReturned(
"get() returned more than one object")
    def filter(self, extension=None, resolution=None):
        """Return a filtered list of videos given extension and resolution
        criteria.
        :params extension: The desired file extension (e.g.: mp4).
:params resolution: The desired video broadcasting standard.
"""
results = []
for v in self.videos:
if extension and v.extension != extension:
continue
elif resolution and v.resolution != resolution:
continue
else:
results.append(v)
return results
"""
================================================
YOUTUBE SERVICE
================================================
"""
class YouTube(BaseProvider):
@property
def video_id(self):
"""Gets the video ID extracted from the URL.
"""
parts = urlparse(self._video_url)
qs = getattr(parts, 'query', None)
if qs:
video_id = parse_qs(qs).get('v', None)
if video_id:
return video_id.pop()
def _fetch(self, path, data):
"""Given a path, traverse the response for the desired data. (A
modified ver. of my dictionary traverse method:
https://gist.github.com/2009119)
:params path: A tuple representing a path to a node within a tree.
:params data: The data containing the tree.
"""
elem = path[0]
# Get first element in tuple, and check if it contains a list.
if type(data) is list:
# Pop it, and let's continue..
return self._fetch(path, data.pop())
# Parse the url encoded data
data = parse_qs(data)
# Get the element in our path
data = data.get(elem, None)
# Offset the tuple by 1.
path = path[1::1]
# Check if the path has reached the end OR the element return
# nothing.
        if len(path) == 0 or data is None:
            if type(data) is list and len(data) == 1:
data = data.pop()
return data
else:
# Nope, let's keep diggin'
return self._fetch(path, data)
def _parse_stream_map(self, text):
"""Python's `parse_qs` can't properly decode the stream map
containing video data so we use this instead.
"""
videoinfo = {
"itag": [],
"url": [],
"quality": [],
"fallback_host": [],
"s": [],
"type": []
}
# Split individual videos
videos = text.split(",")
# Unquote the characters and split to parameters
videos = [video.split("&") for video in videos]
for video in videos:
for kv in video:
key, value = kv.split("=")
                value = value.encode('ascii') #@tsv hack - unquote fails to parse unicode
videoinfo.get(key, []).append(unquote(value))
return videoinfo
    def _get_video_info(self):
        """This is responsible for executing the request, extracting the
necessary details, and populating the different video resolutions and
formats into a list.
"""
# TODO: split up into smaller functions. Cyclomatic complexity => 15
self.title = None
self.videos = []
url = self.url.replace('feature=player_embedded&','')
response = urlopen(url)
if response:
content = response.read().decode("utf-8")
try:
player_conf = content[18 + content.find("ytplayer.config = "):]
bracket_count = 0
for i, char in enumerate(player_conf):
if char == "{":
bracket_count += 1
elif char == "}":
bracket_count -= 1
if bracket_count == 0:
break
else:
raise YouTubeError("Cannot get JSON from HTML")
index = i + 1
data = json.loads(player_conf[:index])
except Exception as e:
raise YouTubeError("Cannot decode JSON: {0}".format(e))
is_vevo = False
if data['args'].get('ptk', '') in ['vevo', 'dashmpd']:
# Vevo videos with encrypted signatures
is_vevo = True
stream_map = self._parse_stream_map(
data["args"]["url_encoded_fmt_stream_map"])
self.title = data["args"]["title"]
js_url = "http:" + data["assets"]["js"]
video_urls = stream_map["url"]
for i, url in enumerate(video_urls):
try:
fmt, fmt_data = self._extract_fmt(url)
##print fmt_data
except (TypeError, KeyError):
continue
# If the signature must be ciphered...
###print "ptk=%s"%data['args'].get('ptk',''), is_vevo, (url.split('signature=')+[''])[1].split('&')[0]
if "signature=" not in url:
if is_vevo:
has_decrypted_signature = False
try:
signature = self._decrypt_signature(
stream_map['s'][0])
url += '&signature=' + signature
has_decrypted_signature = True
except TypeError:
pass
if not has_decrypted_signature:
raise CipherError(
"Couldn't cipher the vevo signature. "
"Maybe YouTube has changed the cipher "
"algorithm.")
else:
signature = self._cipher2(stream_map["s"][i], js_url)
url = "%s&signature=%s" % (url, signature)
self.videos.append(Video(url, self.filename, **fmt_data))
self._fmt_values.append(fmt)
self.videos.sort()
@staticmethod
    def _decrypt_signature(s):
        """Decrypt a Vevo-style signature using a fixed sequence of reversals,
        swaps and a splice (see the helper functions below).
        """
def tu(a, b):
c = a[0]
a[0] = a[b % len(a)]
a[b] = c
return a
def splice(a, b):
return a[b:]
a = list(s)
a = tu(a[::-1], 26)
a = tu(a[::-1], 28)
a = tu(a, 38)
a = splice(a[::-1], 3)
return "".join(a)
def _cipher(self, s, url):
"""
        (OBSOLETE DECIPHERING FUNCTION - use _cipher2 instead)
Get the signature using the cipher implemented in the JavaScript code
:params s: Signature
:params url: url of JavaScript file
"""
import tinyjs
        # Get the JS code (if it hasn't been downloaded yet)
if not self._js_code:
self._js_code = (urlopen(url).read().decode()
if not self._js_code else self._js_code)
try:
# Find first function
regexp = r'function \w{2}\(\w{1}\)\{\w{1}=\w{1}\.split\(\"\"\)' \
'\;(.*)\}'
code = re.findall(regexp, self._js_code)[0]
code = code[:code.index("}")]
# pre-code
signature = "a='" + s + "'"
# Tiny JavaScript VM
jsvm = tinyjs.JSVM()
        # Precompiling with the super JavaScript VM (if it hasn't been compiled yet)
if not self._precompiled:
self._precompiled = jsvm.compile(code)
# Make first function + pre-code
jsvm.setPreinterpreted(jsvm.compile(signature) + self._precompiled)
# Executing the JS code
return jsvm.run()["return"]
except Exception as e:
##raise
raise CipherError("Couldn't cipher the signature. Maybe YouTube "
"has changed the cipher algorithm(%s)" % e)
    def _re_search( self, pattern, s):
        """Auxiliary function - return the first non-None group matched by pattern"""
mobj = re.search( pattern, s)
return next(g for g in mobj.groups() if g is not None)
    def _parse_sig_js(self, jscode):
        """Auxiliary function - build the JavaScript signature-decryption function"""
import pytube.jsinterp
# found required function from by pattern
funcname = self._re_search(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode )
# prepare function body to run
jsi = pytube.jsinterp.JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _cipher2(self, s, url):
"""(ACTUAL DECIPHERING FUNCTION)
s - signature
url - url of decryptor javascript
"""
# load JS if needed
if not self._js_code:
self._js_code = (urlopen(url).read().decode()
if not self._js_code else self._js_code)
try:
# get decryptor function
dec_func = self._parse_sig_js(self._js_code)
# run decryptor function
return dec_func(s)
except Exception as e:
##raise
raise CipherError("Couldn't cipher the signature. Maybe YouTube "
"has changed the cipher algorithm(%s)" % e)
    def _extract_fmt(self, text):
        """YouTube does not pass you a completely valid URL-encoded form; I
        suspect this is supposed to act as a deterrent. Nothing some regular
        expressions couldn't handle.
:params text: The malformed data contained within each url node.
"""
itag = re.findall('itag=(\d+)', text)
        if itag and len(itag) == 1:
itag = int(itag[0])
attr = YT_ENCODING.get(itag, None)
if not attr:
return itag, None
return itag, dict(zip(YT_ENCODING_KEYS, attr))
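# Illustrative use of the YouTube provider (a sketch: it needs network access
# and assumes YouTube's player/signature format still matches this code):
#
#     yt = YouTube()
#     yt.url = 'http://www.youtube.com/watch?v=<video_id>'
#     video = yt.get('mp4', '720p')      # itag 22 is the only 720p/mp4/High
#     print(video.url, video.filename)   # entry, so get() returns a single hit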
"""
================================================
VIMEO SERVICE
================================================
"""
VIMEO_ENCODING = {
'mobile': ["mp4", "270p", "H.264", "Base", "0.3", "AAC", "112"], # '480x270 H.264/AAC Stereo MP4',
'sd': ["mp4", "360p", "H.264", "High", "0.7", "AAC", "112"], # '640x360 H.264/AAC Stereo MP4',
'hd': ["mp4", "720p", "H.264", "High", "1.5-1.6", "AAC", "160"], #'1280x720 H.264/AAC Stereo MP4',
}
def _download(url):
response = urlopen(url)
if response:
return response.read().decode("utf-8")
return ''
class Vimeo(BaseProvider):
    def _get_video_info(self):
        """This is responsible for executing the request, extracting the
necessary details, and populating the different video resolutions and
formats into a list.
"""
self.title = None
self.videos = []
html = _download(self.url)
match = re.search('content="?(https?://player.vimeo.com/video/([0-9]+))">',html)
if match:
_id = match.group(1)
self.title = "vimeo%s" % match.group(2)
else:
raise YouTubeError("Fail to parse vimeo page: %s"%self.url)
match = re.search('<meta name="description" content="([^"]+)',html)
if match:
self.title = match.group(1)
html = _download("%s/config"%_id)
try:
j = json.loads( html )
files = j['request']['files'].get('h264',{})
for t, fmap in files.items():
fmt = list( VIMEO_ENCODING['sd'] )
if t in VIMEO_ENCODING:
fmt = VIMEO_ENCODING[t]
fmt_data = dict(zip(YT_ENCODING_KEYS, fmt))
if 'height' in fmap:
fmt_data['resolution'] = str(fmap['height']) + 'p'
if 'bitrate' in fmap:
fmt_data['video_bitrate'] = "%0.1f" % (fmap['bitrate']/1000)
self.videos.append( Video(fmap['url'], self.filename, **fmt_data) )
except Exception as e:
raise YouTubeError("Cannot decode JSON: {0}".format(e))
| mit | 2,981,604,777,545,081,300 | 33.71822 | 118 | 0.505279 | false |
toddpalino/kafka-tools | tests/tools/assigner/test_batcher.py | 1 | 2277 | import unittest
from kafka.tools.exceptions import ProgrammingException
from kafka.tools.assigner.batcher import split_partitions_into_batches
from kafka.tools.models.broker import Broker
from kafka.tools.models.topic import Topic
from kafka.tools.assigner.models.reassignment import Reassignment
from kafka.tools.assigner.models.replica_election import ReplicaElection
class BatcherTests(unittest.TestCase):
def setUp(self):
self.topic = Topic('testTopic', 10)
self.broker = Broker('brokerhost1.example.com', id=1)
for i in range(10):
self.topic.partitions[i].replicas = [self.broker]
def test_split_batches_empty(self):
partitions = []
batches = split_partitions_into_batches(partitions, batch_size=1, use_class=Reassignment)
assert len(batches) == 0
def test_split_batches_no_class(self):
partitions = []
self.assertRaises(ProgrammingException, split_partitions_into_batches, partitions, batch_size=1)
def test_split_batches_proper_class(self):
batches = split_partitions_into_batches(self.topic.partitions, batch_size=100, use_class=Reassignment)
assert isinstance(batches[0], Reassignment)
batches = split_partitions_into_batches(self.topic.partitions, batch_size=100, use_class=ReplicaElection)
assert isinstance(batches[0], ReplicaElection)
def test_split_batches_singles(self):
batches = split_partitions_into_batches(self.topic.partitions, batch_size=1, use_class=Reassignment)
partition_count = sum([len(batch.partitions) for batch in batches])
assert len(batches) == 10
assert partition_count == 10
def test_split_batches_doubles(self):
batches = split_partitions_into_batches(self.topic.partitions, batch_size=2, use_class=Reassignment)
partition_count = sum([len(batch.partitions) for batch in batches])
assert len(batches) == 5
assert partition_count == 10
def test_split_batches_notenough(self):
batches = split_partitions_into_batches(self.topic.partitions, batch_size=20, use_class=Reassignment)
partition_count = sum([len(batch.partitions) for batch in batches])
assert len(batches) == 1
assert partition_count == 10
| apache-2.0 | -1,331,200,517,419,598,300 | 44.54 | 113 | 0.713658 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/common/js_wrapper.py | 1 | 5678 | ###########################################################
#
# Copyright (c) 2015, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['JsWrapper']
import tacticenv
from pyasm.common import Environment, jsonloads, jsondumps, Container
from tactic_client_lib import TacticServerStub
try:
import PyV8
HAS_PYV8 = True
except:
HAS_PYV8 = False
class PyV8:
class JSClass:
pass
def has_pyv8():
return HAS_PYV8
# Replace console.log
class MyConsole(PyV8.JSClass):
def log(self, *args):
args2 = []
for arg in args:
arg = PyV8.convert(arg)
args2.append(arg)
print(" ".join([str(x) for x in args2]))
class ApiDelegator(PyV8.JSClass):
def execute(self, func_name, args=[], kwargs={}):
server = TacticServerStub.get()
if args:
args = jsonloads(args)
if kwargs:
kwargs = jsonloads(kwargs)
if kwargs:
# Quirk ... when there is a kwargs, the last args is the kwargs
if args:
args.pop()
call = "server.%s(*args, **kwargs)" % func_name
else:
call = "server.%s(*args)" % func_name
try:
ret_val = eval(call)
except Exception as e:
print("ERROR: ", e)
raise
ret_val = jsondumps(ret_val)
return ret_val
class JSFile(object):
def copy(self, src, dst):
print("src: ", src)
print("dst: ", dst)
def move(self, src, dst):
pass
class GlobalContext(PyV8.JSClass):
console = MyConsole()
spt_delegator = ApiDelegator()
spt_file = JSFile()
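# The delegation path set up above, traced for one call (illustrative):
#
#     // JavaScript side, inside the PyV8 context
#     var server = TacticServerStub.get();
#     server.ping();                        // routed through spt._delegate(...)
#
#     # Python side
#     ApiDelegator.execute('ping', '[]', '{}')   # -> TacticServerStub.ping()
#
# Arguments and return values cross the JS/Python boundary JSON-encoded; see
# spt._delegate in JsWrapper.init() below and ApiDelegator.execute above.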
class JsWrapper(object):
def __init__(self):
if HAS_PYV8:
with PyV8.JSLocker():
self.ctx = PyV8.JSContext(GlobalContext())
self.ctx.enter()
self.init()
self.ctx.leave()
def get():
key = "JsWrapper"
wrapper = Container.get(key)
        if wrapper is None:
wrapper = JsWrapper()
Container.put(key, wrapper)
return wrapper
get = staticmethod(get)
    def set_value(self, name, value):
self.ctx.locals[name] = value
def execute(self, js, kwargs={}):
if HAS_PYV8:
with PyV8.JSLocker():
self.ctx.enter()
try:
for name, value in kwargs.items():
self.ctx.locals[name] = value
data = self.ctx.eval(js)
finally:
self.ctx.leave()
return data
def execute_func(self, js, kwargs={}):
js = '''
var func = function() {
%s
}
var ret_val = func();
ret_val = JSON.stringify(ret_val);
''' % js
ret_val = self.execute(js, kwargs)
ret_val = jsonloads(ret_val)
return ret_val
def init(self):
install_dir = Environment.get_install_dir()
# initialize
js = '''
<!-- TACTIC -->
// Fixes
var spt = {};
spt.browser = {};
spt.browser.is_IE = function() { return false; }
spt.error = function(error) {
throw(error);
}
'''
self.ctx.eval(js)
sources = [
"environment.js",
"client_api.js"
]
for source in sources:
#path = "tactic/%s" % source
path = "%s/src/context/spt_js/%s" % (install_dir, source)
js = open(path).read()
self.ctx.eval(js)
js = '''
spt._delegate = function(func_name, args, kwargs) {
// convert everything to json
var args2 = [];
for (var i in args) {
args2.push(args[i]);
}
if (typeof(kwargs) == "undefined") {
kwargs = {};
}
args2 = JSON.stringify(args2);
kwargs = JSON.stringify(kwargs);
var ret_val = spt_delegator.execute(func_name, args2, kwargs);
ret_val = JSON.parse(ret_val);
return ret_val;
}
var server = TacticServerStub.get();
'''
self.ctx.eval(js)
def test():
# TEST
cmd = JsWrapper.get()
import time
start = time.time()
js = '''
console.log(server.ping() );
console.log("---");
var result = server.eval("@SOBJECT(sthpw/file)");
for (var i in result) {
var item = result[i];
if ( i > 5 ) break;
console.log(item.code);
}
'''
cmd.execute(js)
print(time.time() - start)
js = '''
console.log("---");
var result = server.get_by_search_key(result[0].__search_key__);
console.log(result.code);
'''
cmd.execute(js)
js = '''
console.log("---");
var result = server.eval("@SOBJECT(sthpw/file)", {single: true});
console.log(result.code);
'''
cmd.execute(js)
print("---")
js = '''
return ['stream1','stream2'];
''';
ret_val = cmd.execute_func(js)
print("ret_val: ", ret_val)
print("---")
js = '''
spt_file.copy("tactic.png", "tactic2.png");
''';
cmd.execute_func(js)
print("---")
kwargs = {
'a': 123,
'b': 234,
'c': "This isn't it"
}
js = '''
spt_file.copy("tactic.png", "tactic2.png");
''';
cmd.execute_func(js, kwargs)
if __name__ == '__main__':
from pyasm.security import Batch
Batch()
test()
| epl-1.0 | 5,923,317,514,424,031,000 | 18.312925 | 75 | 0.499648 | false |
gecos-team/gecosws-config-assistant | gecosws_config_assistant/view/LogTerminalDialog.py | 1 | 4670 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Francisco Fuentes Barrera <[email protected]>"
__copyright__ = "Copyright (C) 2015, Junta de Andalucía" + \
"<[email protected]>"
__license__ = "GPL-2"
import logging
import gettext
from gettext import gettext as _
import fcntl
import os
from subprocess import Popen, PIPE
from gi.repository import Gtk, Pango, GObject
from gecosws_config_assistant.view.GladeWindow import GladeWindow
gettext.textdomain('gecosws-config-assistant')
class LogTerminalDialog(GladeWindow):
'''
Dialog class that shows the system status.
'''
def __init__(self, controller, parent):
'''
Constructor
'''
self.parent = parent
self.controller = controller
self.logger = logging.getLogger('LogTerminalDialog')
self.gladepath = 'logterminal.glade'
self.data = None
self.initUI()
def get_data(self):
''' Getter data '''
return self.__data
def set_data(self, value):
''' Setter data '''
self.__data = value
def initUI(self):
''' Initialize UI '''
self.buildUI(self.gladepath)
self.logger.debug('UI initiated')
def initTerminal(self):
''' Initialize terminal '''
self.sub_proc = Popen(
"tail -n 200 -f /tmp/gecos-config-assistant.log",
shell=True,
stdout=PIPE)
self.sub_outp = ""
def non_block_read(self, output):
''' Non block read '''
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except:
return ''
def update_terminal(self):
''' Update terminal '''
self.textBuffer.insert_at_cursor(
self.non_block_read(self.sub_proc.stdout))
return self.sub_proc.poll() is None
def extractGUIElements(self):
''' Extract GUI elements '''
self.window = self.getElementById('window1')
self.acceptButton = self.getElementById('button1')
self.statusText = self.getElementById('textview1')
self.statusText.set_editable(False)
self.statusText.set_cursor_visible(False)
self.statusText.set_justification(Gtk.Justification.LEFT)
self.statusText.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
self.textBuffer = self.statusText.get_buffer()
self.dialog = self.window
def modifyFont(self):
''' Modifying font '''
fontdesc = Pango.FontDescription("monospace")
self.statusText.modify_font(fontdesc)
def addHandlers(self):
''' Adding handlers '''
self.handlers = self.parent.get_common_handlers()
# add new handlers here
self.logger.debug("Adding back handler")
self.handlers["onBack"] = self.goBack
def show(self):
''' Show '''
self.logger.debug("Show")
self.extractGUIElements()
self.modifyFont()
self.initTerminal()
self.window.set_title(_('Log terminal'))
self.window.set_modal(True)
self.window.set_transient_for(self.parent.window)
GObject.timeout_add(100, self.update_terminal)
self.window.show_all()
x, y = self.parent.window.get_position()
w, h = self.parent.window.get_size()
sw, sh = self.window.get_size()
self.logger.debug('x={} y={} w={} h={} sw={} sh={}'.format(
x, y, w, h, sw, sh))
self.window.move(x + w/2 - sw/2, y + h/2 - sh/2)
while Gtk.events_pending():
Gtk.main_iteration()
def goBack(self, *args):
''' Go back '''
self.logger.debug("Go back")
self.dialog.destroy()
data = property(
get_data,
set_data,
None,
None)
| gpl-2.0 | -4,645,901,874,535,072,000 | 27.469512 | 74 | 0.615335 | false |
danbradham/nodify | nodify/view.py | 1 | 3580 | '''
view
====
Defines a view class for maintaining a graphics scene.
'''
import math
from PySide import QtCore, QtGui
class View(QtGui.QGraphicsView):
'''A View supporting smooth panning and zooming. Use Alt+Left Mouse to
pan and Alt+Middle or Right Mouse to zoom. Dragging without Alt drags out
a selection marquee.
.. seealso::
Documentation for :class:`QtGui.QGraphicsView`'''
def __init__(self, *args, **kwargs):
super(View, self).__init__(*args, **kwargs)
self.setTransformationAnchor(QtGui.QGraphicsView.NoAnchor)
self.setResizeAnchor(QtGui.QGraphicsView.NoAnchor)
self.setRubberBandSelectionMode(QtCore.Qt.IntersectsItemShape)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setRenderHint(QtGui.QPainter.Antialiasing)
# Set a massive canvas for seemingly unlimited pan and zoom
self.setSceneRect(0, 0, 32000, 32000)
self.centerOn(16000, 16000)
self._last_pos = QtCore.QPoint(0, 0)
self._drag_mod = QtCore.Qt.AltModifier
self._drag_buttons = [QtCore.Qt.LeftButton]
self._pan_buttons = [QtCore.Qt.LeftButton]
self._zoom_buttons = [QtCore.Qt.MiddleButton, QtCore.Qt.RightButton]
self._rel_scale = 1
def mousePressEvent(self, event):
'''Overloaded to support both marquee dragging and pan/zoom. Here we
setup the dragging mode and store the anchor position.'''
m = event.modifiers()
b = event.buttons()
        if m == self._drag_mod or b not in self._drag_buttons:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
self._last_pos = self._anchor_pos = event.pos()
super(View, self).mousePressEvent(event)
def zoom(self, factor):
'''Zoom the view.
:param factor: Amount to scale'''
rel_scale = self._rel_scale * factor
if rel_scale < 0.2 or rel_scale > 8:
return
self._rel_scale = rel_scale
transform = self.transform()
transform.scale(factor, factor)
self.setTransform(transform)
def pan(self, x, y):
'''Pan the view.
:param x: Number of pixels in x
:param y: Number of pixels in y'''
self.translate(-x, -y)
def mouseMoveEvent(self, event):
if not event.modifiers() == QtCore.Qt.AltModifier:
super(View, self).mouseMoveEvent(event)
return
b = event.buttons()
pos = event.pos()
delta = pos - self._last_pos
if b in self._pan_buttons:
delta /= self.transform().m11()
self.pan(-delta.x(), -delta.y())
elif b in self._zoom_buttons:
old_pos = self.mapToScene(self._anchor_pos)
step = 0.02 * max(math.sqrt(delta.x() ** 2 + delta.y() ** 2), 1.0)
if delta.x() < 0 or -delta.y() < 0:
step *= -1
factor = 1 + step
self.zoom(factor) # Zoom
delta = self.mapToScene(self._anchor_pos) - old_pos
self.pan(-delta.x(), -delta.y()) # Pan to center on mouse pivot
self._last_pos = pos
def mouseReleaseEvent(self, event):
if event.modifiers() == self._drag_mod:
self.setDragMode(QtGui.QGraphicsView.NoDrag)
else:
self.setDragMode(QtGui.QGraphicsView.RubberBandDrag)
super(View, self).mouseReleaseEvent(event)
| mit | 3,127,646,211,352,622,600 | 29.598291 | 78 | 0.609777 | false |
gpoisoned/hars-app | server/server.py | 1 | 3487 | from flask import Flask, jsonify
import threading
import zmq
import time
import logging
from Queue import Queue
# Clear the Log file if it exists
with open("server.log", "w"):
pass
logging.basicConfig(filename='server.log',level=logging.DEBUG,\
format='%(levelname)s:%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
app = Flask(__name__)
context = zmq.Context()
servers = ['tcp://127.0.0.1:5558', 'tcp://127.0.0.1:5559']
servers_heartbeats = ['tcp://127.0.0.1:6668', 'tcp://127.0.0.1:6669']
server_nbr = 0
message_queue = Queue()
primary_router_msg = context.socket(zmq.PUB)
primary_router_msg.connect(servers[0])
backup_router_msg = context.socket(zmq.PUB)
backup_router_msg.connect(servers[1])
@app.route("/square/<int:num>")
def square(num):
message_queue.put(num)
return jsonify(status="Work will be sent to worker!")
@app.route("/")
def root():
return jsonify(status="Web server is running!")
@app.route("/health")
def health():
    return jsonify(health="It's all good :)")
def message_sender():
global servers
global server_nbr
global context
while True:
message = message_queue.get()
print message
if server_nbr == 0:
primary_router_msg.send("%s %s" %("DATA", message))
elif server_nbr == 1:
backup_router_msg.send("%s %s" %("DATA", message))
message_queue.task_done()
# Background thread to do heartbeat with router
def heartbeat_listener():
# We want to modify the global states server_nbr
# and use global zeromq context
global servers_heartbeats
global server_nbr
global context
    HEARTBEAT_TIMEOUT = 1000 * 5  # 5 seconds; poller timeout is given in milliseconds
DELAY = 3000
router_heartbeat = context.socket(zmq.REQ)
router_heartbeat.connect(servers_heartbeats[server_nbr])
poller = zmq.Poller()
poller.register(router_heartbeat, zmq.POLLIN)
heartbeat = "HB"
while True:
        try:
            router_heartbeat.send(heartbeat, zmq.NOBLOCK)
            expect_reply = True
        except zmq.ZMQError:
            expect_reply = False
while expect_reply:
socks = dict(poller.poll(HEARTBEAT_TIMEOUT))
if router_heartbeat in socks:
reply = router_heartbeat.recv(zmq.NOBLOCK)
expect_reply = False
else:
logging.warning("Router is probably dead. Connecting to backup router")
time.sleep(DELAY/1000)
# Unregister old socket and delete it
poller.unregister(router_heartbeat)
router_heartbeat.close()
# Change server and recreate sockets
server_nbr = (server_nbr + 1) % 2
router_heartbeat = context.socket(zmq.REQ)
poller.register(router_heartbeat, zmq.POLLIN)
# reconnect and resend request
router_heartbeat.connect(servers_heartbeats[server_nbr])
router_heartbeat.send(heartbeat,zmq.NOBLOCK)
if __name__ == "__main__":
app.debug = True
logging.info("Starting a heartbeat daemon process...")
    listener = threading.Thread(name="Heartbeat_listener", target=heartbeat_listener).start()
    sender = threading.Thread(name="Message sender", target=message_sender).start()
logging.info("**** Daemon started. Now running app server ****")
app.run(threaded=True)
logging.error("App server crashed.")
context.term()
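# --- Illustrative sketch (not part of this repo): router-side heartbeat responder.
# heartbeat_listener() above sends "HB" on a REQ socket and treats any reply as
# proof of life; after HEARTBEAT_TIMEOUT with no reply it fails over to the
# backup router.  The counterpart only needs a REP socket bound to the matching
# heartbeat endpoint.  The bind address and reply payload here are assumptions.
def example_router_heartbeat_responder(bind_addr='tcp://127.0.0.1:6668'):
    responder = zmq.Context().socket(zmq.REP)
    responder.bind(bind_addr)
    while True:
        responder.recv()          # blocks until the web server sends "HB"
        responder.send("HB-ACK")  # any reply resets the failover timer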
| apache-2.0 | 796,355,831,453,210,900 | 31.588785 | 94 | 0.629481 | false |
jiasir/pycs | vulpo/resultset.py | 1 | 6559 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from vulpo.scs.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
to the client. It is light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
        self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
# set it here to remain backwards-compatibile.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
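# --- Illustrative sketch (added for illustration; not part of the original module) ---
# The ResultSet docstring above describes marker_elem as a list of
# (element_name, class) tuples: each time the SAX handler meets element_name,
# a new instance of that class is created and appended to the ResultSet.  The
# ``Item`` class and the 'Item'/'Name' element names below are hypothetical,
# purely to show the shape of that contract.
def _example_marker_elem_usage():
    class Item(object):
        def __init__(self, connection=None):
            self.name = None
        def startElement(self, name, attrs, connection):
            return None
        def endElement(self, name, value, connection):
            if name == 'Name':
                self.name = value
    rs = ResultSet(marker_elem=[('Item', Item)])
    item = rs.startElement('Item', {}, connection=None)   # new Item appended
    item.endElement('Name', 'first-item', connection=None)
    rs.endElement('IsTruncated', 'false', connection=None)
    return rs, item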
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
            self.box_usage = value
else:
setattr(self, name, value)
| mit | -787,891,861,120,344,300 | 36.695402 | 79 | 0.606495 | false |
ceph/ceph-ansible | tests/functional/tests/rbd-mirror/test_rbd_mirror.py | 1 | 1848 | import pytest
import json
class TestRbdMirrors(object):
@pytest.mark.no_docker
def test_rbd_mirror_is_installed(self, node, host):
assert host.package("rbd-mirror").is_installed
def test_rbd_mirror_service_enabled_and_running(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
s = host.service(service_name)
assert s.is_enabled
assert s.is_running
def test_rbd_mirror_is_up(self, node, host, setup):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
daemons = []
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format( # noqa E501
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
hostname = node["vars"]["inventory_hostname"]
cluster = setup['cluster_name']
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format( # noqa E501
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
status = json.loads(output)
daemon_ids = [i for i in status["servicemap"]["services"]
["rbd-mirror"]["daemons"].keys() if i != "summary"]
for daemon_id in daemon_ids:
daemons.append(status["servicemap"]["services"]["rbd-mirror"]
["daemons"][daemon_id]["metadata"]["hostname"])
assert hostname in daemons
| apache-2.0 | 4,185,165,945,069,044,000 | 41.976744 | 216 | 0.605519 | false |
brchiu/tensorflow | tensorflow/contrib/distribute/python/cross_device_ops_test.py | 1 | 23595 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import device_util
def _make_per_replica(values, devices, regroup=False):
devices = cross_device_ops_lib.get_devices_from(devices)
assert len(values) == len(devices)
# We simulate the result of regroup called on PerReplica which strips the
# PerReplica wrapper if it has only one value.
if len(values) == 1 and regroup:
with ops.device(devices[0]):
placed_v = array_ops.identity(values[0])
return placed_v
index = {}
for d, v in zip(devices, values):
with ops.device(d):
placed_v = array_ops.identity(v)
index[d] = placed_v
return value_lib.PerReplica(index)
# pylint: disable=g-doc-args,g-doc-return-or-yield
def _fake_mirrored(value, devices):
"""Create a faked Mirrored object for testing.
All components of the returned Mirrored have the same objects, which is not
true in reality.
"""
devices = cross_device_ops_lib.get_devices_from(devices)
return value_lib.Mirrored(
{d: v for d, v in zip(devices, [value] * len(devices))})
def _make_indexed_slices(values, indices, dense_shape, device):
with ops.device(device):
tensor = ops.IndexedSlices(
values=constant_op.constant(values),
indices=constant_op.constant(indices),
dense_shape=constant_op.constant(dense_shape))
return tensor
def _make_mirrored_indexed_slices(devices, values, indices, dense_shape):
return value_lib.Mirrored({
d: _make_indexed_slices(values, indices, dense_shape, d) for d in devices
})
_cpu_device = "/device:CPU:0"
class CrossDeviceOpsTestBase(test.TestCase, parameterized.TestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertIsInstance(left, ops.IndexedSlices)
self.assertIsInstance(right, ops.IndexedSlices)
self.assertEqual(device_util.resolve(left.device),
device_util.resolve(right.device))
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def _assert_values_equal(self, left, right):
if isinstance(left, list):
for l, r in zip(left, right):
self._assert_values_equal(l, r)
else:
self.assertEqual(type(left), type(right))
self.assertEqual(set(left.devices), set(right.devices))
if isinstance(list(left._index.values())[0], ops.IndexedSlices):
for (d, v) in left._index.items():
self._assert_indexed_slices_equal(v, right._index[d])
elif context.executing_eagerly():
self.assertEqual([v.numpy() for v in left._index.values()],
list(right._index.values()))
else:
with self.cached_session() as sess:
self.assertEqual(
sess.run(list(left._index.values())), list(right._index.values()))
def _testReductionAndBroadcast(self, cross_device_ops, distribution):
devices = distribution.worker_devices
values = [constant_op.constant(float(d)) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices)
mean = (len(devices) - 1.) / 2.
values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = mean + 1.
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1., _cpu_device)
destination_str = _cpu_device
destination_list = devices
all_destinations = [
destination_mirrored, destination_different, destination_str,
destination_list
]
# test reduce()
for destinations in all_destinations:
self._assert_values_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations),
_fake_mirrored(mean, destinations))
self._assert_values_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations))
self._assert_values_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM, per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices), destinations))
self._assert_values_equal(
cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices), destinations))
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_values_equal(
cross_device_ops.batch_reduce(
reduce_util.ReduceOp.MEAN,
[(per_replica, d1), (per_replica_2, d2)]),
[
_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)
])
self._assert_values_equal(
cross_device_ops.batch_reduce(
reduce_util.ReduceOp.SUM,
[(per_replica, d1), (per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices), d1),
_fake_mirrored(mean_2 * len(devices), d2)
])
# test broadcast()
for destinations in all_destinations:
self._assert_values_equal(
cross_device_ops.broadcast(constant_op.constant(1.), destinations),
_fake_mirrored(1., destinations))
class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
# TODO(yuefengz): decouple the num_gpus check from distribution in
# combinations module so that we can pass in devices instead of a distribution
# strategy.
reduction_to_one_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"DefaultReductionToOneDeviceCrossDeviceOps",
cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps()),
combinations.NamedObject(
"ReductionToCPUDeviceCrossDeviceOps",
cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps(
reduce_to_device=_cpu_device)),
combinations.NamedObject(
"AccumulateNCrossDeviceOp",
cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps(
accumulation_fn=math_ops.accumulate_n)),
],
distribution=[
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
],
mode=["graph", "eager"])
allreduce_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"AllReduce",
cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 1, 0, 0)),
combinations.NamedObject(
"HierarchicalCopy",
cross_device_ops_lib.AllReduceCrossDeviceOps(
"hierarchical_copy", 8, 0, 0)),
combinations.NamedObject(
"AllReduceNoGradientRepacking",
cross_device_ops_lib.AllReduceCrossDeviceOps("nccl", 0, 0, 0)),
combinations.NamedObject(
"HierarchicalCopyAggregateSmallTensors",
cross_device_ops_lib.AllReduceCrossDeviceOps(
"hierarchical_copy", 0, 100, 10))
],
distribution=[combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_two_gpus],
mode=["graph", "eager"])
@combinations.generate(reduction_to_one_combinations + allreduce_combinations)
def testReductionAndBroadcast(self, cross_device_ops, distribution):
with distribution.scope():
self._testReductionAndBroadcast(cross_device_ops, distribution)
def testChooseAlgorithm(self):
device_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
[0, 5, 6, 7], [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6]]
result = cross_device_ops_lib._choose_all_reduce_algorithm(device_links)
self.assertIsInstance(result, cross_device_ops_lib.AllReduceCrossDeviceOps)
self.assertEqual(result._all_reduce_alg, "hierarchical_copy")
self.assertEqual(result._num_packs, 8)
# if there are only 4 devices
device_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7]]
result = cross_device_ops_lib._choose_all_reduce_algorithm(device_links)
self.assertIsInstance(result, cross_device_ops_lib.AllReduceCrossDeviceOps)
self.assertEqual(result._all_reduce_alg, "nccl")
self.assertEqual(result._num_packs, 1)
# if devices links contain each device itself
device_links = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 5], [0, 1, 2, 3, 6],
[0, 1, 2, 3, 7], [0, 4, 5, 6, 7], [1, 4, 5, 6, 7],
[2, 4, 5, 6, 7], [3, 4, 5, 6, 7]]
result = cross_device_ops_lib._choose_all_reduce_algorithm(device_links)
self.assertIsInstance(result, cross_device_ops_lib.AllReduceCrossDeviceOps)
self.assertEqual(result._all_reduce_alg, "hierarchical_copy")
self.assertEqual(result._num_packs, 8)
# if not dgx1-like links
device_links = [[0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7], [0, 5, 6, 7],
[1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6], [1, 2, 3, 4]]
result = cross_device_ops_lib._choose_all_reduce_algorithm(device_links)
self.assertIsInstance(result, cross_device_ops_lib.AllReduceCrossDeviceOps)
self.assertEqual(result._all_reduce_alg, "nccl")
self.assertEqual(result._num_packs, 1)
@combinations.generate(combinations.combine(
mode=["graph", "eager"],
required_gpus=1))
def testSimpleReduceWithIndexedSlices(self):
devices = ["/cpu:0", "/gpu:0"]
t0 = _make_indexed_slices([[1., 2.]], [1], [5, 2], devices[0])
t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], [5, 2], devices[1])
per_replica = value_lib.PerReplica({devices[0]: t0, devices[1]: t1})
result = cross_device_ops_lib._simple_reduce(
per_replica, devices[0], math_ops.add_n, reduce_util.ReduceOp.SUM)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices with and without duplicate indices.
total_with_dups = _make_indexed_slices(
[[1., 2.], [3., 4.], [5., 6.]], [1, 1, 3], [5, 2], devices[0])
total_without_dups = _make_indexed_slices(
[[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
self._assert_indexed_slices_equal(total_with_dups, result)
self._assert_indexed_slices_equal(total_without_dups, result)
@combinations.generate(
combinations.combine(
cross_device_ops_instance=[
combinations.NamedObject(
"ReductionToOneDeviceCrossDeviceOps",
cross_device_ops_lib.ReductionToOneDeviceCrossDeviceOps()),
combinations.NamedObject(
"AllReduceCrossDeviceOps",
cross_device_ops_lib.AllReduceCrossDeviceOps())
],
reduce_op=[reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN],
batch_reduce=[True, False],
mode=["graph", "eager"],
required_gpus=1))
def testIndexedSlicesAllReduce(self, cross_device_ops_instance, reduce_op,
batch_reduce):
devices = ["/cpu:0", "/gpu:0"]
dense_shape = [5, 2]
t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
t1 = _make_indexed_slices(
[[3., 4.], [5., 6.]], [1, 3], dense_shape, devices[1])
per_replica = value_lib.PerReplica({devices[0]: t0, devices[1]: t1})
if batch_reduce:
result = cross_device_ops_instance.batch_reduce(
reduce_op, [(per_replica, devices)])
else:
result = cross_device_ops_instance.reduce(
reduce_op, per_replica, devices)
total_indices_with_dups = [1, 1, 3]
total_indices_without_dups = [1, 3]
if reduce_op == reduce_util.ReduceOp.SUM:
total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
total_values_without_dups = [[4., 6.], [5., 6.]]
else:
assert reduce_op == reduce_util.ReduceOp.MEAN
total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
total_values_without_dups = [[2., 3.], [2.5, 3.]]
total_mirrored_with_dups = _make_mirrored_indexed_slices(
devices, total_values_with_dups, total_indices_with_dups, dense_shape)
total_mirrored_without_dups = _make_mirrored_indexed_slices(
devices, total_values_without_dups, total_indices_without_dups,
dense_shape)
# Test that the result is semantically equal to both the concatenated
# IndexedSlices, as well as when the duplicate indices are summed up.
if batch_reduce:
total_mirrored_with_dups = [total_mirrored_with_dups]
total_mirrored_without_dups = [total_mirrored_without_dups]
self._assert_values_equal(total_mirrored_with_dups, result)
self._assert_values_equal(total_mirrored_without_dups, result)
class MultiWorkerCrossDeviceOpsTest(multi_worker_test_base.MultiWorkerTestBase,
CrossDeviceOpsTestBase):
worker_devices = [
"/job:worker/replica:0/task:0", "/job:worker/replica:0/task:1"
]
multi_worker_allreduce_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"MultiWorkerAllReduce",
cross_device_ops_lib.MultiWorkerAllReduce(
worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 0, 0)),
combinations.NamedObject(
"MultiWorkerAllReducePack",
cross_device_ops_lib.MultiWorkerAllReduce(
worker_devices, 2, ("pscpu/pscpu", 2, -1), 1, 0, 0)),
combinations.NamedObject(
"MultiWorkerAllReduceAggregation",
cross_device_ops_lib.MultiWorkerAllReduce(
worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 100, 10)),
combinations.NamedObject(
"MultiWorkerAllReduceMultipleSpecs",
cross_device_ops_lib.MultiWorkerAllReduce(
worker_devices, 2, [("pscpu/pscpu", 2, 100),
("xring", 2, -1)], 0, 0, 0)),
],
distribution=[
combinations.NamedDistribution(
"MirroredCPU",
lambda: mirrored_strategy.MirroredStrategy(num_gpus=0),
required_gpus=0),
combinations.NamedDistribution(
"Mirrored1GPU",
lambda: mirrored_strategy.MirroredStrategy(num_gpus=1),
required_gpus=1),
combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_strategy.MirroredStrategy(num_gpus=2),
required_gpus=2),
],
mode=["graph"])
@combinations.generate(multi_worker_allreduce_combinations)
def testReductionAndBroadcast(self, cross_device_ops, distribution):
distribution.configure(cluster_spec={
"worker":
["/job:worker/replica:0/task:0", "/job:worker/replica:0/task:1"]
})
with distribution.scope():
self._testReductionAndBroadcast(cross_device_ops, distribution)
class MultiWorkerCollectiveAllReduceTest(
multi_worker_test_base.MultiWorkerTestBase, parameterized.TestCase):
collective_key_base = 100000
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=0)
def setUp(self):
super(MultiWorkerCollectiveAllReduceTest, self).setUp()
# Reusing keys are not supported well. So we have to give a different
# collective key base for different tests.
MultiWorkerCollectiveAllReduceTest.collective_key_base += 100000
def _get_test_objects(self, task_type, task_id, num_gpus=0, local_mode=False):
collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=10 * num_gpus +
MultiWorkerCollectiveAllReduceTest.collective_key_base,
instance_key_start=num_gpus * 100 +
MultiWorkerCollectiveAllReduceTest.collective_key_base,
instance_key_with_id_start=num_gpus * 10000 +
MultiWorkerCollectiveAllReduceTest.collective_key_base)
if local_mode:
collective_all_reduce_ops = cross_device_ops_lib.CollectiveAllReduce(
1, num_gpus, collective_keys=collective_keys)
if num_gpus:
devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
else:
devices = ["/device:CPU:0"]
return collective_all_reduce_ops, devices, ""
else:
collective_all_reduce_ops = cross_device_ops_lib.CollectiveAllReduce(
3, num_gpus, collective_keys=collective_keys)
if num_gpus:
devices = [
"/job:%s/task:%d/device:GPU:%d" % (task_type, task_id, i)
for i in range(num_gpus)
]
else:
devices = ["/job:%s/task:%d" % (task_type, task_id)]
return (collective_all_reduce_ops, devices,
"grpc://" + self._cluster_spec[task_type][task_id])
def _assert_values_equal(self, left, right, sess):
if isinstance(left, list):
for l, r in zip(left, right):
self._assert_values_equal(l, r, sess)
else:
self.assertEqual(type(left), type(right))
self.assertEqual(set(left.devices), set(right.devices))
run_options = config_pb2.RunOptions()
run_options.experimental.collective_graph_key = 6
left_values = np.array(
sess.run(list(left._index.values()), options=run_options)).flatten()
right_values = np.array(list(right._index.values())).flatten()
self.assertEqual(len(left_values), len(right_values))
for l, r in zip(left_values, right_values):
self.assertEqual(l, r)
def _test_reduction(self, task_type, task_id, num_gpus, local_mode=False):
collective_all_reduce, devices, master_target = self._get_test_objects(
task_type, task_id, num_gpus, local_mode=local_mode)
if local_mode:
num_workers = 1
worker_device = None
else:
num_workers = len(self._cluster_spec.get("chief", [])) + len(
self._cluster_spec.get("worker", []))
worker_device = "/job:%s/task:%d" % (task_type, task_id)
with ops.Graph().as_default(), \
ops.device(worker_device), \
self.cached_session(target=master_target) as sess:
# Collective ops doesn't support scalar tensors, so we have to construct
# 1-d tensors.
values = [constant_op.constant([float(d)]) for d in range(len(devices))]
per_replica = _make_per_replica(values, devices, regroup=True)
mean = np.array([(len(devices) - 1.) / 2.])
values_2 = [constant_op.constant([d + 1.0]) for d in range(len(devices))]
per_replica_2 = _make_per_replica(values_2, devices)
mean_2 = np.array([mean[0] + 1.])
destination_mirrored = _fake_mirrored(1., devices)
destination_different = _fake_mirrored(1., _cpu_device)
destination_str = _cpu_device
destination_list = devices
all_destinations = [
destination_different, destination_mirrored, destination_str,
destination_list
]
# test reduce()
for destinations in all_destinations:
self._assert_values_equal(
collective_all_reduce.reduce(
reduce_util.ReduceOp.MEAN,
per_replica,
destinations=destinations),
_fake_mirrored(mean, destinations), sess)
self._assert_values_equal(
collective_all_reduce.reduce(
reduce_util.ReduceOp.MEAN,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2, destinations), sess)
self._assert_values_equal(
collective_all_reduce.reduce(
reduce_util.ReduceOp.SUM,
per_replica,
destinations=destinations),
_fake_mirrored(mean * len(devices) * num_workers, destinations),
sess)
self._assert_values_equal(
collective_all_reduce.reduce(
reduce_util.ReduceOp.SUM,
per_replica_2,
destinations=destinations),
_fake_mirrored(mean_2 * len(devices) * num_workers, destinations),
sess)
# test batch_reduce()
for d1, d2 in itertools.product(all_destinations, all_destinations):
self._assert_values_equal(
collective_all_reduce.batch_reduce(reduce_util.ReduceOp.MEAN,
[(per_replica, d1),
(per_replica_2, d2)]),
[
_fake_mirrored(mean, d1),
_fake_mirrored(mean_2, d2)
], sess)
self._assert_values_equal(
collective_all_reduce.batch_reduce(reduce_util.ReduceOp.SUM,
[(per_replica, d1),
(per_replica_2, d2)]),
[
_fake_mirrored(mean * len(devices) * num_workers, d1),
_fake_mirrored(mean_2 * len(devices) * num_workers, d2)
], sess)
return True
@combinations.generate(
combinations.combine(mode=["graph"], num_gpus=[0, 1, 2], required_gpus=1))
def testReductionDistributed(self, num_gpus):
if context.num_gpus() < num_gpus:
return
self._run_between_graph_clients(self._test_reduction, self._cluster_spec,
num_gpus)
# Collective ops doesn't support strategy with one device.
def testReductionLocal(self, num_gpus=2):
if context.num_gpus() < num_gpus:
return
self._test_reduction(None, None, num_gpus, local_mode=True)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,392,647,393,940,823,000 | 40.394737 | 81 | 0.625556 | false |
foggy0400/pyrat | source/httprequest.py | 1 | 1139 | ######################################################################
# pyrat - An Eve Online PvE analyser
# Copyright (C) 2017 Instigo Pares [SUAD]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# If you need to, contact me on reddit @ /u/instigo
######################################################################
# Delete these later when pyrat.py is done
import regioncall
import listsystems
# not these lol
from ratelimit import rate_limited
import urllib.request
systems = listsystems.pull_systems(regioncall.query_region())
print(systems)
| gpl-3.0 | -5,878,944,023,011,555,000 | 39.678571 | 70 | 0.673398 | false |
calisthenics/site | bin/wikipedia-bodyweight-exercise.py | 1 | 4818 | #!/usr/bin/env python
# coding: utf-8
import os
import re
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from logya.core import Logya
from logya.path import slugify, target_file
from logya.writer import encode_content, write
logya = Logya()
logya.init_env()
url = 'https://en.wikipedia.org/wiki/Bodyweight_exercise'
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml')
replacements = {
'bams': 'bam',
'bodybuilders': 'bodybuilder',
'boots': 'boot',
'chairs': 'chair',
'climbers': 'climber',
'crosses': 'cross',
'curls': 'curl',
'darlings': 'darling',
'dips': 'dip',
'dogs': 'dog',
'extensions': 'extension',
'humpers': 'humper',
'ins': 'in',
'kicks': 'kick',
'knives': 'knife',
'lifts': 'lift',
'little piggies': '3 little pigs',
'lunges': 'lunge',
'maybes': 'maybe',
'mikes': 'mike',
'mornings': 'morning',
'offs': 'off',
'plunges': 'plunge',
'push exercises': 'push',
'raises': 'raise',
'rotations': 'rotation',
'scissors': 'scissor',
'spidermans': 'spiderman',
'supermans': 'superman',
'swimmers': 'swimmer',
'squats': 'squat',
'ups': 'up'
}
resources = '## Resources\n\n* [Wikipedia: Bodyweight exercise]({})'.format(url)
def canonical_name(name):
name = name.strip().lower()
if name.startswith('full body'):
return ''
for source, target in replacements.items():
name = re.sub(r'\b{}\b'.format(source), target, name)
return name.title()
def clean_text(text):
return text.replace('[citation needed]', '').strip()
# Only interested in TOC numbers 4 to 8.
tocnumbers = range(4, 9)
toc1_items = soup.find(id='toc').find_all(class_='toclevel-1')
groups = [i for i in toc1_items if int(i.find('a').find(class_='tocnumber').text) in tocnumbers]
assert len(groups) == len(tocnumbers)
# Assemble exercise documents
for group in groups:
group_name = group.find('a').find(class_='toctext').text.strip()
for item in group.find('ul').find_all('a'):
href = item.attrs['href']
heading = soup.find(id=href.lstrip('#')).parent
name = canonical_name(item.find(class_='toctext').text)
groups = [canonical_name(group_name)]
body = []
variants = []
muscles = []
for sibling in heading.find_next_siblings():
if sibling.name == 'p':
body.append(clean_text(sibling.text))
elif sibling.name == 'dl':
dth = sibling.find('dt').text.strip().lower()
if dth == 'common variants':
variants = list(filter(None, [canonical_name(i.text) for i in sibling.find_all('dd') if i.text != 'none']))
elif dth == 'muscle groups':
muscles = list(filter(None, [canonical_name(i.text) for i in sibling.find_all('dd')]))
elif sibling.name == 'h3':
break
if body:
body.append(resources)
doc = {
'created': datetime.now(),
'description': body[0].split('. ')[0] + '.',
'groups': groups,
'muscles': muscles,
'template': 'exercise.html',
'title': name,
'variants': variants
}
# Files shall be saved as md files, so calling write_content directly
# is not possible as it would save as html.
filename = target_file(logya.dir_content, '/exercise/{}.md'.format(slugify(name)))
if not os.path.exists(filename):
write(filename, encode_content(doc, '\n\n'.join(body)))
# Create stub files for variants
for variant in variants:
filename = target_file(logya.dir_content, '/exercise/{}.md'.format(slugify(variant)))
if not os.path.exists(filename):
ex_variants = list(set(variants).union(set([name])).difference(set([variant])))
doc = {
'created': datetime.now(),
'description': '',
'groups': groups,
'muscles': muscles,
'template': 'exercise.html',
'title': variant,
'variants': ex_variants
}
write(filename, encode_content(doc, ''))
# Create stub files for muscles
for muscle in muscles:
filename = target_file(logya.dir_content, '/muscle/{}.md'.format(slugify(muscle)))
if not os.path.exists(filename):
doc = {
'created': datetime.now(),
'description': '',
'template': 'muscle.html',
'title': muscle
}
write(filename, encode_content(doc, '')) | mit | 3,966,870,147,894,004,000 | 31.126667 | 127 | 0.547945 | false |
gdorion/advent-of-code | 2015/python/Day14/race.py | 1 | 1703 | #!/bin/env python
#
# Adventofcode.com
#
# Author : Guillaume Dorion
# Email : [email protected]
#
class Reindeer(object):
def __init__(self, name, speed, flightTime, restTime):
self.name = name
self.speed = speed
self.flightTime = flightTime
self.restTime = restTime
# Results
self.timeFlying = 0
self.distance = 0
self.points = 0
def takeAction(self, iteration, duration):
if self.isFlying(iteration, duration):
self.distance = self.distance + self.speed
return True
def isFlying(self, iteration, duration):
if (iteration % (self.flightTime + self.restTime)) < self.flightTime:
return True
return False
reindeers = []
dt = 2503
def findLeader(reindeers):
best = 0
for r in reindeers:
if r.distance >= best:
best = r.distance
for r in reindeers:
if r.distance == best:
r.points = r.points + 1
with open('data.txt') as f:
for line in f:
l = line.rstrip('\n')
params = l.split('.')[0].split(' ')
# Set the speed all on a km/s base
reindeers.append(Reindeer(params[0], int(params[3]), int(params[6]), int(params[13])))
for second in range(dt):
for reindeer in reindeers:
reindeer.takeAction(second, dt)
findLeader(reindeers)
longest = 0
for reindeer in reindeers:
print reindeer.name + " " + str(reindeer.distance) + "(%s)" % (str(reindeer.points))
winner = reindeers[0]
for reindeer in reindeers:
if winner.points < reindeer.points:
winner = reindeer
print winner.name + ' ' + str(winner.distance) + ' ' + str(winner.points)
| mit | -1,761,703,601,739,938,600 | 23.328571 | 94 | 0.600117 | false |
sahilshekhawat/ApkDecompiler | javadecompiler/Krakatau/java/ast2.py | 1 | 3633 | from . import ast
from .stringescape import escapeString as escape
class MethodDef(object):
def __init__(self, class_, flags, name, desc, retType, paramDecls, body):
self.flagstr = flags + ' ' if flags else ''
self.retType, self.paramDecls = retType, paramDecls
self.body = body
self.comment = None
self.triple = class_.name, name, desc
if name == '<clinit>':
self.isStaticInit, self.isConstructor = True, False
elif name == '<init>':
self.isStaticInit, self.isConstructor = False, True
self.clsname = ast.TypeName((class_.name, 0))
else:
self.isStaticInit, self.isConstructor = False, False
def print_(self, printer, print_):
argstr = ', '.join(print_(decl) for decl in self.paramDecls)
if self.isStaticInit:
header = 'static'
elif self.isConstructor:
name = print_(self.clsname).rpartition('.')[-1]
header = '{}{}({})'.format(self.flagstr, name, argstr)
else:
name = printer.methodName(*self.triple)
header = '{}{} {}({})'.format(self.flagstr, print_(self.retType), escape(name), argstr)
if self.comment:
header = '//{}\n{}'.format(self.comment, header)
if self.body is None:
return header + ';\n'
else:
return header + '\n' + print_(self.body)
class FieldDef(object):
def __init__(self, flags, type_, class_, name, desc, expr=None):
self.flagstr = flags + ' ' if flags else ''
self.type_ = type_
self.name = name
self.expr = None if expr is None else ast.makeCastExpr(type_.tt, expr)
self.triple = class_.name, name, desc
def print_(self, printer, print_):
name = escape(printer.fieldName(*self.triple))
if self.expr is not None:
return '{}{} {} = {};'.format(self.flagstr, print_(self.type_), name, print_(self.expr))
return '{}{} {};'.format(self.flagstr, print_(self.type_), name)
class ClassDef(object):
def __init__(self, flags, isInterface, name, superc, interfaces, fields, methods):
self.flagstr = flags + ' ' if flags else ''
self.isInterface = isInterface
self.name = ast.TypeName((name,0))
self.super = ast.TypeName((superc,0)) if superc is not None else None
self.interfaces = [ast.TypeName((iname,0)) for iname in interfaces]
self.fields = fields
self.methods = methods
if superc == 'java/lang/Object':
self.super = None
def print_(self, printer, print_):
contents = ''
if self.fields:
contents = '\n'.join(print_(x) for x in self.fields)
if self.methods:
if contents:
contents += '\n\n' #extra line to divide fields and methods
contents += '\n\n'.join(print_(x) for x in self.methods)
indented = [' '+line for line in contents.splitlines()]
name = print_(self.name).rpartition('.')[-1]
defname = 'interface' if self.isInterface else 'class'
header = '{}{} {}'.format(self.flagstr, defname, name)
if self.super:
header += ' extends ' + print_(self.super)
if self.interfaces:
if self.isInterface:
assert(self.super is None)
header += ' extends ' + ', '.join(print_(x) for x in self.interfaces)
else:
header += ' implements ' + ', '.join(print_(x) for x in self.interfaces)
lines = [header + ' {'] + indented + ['}']
return '\n'.join(lines) | gpl-2.0 | -7,945,223,551,241,829,000 | 39.831461 | 100 | 0.56262 | false |
apieum/inxpect | inxpect/expect/property.py | 1 | 1966 | # -*- coding: utf8 -*-
from .chain import AndChain
from .operator import *
from .should import Should, ShouldNot
class DefaultProperty(object):
def __init__(self, getter=None, returns=AndChain):
self.should = Should(getter, returns)
self.should_not = ShouldNot(getter, returns)
def equal_to(self, expected, closure=None):
return self.should(Equal, expected, closure)
def not_equal_to(self, expected, closure=None):
return self.should_not(Equal, expected, closure)
def lower_than(self, expected, closure=None):
return self.should(LowerThan, expected, closure)
def lower_or_equal_than(self, expected, closure=None):
return self.should(LowerOrEqualThan, expected, closure)
def greater_than(self, expected, closure=None):
return self.should(GreaterThan, expected, closure)
def greater_or_equal_than(self, expected, closure=None):
return self.should(GreaterOrEqualThan, expected, closure)
def same_as(self, expected, closure=None):
return self.should(SameAs, expected, closure)
def not_same_as(self, expected, closure=None):
return self.should_not(SameAs, expected, closure)
def type_is(self, expected, closure=None):
return self.should(TypeIs, expected, closure)
def type_is_not(self, expected, closure=None):
return self.should_not(TypeIs, expected, closure)
def instance_of(self, expected, closure=None):
return self.should(InstanceOf, expected, closure)
def not_instance_of(self, expected, closure=None):
return self.should_not(InstanceOf, expected, closure)
@property
def len(self):
return DefaultProperty(getter=self.should.closure(len))
def __get__(self, instance, ownerCls):
return self
__eq__ = equal_to
__ne__ = not_equal_to
__lt__ = lower_than
__gt__ = greater_than
__le__ = lower_or_equal_than
__ge__ = greater_or_equal_than
| lgpl-3.0 | -9,122,398,435,168,640,000 | 31.229508 | 65 | 0.671414 | false |
projectatomic/atomic-reactor | atomic_reactor/plugins/pre_reactor_config.py | 1 | 22264 | """
Copyright (c) 2017, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
from copy import deepcopy
from atomic_reactor.utils.cachito import CachitoAPI
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.constants import (CONTAINER_BUILD_METHODS, CONTAINER_DEFAULT_BUILD_METHOD,
CONTAINER_BUILDAH_BUILD_METHOD)
from atomic_reactor.util import (read_yaml, read_yaml_from_file_path,
get_build_json, DefaultKeyDict)
from osbs.utils import RegistryURI
import logging
import os
import six
# Key used to store the config object in the plugin workspace
WORKSPACE_CONF_KEY = 'reactor_config'
NO_FALLBACK = object()
def get_config(workflow):
"""
Obtain configuration object
Does not fail
:return: ReactorConfig instance
"""
try:
workspace = workflow.plugin_workspace[ReactorConfigPlugin.key]
return workspace[WORKSPACE_CONF_KEY]
except KeyError:
# The plugin did not run or was not successful: use defaults
conf = ReactorConfig()
workspace = workflow.plugin_workspace.get(ReactorConfigPlugin.key, {})
workspace[WORKSPACE_CONF_KEY] = conf
workflow.plugin_workspace[ReactorConfigPlugin.key] = workspace
return conf
def get_value(workflow, name, fallback):
try:
# make a deep copy to prevent plugins from changing the value for other plugins
value = deepcopy(get_config(workflow).conf[name])
return value
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
def get_koji(workflow):
koji_map = get_value(workflow, 'koji', NO_FALLBACK)
if 'auth' in koji_map:
krb_principal = koji_map['auth'].get('krb_principal')
krb_keytab = koji_map['auth'].get('krb_keytab_path')
if bool(krb_principal) != bool(krb_keytab):
raise RuntimeError("specify both koji_principal and koji_keytab or neither")
return koji_map
def get_koji_session(workflow):
config = get_koji(workflow)
from atomic_reactor.utils.koji import create_koji_session
auth_info = {
"proxyuser": config['auth'].get('proxyuser'),
"ssl_certs_dir": config['auth'].get('ssl_certs_dir'),
"krb_principal": config['auth'].get('krb_principal'),
"krb_keytab": config['auth'].get('krb_keytab_path')
}
use_fast_upload = config.get('use_fast_upload', True)
return create_koji_session(config['hub_url'], auth_info, use_fast_upload)
def get_koji_path_info(workflow):
config = get_koji(workflow)
from koji import PathInfo
# Make sure koji root_url doesn't end with a slash since the url
# is used to construct resource urls (e.g. log links)
root_url = config['root_url'].rstrip('/')
return PathInfo(topdir=root_url)
def get_odcs(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'odcs', fallback)
def get_odcs_session(workflow):
config = get_odcs(workflow)
from atomic_reactor.utils.odcs import ODCSClient
client_kwargs = {
'insecure': config.get('insecure', False),
'timeout': config.get('timeout', None),
}
openidc_dir = config['auth'].get('openidc_dir')
if openidc_dir:
token_path = os.path.join(openidc_dir, 'token')
with open(token_path, "r") as f:
client_kwargs['token'] = f.read().strip()
ssl_certs_dir = config['auth'].get('ssl_certs_dir')
if ssl_certs_dir:
cert_path = os.path.join(ssl_certs_dir, 'cert')
if os.path.exists(cert_path):
client_kwargs['cert'] = cert_path
else:
raise KeyError("ODCS ssl_certs_dir doesn't exist")
return ODCSClient(config['api_url'], **client_kwargs)
def get_smtp(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'smtp', fallback)
def get_smtp_session(workflow, fallback):
config = get_smtp(workflow, fallback)
import smtplib
return smtplib.SMTP(config['host'])
def get_cachito(workflow):
return get_value(workflow, 'cachito', NO_FALLBACK)
def get_cachito_session(workflow):
config = get_cachito(workflow)
api_kwargs = {
'insecure': config.get('insecure', False),
'timeout': config.get('timeout'),
}
ssl_certs_dir = config['auth'].get('ssl_certs_dir')
if ssl_certs_dir:
cert_path = os.path.join(ssl_certs_dir, 'cert')
if os.path.exists(cert_path):
api_kwargs['cert'] = cert_path
else:
raise RuntimeError("Cachito ssl_certs_dir doesn't exist")
return CachitoAPI(config['api_url'], **api_kwargs)
def get_arrangement_version(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'arrangement_version', fallback)
def get_artifacts_allowed_domains(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'artifacts_allowed_domains', fallback)
def get_yum_repo_allowed_domains(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'yum_repo_allowed_domains', fallback)
def get_image_labels(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'image_labels', fallback)
def get_image_label_info_url_format(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'image_label_info_url_format', fallback)
def get_image_equal_labels(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'image_equal_labels', fallback)
def get_openshift(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'openshift', fallback)
def get_openshift_session(workflow, fallback):
config = get_openshift(workflow, fallback)
namespace = get_build_json().get('metadata', {}).get('namespace', None)
from osbs.api import OSBS
from osbs.conf import Configuration
config_kwargs = {
'verify_ssl': not config.get('insecure', False),
'namespace': namespace,
'use_auth': False,
'conf_file': None,
'openshift_url': config['url'],
'build_json_dir': config.get('build_json_dir')
}
if config.get('auth'):
krb_keytab_path = config['auth'].get('krb_keytab_path')
if krb_keytab_path:
config_kwargs['kerberos_keytab'] = krb_keytab_path
krb_principal = config['auth'].get('krb_principal')
if krb_principal:
config_kwargs['kerberos_principal'] = krb_principal
krb_cache_path = config['auth'].get('krb_cache_path')
if krb_cache_path:
config_kwargs['kerberos_ccache'] = krb_cache_path
ssl_certs_dir = config['auth'].get('ssl_certs_dir')
if ssl_certs_dir:
config_kwargs['client_cert'] = os.path.join(ssl_certs_dir, 'cert')
config_kwargs['client_key'] = os.path.join(ssl_certs_dir, 'key')
config_kwargs['use_auth'] = config['auth'].get('enable', False)
osbs_conf = Configuration(**config_kwargs)
return OSBS(osbs_conf, osbs_conf)
def get_group_manifests(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'group_manifests', fallback)
def get_platform_descriptors(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'platform_descriptors', fallback)
def get_prefer_schema1_digest(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'prefer_schema1_digest', fallback)
def get_content_versions(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'content_versions', fallback)
def get_registries_organization(workflow):
return get_config(workflow).conf.get('registries_organization')
def get_registries(workflow, fallback=NO_FALLBACK):
try:
all_registries = get_config(workflow).conf['registries']
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
registries_cm = {}
for registry in all_registries:
reguri = RegistryURI(registry.get('url'))
regdict = {}
regdict['version'] = reguri.version
if registry.get('auth'):
regdict['secret'] = registry['auth']['cfg_path']
regdict['insecure'] = registry.get('insecure', False)
regdict['expected_media_types'] = registry.get('expected_media_types', [])
registries_cm[reguri.docker_uri] = regdict
return registries_cm
def get_docker_registry(workflow, fallback=NO_FALLBACK):
try:
all_registries = get_config(workflow).conf['registries']
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
for registry in all_registries:
reguri = RegistryURI(registry.get('url'))
if reguri.version == 'v2':
regdict = {
'url': reguri.uri,
'insecure': registry.get('insecure', False)
}
if registry.get('auth'):
regdict['secret'] = registry['auth']['cfg_path']
return regdict
raise RuntimeError("Expected V2 registry but none in REACTOR_CONFIG")
def get_yum_proxy(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'yum_proxy', fallback)
def _as_source_registry(registry):
return {
'uri': RegistryURI(registry['url']),
'insecure': registry.get('insecure', False),
'dockercfg_path': registry.get('auth', {}).get('cfg_path', None)
}
def get_source_registry(workflow, fallback=NO_FALLBACK):
try:
source_registry = get_config(workflow).conf['source_registry']
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
return _as_source_registry(source_registry)
def get_pull_registries(workflow, fallback=NO_FALLBACK):
"""
Get list of pull_registries from config map, list entries follow the same
format as the result of get_source_registry()
"""
try:
pull_registries = get_config(workflow).conf['pull_registries']
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
return [_as_source_registry(reg) for reg in pull_registries]
def get_sources_command(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'sources_command', fallback)
def get_required_secrets(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'required_secrets', fallback)
def get_worker_token_secrets(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'worker_token_secrets', fallback)
def get_clusters(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'clusters', fallback)
def get_clusters_client_config_path(workflow, fallback=NO_FALLBACK):
client_config_dir = get_value(workflow, 'clusters_client_config_dir', fallback)
return os.path.join(client_config_dir, 'osbs.conf')
def get_platform_to_goarch_mapping(workflow,
descriptors_fallback=NO_FALLBACK):
platform_descriptors = get_platform_descriptors(
workflow,
fallback=descriptors_fallback,
)
return DefaultKeyDict(
(descriptor['platform'], descriptor['architecture'])
for descriptor in platform_descriptors)
def get_goarch_to_platform_mapping(workflow,
descriptors_fallback=NO_FALLBACK):
platform_descriptors = get_platform_descriptors(
workflow,
fallback=descriptors_fallback,
)
return DefaultKeyDict(
(descriptor['architecture'], descriptor['platform'])
for descriptor in platform_descriptors)
def get_build_image_override(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'build_image_override', fallback)
def get_default_image_build_method(workflow, fallback=CONTAINER_DEFAULT_BUILD_METHOD):
value = get_value(workflow, 'default_image_build_method', fallback)
assert value in CONTAINER_BUILD_METHODS, (
"unknown default_image_build_method '{}' in reactor config; "
"config schema validation should have caught this."
).format(value)
return value
def get_buildstep_alias(workflow):
return get_value(workflow, 'buildstep_alias', {})
def get_flatpak_base_image(workflow, fallback=NO_FALLBACK):
flatpak = get_value(workflow, 'flatpak', {})
try:
return flatpak['base_image']
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
def get_flatpak_metadata(workflow, fallback=NO_FALLBACK):
flatpak = get_value(workflow, 'flatpak', {})
try:
return flatpak['metadata']
except KeyError:
if fallback != NO_FALLBACK:
return fallback
raise
def get_package_comparison_exceptions(workflow):
return set(get_config(workflow).conf.get('package_comparison_exceptions', []))
def get_hide_files(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'hide_files', fallback)
def get_skip_koji_check_for_base_image(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'skip_koji_check_for_base_image', fallback)
def get_omps_config(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'omps', fallback)
def get_deep_manifest_list_inspection(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'deep_manifest_list_inspection', fallback)
def get_fail_on_digest_mismatch(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'fail_on_digest_mismatch', fallback)
def get_source_container(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'source_container', fallback)
def get_operator_manifests(workflow, fallback=NO_FALLBACK):
return get_value(workflow, 'operator_manifests', fallback)
def get_image_size_limit(workflow):
config = get_value(workflow, 'image_size_limit', {})
return {
'binary_image': config.get('binary_image', 0),
}
class ClusterConfig(object):
"""
Configuration relating to a particular cluster
"""
def __init__(self, name, max_concurrent_builds, enabled=True, priority=0):
self.name = str(name)
self.max_concurrent_builds = int(max_concurrent_builds)
self.enabled = enabled
self.priority = priority
class ReactorConfigKeys(object):
"""
Symbolic names to use for the key names in the configuration file
Use the symbols defined in this class to fetch key values from
the configuration file rather than using string literals. This
way if you mis-spell one it will cause an exception to be raised
rather than the key seeming not to be present in the config file.
At top level:
- VERSION_KEY: this is the version of the config file schema
- CLUSTERS_KEY: this holds details about clusters, by platform
"""
VERSION_KEY = 'version'
CLUSTERS_KEY = 'clusters'
ODCS_KEY = 'odcs'
class ReactorConfig(object):
"""
Class to parse the atomic-reactor configuration file
"""
DEFAULT_CONFIG = {ReactorConfigKeys.VERSION_KEY: 1}
def __init__(self, config=None):
self.conf = deepcopy(config or self.DEFAULT_CONFIG)
version = self.conf[ReactorConfigKeys.VERSION_KEY]
if version != 1:
raise ValueError("version %r unknown" % version)
# Prepare cluster configurations
self.cluster_configs = {}
for platform, clusters in self.conf.get(ReactorConfigKeys.CLUSTERS_KEY,
{}).items():
cluster_configs = [ClusterConfig(priority=priority, **cluster)
for priority, cluster in enumerate(clusters)]
self.cluster_configs[platform] = [conf for conf in cluster_configs
if conf.enabled]
def get_enabled_clusters_for_platform(self, platform):
return self.cluster_configs.get(platform, [])
def get_odcs_config(self):
"""
Return an odcs config object created from the odcs config configured in
reactor config
:return: the object of ODCSConfig. If there is no odcs configured in
reactor config, None is returned.
:rtype: :class:`ODCSConfig` or None
"""
odcs_config = self.conf.get('odcs')
if odcs_config:
return ODCSConfig(
signing_intents=odcs_config['signing_intents'],
default_signing_intent=odcs_config['default_signing_intent']
)
def is_default(self):
return self.conf == self.DEFAULT_CONFIG
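# --- Illustrative sketch (added for illustration; not part of the original module) ---
# The ReactorConfigKeys docstring above describes the top-level layout: a
# 'version' key plus a 'clusters' map keyed by platform.  The platform and
# cluster names below are hypothetical.
def _example_reactor_config_usage():
    conf = ReactorConfig({
        'version': 1,
        'clusters': {
            'x86_64': [
                {'name': 'main-cluster', 'max_concurrent_builds': 6},
                {'name': 'spare-cluster', 'max_concurrent_builds': 2, 'enabled': False},
            ],
        },
    })
    enabled = conf.get_enabled_clusters_for_platform('x86_64')
    assert [c.name for c in enabled] == ['main-cluster']  # disabled cluster filtered out
    return conf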
class ODCSConfig(object):
"""
Configuration for ODCS integration.
"""
def __init__(self, signing_intents, default_signing_intent):
self.log = logging.getLogger(self.__class__.__name__)
self.default_signing_intent = default_signing_intent
self.signing_intents = []
# Signing intents are listed in reverse restrictive order in configuration.
# Since the input signing_intents will be modified by inserting a new
# key restrictiveness, this deepcopy ensures the original
# signing_intent dict objects are not modified accidentally.
for restrictiveness, intent in enumerate(reversed(deepcopy(signing_intents))):
intent['restrictiveness'] = restrictiveness
self.signing_intents.append(intent)
# Verify default_signing_intent is valid
self.get_signing_intent_by_name(self.default_signing_intent)
def get_signing_intent_by_name(self, name):
valid = []
for entry in self.signing_intents:
this_name = entry['name']
if this_name == name:
return entry
valid.append(this_name)
raise ValueError('unknown signing intent name "{}", valid names: {}'
.format(name, ', '.join(valid)))
def get_signing_intent_by_keys(self, keys):
if isinstance(keys, six.text_type):
keys = keys.split()
keys = set(keys)
intents_matching_deprecated_keys = []
for entry in reversed(self.signing_intents):
keys_set = set(entry['keys'])
if (keys and keys_set >= keys) or keys == keys_set:
return entry
permissive_keys_set = set(entry['keys'] + entry.get('deprecated_keys', []))
if keys and permissive_keys_set >= keys:
intents_matching_deprecated_keys.append(entry)
if not intents_matching_deprecated_keys:
raise ValueError('unknown signing intent keys "{}"'.format(keys))
self.log.warning(
'signing intent keys "%s" contain deprecated entries in the "%s" signing intent',
keys,
intents_matching_deprecated_keys[0]['name']
)
return intents_matching_deprecated_keys[0]
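# --- Illustrative sketch (added for illustration; not part of the original module) ---
# get_odcs_config() above returns an ODCSConfig built from the reactor config
# map.  The intent names and key IDs below are hypothetical; the sketch only
# shows how the restrictiveness ordering and the key-based lookup behave.
def _example_odcs_config_usage():
    odcs_config = ODCSConfig(
        signing_intents=[
            {'name': 'release', 'keys': ['R123']},
            {'name': 'beta', 'keys': ['R123', 'B456'], 'deprecated_keys': ['B000']},
            {'name': 'unsigned', 'keys': []},
        ],
        default_signing_intent='release',
    )
    release = odcs_config.get_signing_intent_by_name('release')
    assert release['restrictiveness'] == 2  # first-listed intent gets the highest value
    # Look up the intent matching the keys a compose was actually signed with:
    assert odcs_config.get_signing_intent_by_keys(['R123', 'B456'])['name'] == 'beta'
    return odcs_config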
class ReactorConfigPlugin(PreBuildPlugin):
"""
Parse atomic-reactor configuration file
"""
# Name of this plugin
key = 'reactor_config'
# Exceptions from this plugin should fail the build
is_allowed_to_fail = False
def __init__(self, tasker, workflow, config_path=None, basename='config.yaml'):
"""
constructor
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param config_path: str, configuration path (directory); default is None
:param basename: str, filename within directory; default is config.yaml
"""
# call parent constructor
super(ReactorConfigPlugin, self).__init__(tasker, workflow)
self.config_path = config_path
self.basename = basename
self.reactor_config_map = os.environ.get('REACTOR_CONFIG', None)
def run(self):
"""
Run the plugin
Parse and validate config.
Store in workflow workspace for later retrieval.
"""
if self.reactor_config_map:
self.log.info("reading config from REACTOR_CONFIG env variable")
conf = read_yaml(self.reactor_config_map, 'schemas/config.json')
else:
config_filename = os.path.join(self.config_path, self.basename)
self.log.info("reading config from %s", config_filename)
conf = read_yaml_from_file_path(config_filename, 'schemas/config.json')
reactor_conf = ReactorConfig(conf)
workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
workspace[WORKSPACE_CONF_KEY] = reactor_conf
self.log.info("reading config content %s", reactor_conf.conf)
# need to stash this on the workflow for access in a place that can't import this module
buildstep_aliases = get_buildstep_alias(self.workflow)
default_image_build_method = get_default_image_build_method(self.workflow)
source_image_build_method = self.workflow.builder.source.config.image_build_method
if source_image_build_method in buildstep_aliases:
source_image_build_method = buildstep_aliases[source_image_build_method]
if default_image_build_method in buildstep_aliases:
default_image_build_method = buildstep_aliases[default_image_build_method]
if (source_image_build_method == CONTAINER_BUILDAH_BUILD_METHOD or
default_image_build_method == CONTAINER_BUILDAH_BUILD_METHOD):
raise NotImplementedError('{} method not yet fully implemented'.
format(CONTAINER_BUILDAH_BUILD_METHOD))
self.workflow.builder.source.config.image_build_method = source_image_build_method
self.workflow.default_image_build_method = default_image_build_method
self.workflow.builder.tasker.build_method = (source_image_build_method or
default_image_build_method)
# set source registry and organization
if self.workflow.builder.dockerfile_images is not None:
source_registry_docker_uri = get_source_registry(self.workflow)['uri'].docker_uri
organization = get_registries_organization(self.workflow)
self.workflow.builder.dockerfile_images.set_source_registry(source_registry_docker_uri,
organization)
| bsd-3-clause | 1,514,774,890,125,996,000 | 32.99084 | 99 | 0.652264 | false |