repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
DaveTCode/CreatureRogue | CreatureRogue/models/battle_creature.py | 1 | 3117 |
from CreatureRogue.models.creature import Creature
from CreatureRogue.data_layer.data import StaticGameData, HP_STAT
from CreatureRogue.data_layer.pokeball import Pokeball
from CreatureRogue.data_layer.stat import Stat
class BattleCreature:
"""
When in battle a creature can have stat adjustments and other values
can be modified.
The battle creature is an encapsulating object which is used to capture
this information in an easily discardable manner.
"""
stat_adjust_factors = {-6: 1 / 4, -5: 2 / 7, -4: 1 / 3, -3: 2 / 5, -2: 1 / 2, -1: 2 / 3,
0: 1.0,
1: 1.5, 2: 2.0, 3: 2.5, 4: 3.0, 5: 3.5, 6: 4.0}
def __init__(self, creature: Creature, static_game_data: StaticGameData):
self.static_game_data = static_game_data
self.creature = creature
self.stat_adjusts = {stat: 0 for stat in self.creature.stats}
def adjust_stat_adjusts(self, stat: Stat, value: int) -> int:
"""
The only adjustment to statistics of a creature in battle is done
through these factors which range from -6 to 6.
Returns the amount by which we actually adjusted the stat.
:param stat: The stat to update.
:param value: An integer amount to adjust the stat. Will be capped
so safe to call with any value.
"""
old_val = self.stat_adjusts[stat]
self.stat_adjusts[stat] += value
self.stat_adjusts[stat] = max(-6, min(6, self.stat_adjusts[stat]))
return self.stat_adjusts[stat] - old_val
def stat_value(self, stat: Stat) -> float:
"""
The current value of a stat in battle is the base stat for that
creature (i.e. the value pre battle) multiplied by the factor
gained from moves performed on the creature during battle.
These factors are fixed and are capped at 1/4 to 4.
:param stat: The stat is an object from the static game data that
specifies which statistic we're interested in.
"""
return self.creature.stats[stat] * BattleCreature.stat_adjust_factors[self.stat_adjusts[stat]]
def modified_catch_rate(self, pokeball: Pokeball) -> float:
"""
Calculates the modified catch rate of a creature. This is based on
a variety of factors including the status of the creature, the ball
used and the current hit points.
It is calculated in BattleCreature rather than Creature because it
is only applicable during a battle.
:param pokeball: The catch rate is also determined by the type of
pokeball used to catch the creature.
"""
# TODO - Add status effects
hp_stat = self.static_game_data.stats[HP_STAT]
triple_max_hp = 3 * self.creature.max_stat(hp_stat)
return (triple_max_hp - 2 * self.stat_value(hp_stat)) * self.creature.species.capture_rate * pokeball.catch_rate / triple_max_hp
def __str__(self):
return str(self.creature)
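# Worked example for modified_catch_rate above (hypothetical numbers, not from the
# real game data): if max HP is 30, the in-battle HP stat value is 10, the species
# capture_rate is 45 and the pokeball catch_rate is 1.5, the formula evaluates to
# (3*30 - 2*10) * 45 * 1.5 / (3*30) = 70 * 67.5 / 90 = 52.5.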
| mit | -7,488,282,965,211,967,000 | 41.69863 | 136 | 0.623997 | false |
maqnius/compscie-mc | setup.py | 1 | 4429 |
# particlesim
# Copyright (C) 2017 Mark Niehues, Stefaan Hessmann, Jaap Pedersen,
# Simon Treu, Hanna Wulkow, Thomas Hadler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#
from setuptools import setup
import versioneer
import sys
import warnings  # used by the sdist cythonize fallback below
from setuptools import Extension
import os
class lazy_cythonize(list):
"""evaluates extension list lazyly.
pattern taken from http://tinyurl.com/qb8478q"""
def __init__(self, callback):
self._list, self.callback = None, callback
def c_list(self):
if self._list is None: self._list = self.callback()
return self._list
def __iter__(self):
for e in self.c_list(): yield e
def __getitem__(self, ii): return self.c_list()[ii]
def __len__(self): return len(self.c_list())
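# Rationale (added note, not part of the upstream comment): deferring the call to
# extensions() means numpy and Cython are only imported once setuptools actually
# iterates over ext_modules, i.e. after setup_requires has had a chance to install them.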
def extensions():
from numpy import get_include
from Cython.Build import cythonize
ext_fast_sor = Extension(
"*",
sources=["particlesim/*.pyx"],
include_dirs=[get_include()],
extra_compile_args=["-O3", "-std=c99"])
exts = [ext_fast_sor]
return cythonize(exts)
def get_cmdclass():
versioneer_cmds = versioneer.get_cmdclass()
class sdist(versioneer_cmds['sdist']):
"""ensure cython files are compiled to c, when distributing"""
def run(self):
# only run if .git is present
if not os.path.exists('.git'):
print("Not on git, can not create source distribution")
return
try:
from Cython.Build import cythonize
print("cythonizing sources")
cythonize(extensions())
except ImportError:
warnings.warn('sdist cythonize failed')
return versioneer_cmds['sdist'].run(self)
versioneer_cmds['sdist'] = sdist
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ['particlesim']
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
versioneer_cmds['test'] = PyTest
return versioneer_cmds
setup(
cmdclass=get_cmdclass(),
ext_modules=lazy_cythonize(extensions),
name='particlesim',
version=versioneer.get_version(),
description="Simulates multi particle systems with MMC",
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'],
keywords=[],
url='https://github.com/maqnius/compscie-mc',
author='Mark Niehues, Stefaan Hessmann, Jaap Pedersen, Simon Treu, Hanna Wulkow',
author_email='[email protected], [email protected], [email protected], [email protected], [email protected]',
license='GPLv3+',
packages=['particlesim', 'particlesim.utils', 'particlesim.lib'],
setup_requires=[
'numpy>=1.7.0',
'setuptools>=0.6',
'scipy>=0.6'],
package_dir = {'particlesim': 'particlesim'},
install_requires=['numpy>=1.7.0','cython>=0.22'],
tests_require=['pytest']
)
| gpl-3.0 | -7,637,551,307,956,457,000 | 37.181034 | 134 | 0.632423 | false |
bolkedebruin/airflow | tests/providers/jenkins/operators/test_jenkins_job_trigger.py | 1 | 7229 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jenkins
import mock
from airflow.exceptions import AirflowException
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook
from airflow.providers.jenkins.operators.jenkins_job_trigger import JenkinsJobTriggerOperator
class TestJenkinsOperator(unittest.TestCase):
@unittest.skipIf(mock is None, 'mock package not present')
def test_execute(self):
jenkins_mock = mock.Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_build_info.return_value = \
{'result': 'SUCCESS',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job'}
jenkins_mock.build_job_url.return_value = \
'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = mock.Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
the_parameters = {'a_param': 'blip', 'another_param': '42'}
with mock.patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked,\
mock.patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers') \
as mock_make_request:
mock_make_request.side_effect = \
[{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}}]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
task_id="operator_test",
job_name="a_job_on_jenkins",
parameters=the_parameters,
sleep_time=1)
operator.execute(None)
self.assertEqual(jenkins_mock.get_build_info.call_count, 1)
jenkins_mock.get_build_info.assert_called_once_with(name='a_job_on_jenkins',
number='1')
@unittest.skipIf(mock is None, 'mock package not present')
def test_execute_job_polling_loop(self):
jenkins_mock = mock.Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.side_effect = \
[{'result': None},
{'result': 'SUCCESS',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job'}]
jenkins_mock.build_job_url.return_value = \
'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = mock.Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
the_parameters = {'a_param': 'blip', 'another_param': '42'}
with mock.patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked,\
mock.patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers') \
as mock_make_request:
mock_make_request.side_effect = \
[{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}}]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
parameters=the_parameters,
sleep_time=1)
operator.execute(None)
self.assertEqual(jenkins_mock.get_build_info.call_count, 2)
@unittest.skipIf(mock is None, 'mock package not present')
def test_execute_job_failure(self):
jenkins_mock = mock.Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': 'FAILURE',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job'}
jenkins_mock.build_job_url.return_value = \
'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = mock.Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
the_parameters = {'a_param': 'blip', 'another_param': '42'}
with mock.patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked,\
mock.patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers') \
as mock_make_request:
mock_make_request.side_effect = \
[{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}}]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
parameters=the_parameters,
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
sleep_time=1)
self.assertRaises(AirflowException, operator.execute, None)
@unittest.skipIf(mock is None, 'mock package not present')
def test_build_job_request_settings(self):
jenkins_mock = mock.Mock(spec=jenkins.Jenkins, auth='secret', timeout=2)
jenkins_mock.build_job_url.return_value = 'http://apache.org'
with mock.patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="build_job_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection")
operator.build_job(jenkins_mock)
mock_request = mock_make_request.call_args_list[0][0][1]
self.assertEqual(mock_request.method, 'POST')
self.assertEqual(mock_request.url, 'http://apache.org')
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -5,934,285,332,554,292,000 | 44.465409 | 105 | 0.611841 | false |
SocialCognitiveSystems/PRIMO | examples/soft_evidence_example.py | 1 | 1034 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 9 16:52:39 2017
@author: jpoeppel
"""
import numpy as np
from primo2.networks import BayesianNetwork
from primo2.nodes import DiscreteNode
from primo2.inference.exact import VariableElimination
from primo2.inference.exact import FactorTree
from primo2.inference.mcmc import MCMC
from primo2.inference.mcmc import GibbsTransition
bn = BayesianNetwork()
cloth = DiscreteNode("cloth", ["green","blue", "red"])
sold = DiscreteNode("sold")
bn.add_node(cloth)
bn.add_node(sold)
bn.add_edge("cloth", "sold")
cloth.set_cpd(np.array([0.3,0.3,0.4]))
sold.set_cpd(np.array([[0.4, 0.4, 0.8],
[0.6, 0.6, 0.2]]))
tree = FactorTree.create_jointree(bn)
print(tree.marginals(["sold"]).get_potential())
tree.set_evidence({"cloth": np.array([0.7,0.25,0.05])})
print(tree.marginals(["sold"]).get_potential())
print(tree.marginals(["cloth"]).get_potential())
tree.set_evidence({"cloth": "green"})
print(tree.marginals(["cloth"]).get_potential())
| lgpl-3.0 | -5,473,858,586,788,435,000 | 23.046512 | 55 | 0.692456 | false |
PolyLAN/polybanking | server/paiements/models.py | 1 | 5633 |
from django.db import models
from configs.models import Config
from django.template import defaultfilters
from django.utils.timezone import localtime
class Transaction(models.Model):
"""Represent one transation"""
config = models.ForeignKey(Config)
reference = models.CharField(max_length=255)
extra_data = models.TextField(blank=True, null=True)
amount = models.IntegerField()
postfinance_id = models.CharField(max_length=255, blank=True, null=True)
POSTFINANCE_STATUS = (
('??', 'Unknow'),
('0', 'Invalid or incomplete'),
('1', 'Cancelled by customer'),
('2', 'Authorisation declined'),
('4', 'Order stored'),
('40', 'Stored waiting external result'),
('41', 'Waiting for client payment'),
('5', 'Authorised'),
('50', 'Authorized waiting external result'),
('51', 'Authorisation waiting'),
('52', 'Authorisation not known'),
('55', 'Standby'),
('56', 'OK with scheduled payments'),
('57', 'Not OK with scheduled payments'),
('59', 'Authoris. to be requested manually'),
('6', 'Authorised and cancelled'),
('61', 'Author. deletion waiting'),
('62', 'Author. deletion uncertain'),
('63', 'Author. deletion refused'),
('64', 'Authorised and cancelled'),
('7', 'Payment deleted'),
('71', 'Payment deletion pending'),
('72', 'Payment deletion uncertain'),
('73', 'Payment deletion refused'),
('74', 'Payment deleted'),
('75', 'Deletion handled by merchant'),
('8', 'Refund'),
('81', 'Refund pending'),
('82', 'Refund uncertain'),
('83', 'Refund refused'),
('84', 'Refund'),
('85', 'Refund handled by merchant'),
('9', 'Payment requested'),
('91', 'Payment processing'),
('92', 'Payment uncertain'),
('93', 'Payment refused'),
('94', 'Refund declined by the acquirer'),
('95', 'Payment handled by merchant'),
('96', 'Refund reversed'),
('99', 'Being processed'),
)
postfinance_status = models.CharField(max_length=2, choices=POSTFINANCE_STATUS, default='??')
INTERNAL_STATUS = (
('cr', 'Transation created'),
('fw', 'User forwarded to PostFinance'),
('fb', 'Feedback from PostFinance'),
)
internal_status = models.CharField(max_length=2, choices=INTERNAL_STATUS, default='cr')
ipn_needed = models.BooleanField(default=False)
creation_date = models.DateTimeField(auto_now_add=True)
last_userforwarded_date = models.DateTimeField(blank=True, null=True)
last_user_back_from_postfinance_date = models.DateTimeField(blank=True, null=True)
last_postfinance_ipn_date = models.DateTimeField(blank=True, null=True)
last_ipn_date = models.DateTimeField(blank=True, null=True)
brand = models.CharField(max_length=128, default='')
card = models.CharField(max_length=128, default='')
def amount_chf(self):
"""Return the amount in CHF"""
return self.amount / 100.0
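    # The integer `amount` is presumably stored in centimes, e.g. amount == 1250 is
    # reported by amount_chf() as 12.50 CHF.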
def postfinance_status_good(self):
"""Return true if the status of the transaction is good (valid)"""
return self.postfinance_status in ('5', '9')
def internal_status_good(self):
"""Return true if the internal status of the transaction if good (user back from postfinance)"""
return self.internal_status == 'fb'
def __unicode__(self):
return self.reference
def dump_api(self, add_config=False):
"""Return values for API"""
retour = {}
for val in ['reference', 'extra_data', 'amount', 'postfinance_id', 'postfinance_status', 'internal_status', 'ipn_needed', 'brand', 'card']:
retour[val] = str(getattr(self, val))
for val in ['creation_date', 'last_userforwarded_date', 'last_user_back_from_postfinance_date', 'last_postfinance_ipn_date', 'last_ipn_date']:
if getattr(self, val):
retour[val] = str(localtime(getattr(self, val)))
else:
retour[val] = ''
for cal, name in [('get_postfinance_status_display', 'postfinance_status_text'), ('get_internal_status_display', 'internal_status_text'), ('amount_chf', 'amount_chf')]:
retour[name] = getattr(self, cal)()
if add_config:
retour['config'] = self.config.name
return retour
class TransactionLog(models.Model):
"""A transaction log"""
transaction = models.ForeignKey(Transaction)
when = models.DateTimeField(auto_now_add=True)
extra_data = models.TextField()
LOG_TYPE = (
('created', 'Transaction created'),
('userForwarded', 'User forwarded'),
('userBackFromPostfinance', 'User back from postfinance'),
('postfinanceId', 'Postfinance ID set'),
('postfinanceStatus', 'Postfinance status changed'),
('ipnfailled', 'IPN Failled'),
('ipnsuccess', 'IPN Success'),
)
log_type = models.CharField(max_length=64, choices=LOG_TYPE)
def dump_api(self):
"""Return values for API"""
retour = {}
for val in ['when']:
if getattr(self, val):
retour[val] = str(localtime(getattr(self, val)))
else:
retour[val] = ''
for val in ['extra_data', 'log_type']:
retour[val] = str(getattr(self, val))
for cal, name in [('get_log_type_display', 'log_type_text')]:
retour[name] = getattr(self, cal)()
return retour
| bsd-2-clause | 6,611,243,239,914,717,000 | 33.558282 | 176 | 0.587431 | false |
simplereach/nsq2kafka | nsq2kafka/__main__.py | 1 | 3204 |
"""
USAGE: nsq2kafka [OPTIONS]
EXAMPLES:
# Basic example
nsq2kafka --nsq-topic=test --nsq-nsqd-tcp-addresses=localhost:4150
# Realistic example
nsq2kafka --nsq-topic=json_clicks \
--nsq-lookupd-http-addresses=lookupd1.example.com:4161,lookupd2.example.com:4161 \
--nsq-max-in-flight=5000 \
--nsq-channel=nsq2Kafka \
              --kafka-bootstrap-servers=kafka1.example.com:9092,kafka2.example.com:9092 \
--kafka-topic=click_stream_json \
--kafka-message-key=user_id
"""
from nsq2kafka import NSQ2Kafka
import tornado.options
import tornado.log
def main():
tornado.options.define('nsq_topic',
type=str,
group='NSQ',
help='specifies the desired NSQ topic')
tornado.options.define('nsq_channel',
type=str,
group='NSQ',
default='nsq2kafka#ephemeral',
help='specifies the desired NSQ channel')
tornado.options.define('nsq_nsqd_tcp_addresses',
type=str,
multiple=True,
group='NSQ',
help='a sequence of string addresses of the nsqd instances this reader should connect to')
tornado.options.define('nsq_lookupd_http_addresses',
type=str,
multiple=True,
group='NSQ',
help='a sequence of string addresses of the nsqlookupd instances this reader should query '
'for producers of the specified topic')
tornado.options.define('nsq_max_in_flight',
type=int,
default=500,
group='NSQ',
help='the maximum number of messages this reader will pipeline for processing. this value '
'will be divided evenly amongst the configured/discovered nsqd producers')
tornado.options.define('kafka_bootstrap_servers',
type=str,
group='Kafka',
default='localhost:9092',
multiple=True,
help='host[:port] string (or list of host[:port] strings) that the producer should contact '
'to bootstrap initial cluster metadata')
tornado.options.define('kafka_topic',
type=str,
group='Kafka',
help='The Kafka Topic to publish the messages')
tornado.options.define('kafka_message_key',
type=str,
group='Kafka',
help='When the message is in JSON format, use a key from the message to determine the kafka '
'partition')
tornado.options.parse_command_line()
nsq2kafka = NSQ2Kafka(**tornado.options.options.as_dict())
nsq2kafka.start()
if __name__ == '__main__':
main()
| apache-2.0 | -6,539,842,116,995,485,000 | 43.5 | 120 | 0.505306 | false |
getting-things-gnome/gtg | tests/tools/test_tags.py | 1 | 4125 |
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2014 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from unittest import TestCase
from GTG.core.tag import extract_tags_from_text, parse_tag_list
class TestExtractTags(TestCase):
""" extract_tags_from_text """
def assertTags(self, text, expected_tags):
tag_list = extract_tags_from_text(text)
self.assertEqual(expected_tags, tag_list)
def test_doesnt_find_empty_tag(self):
self.assertTags("", [])
def test_finds_tag_at_beginning(self):
self.assertTags("@tag some other text", ["@tag"])
def test_finds_tag_at_end(self):
self.assertTags("some text ended with @endtag", ["@endtag"])
def test_ignores_emails(self):
self.assertTags(
"no @emails allowed: [email protected]", ["@emails"])
def test_ignores_diffs(self):
self.assertTags("no @@diff stuff", [])
def test_accepts_hypen_in_tag(self):
self.assertTags("@do-this-today", ["@do-this-today"])
self.assertTags("@con--tinuous---hypen-s", ["@con--tinuous---hypen-s"])
def test_ignores_hypen_at_end_of_tag(self):
self.assertTags("@hypen-at-end- some other text", ["@hypen-at-end"])
self.assertTags("@hypen-at-end-, with comma", ["@hypen-at-end"])
def test_accepts_dot_in_tag(self):
self.assertTags("text @gtg-0.3", ["@gtg-0.3"])
def test_ignores_dot_at_end_of_tag(self):
self.assertTags("@tag.", ["@tag"])
def test_accepts_slash_in_tag(self):
self.assertTags("@do/this/today", ["@do/this/today"])
def test_ignores_slash_at_end_of_tag(self):
self.assertTags("@slash/es/", ["@slash/es"])
def test_accepts_colon_in_tag(self):
self.assertTags("@my:tag", ["@my:tag"])
def ignore_colon_at_end(self):
self.assertTags("@:a:b:c:", ["@:a:b:c"])
def test_accepts_ampersand_in_tag(self):
self.assertTags("@home&work", ["@home&work"])
class TestParseTagList(TestCase):
""" parse_tag_list """
def test_parses_positive_single_tag(self):
self.assertEqual(parse_tag_list("tag"), [("tag", True)])
self.assertEqual(parse_tag_list("@tag"), [("@tag", True)])
    def test_parses_positive_tag_list(self):
self.assertEqual(
parse_tag_list("a b c"),
[("a", True), ("b", True), ("c", True)],
)
self.assertEqual(
parse_tag_list("@a @b @c"),
[("@a", True), ("@b", True), ("@c", True)],
)
def test_parses_negative_single_tag(self):
self.assertEqual(parse_tag_list("!tag"), [("tag", False)])
self.assertEqual(parse_tag_list("!@tag"), [("@tag", False)])
def test_parses_negative_tag_list(self):
self.assertEqual(
parse_tag_list("!a !b !c"),
[("a", False), ("b", False), ("c", False)],
)
self.assertEqual(
parse_tag_list("!@a !@b !@c"),
[("@a", False), ("@b", False), ("@c", False)],
)
def test_parses_mixed_tags(self):
self.assertEqual(
parse_tag_list("add !remove"),
[("add", True), ("remove", False)],
)
self.assertEqual(
parse_tag_list("!remove add"),
[("remove", False), ("add", True)],
)
| gpl-3.0 | 8,190,053,820,863,928,000 | 34.560345 | 79 | 0.574303 | false |
pre-commit/pre-commit | tests/languages/r_test.py | 1 | 3688 |
import os.path
import pytest
from pre_commit.languages import r
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
from tests.repository_test import _get_hook_no_install
def _test_r_parsing(
tempdir_factory,
store,
hook_id,
expected_hook_expr={},
expected_args={},
config={},
expect_path_prefix=True,
):
repo_path = 'r_hooks_repo'
path = make_repo(tempdir_factory, repo_path)
config = config or make_config_from_repo(path)
hook = _get_hook_no_install(config, store, hook_id)
ret = r._cmd_from_hook(hook)
expected_cmd = 'Rscript'
expected_opts = (
'--no-save', '--no-restore', '--no-site-file', '--no-environ',
)
expected_path = os.path.join(
hook.prefix.prefix_dir if expect_path_prefix else '',
f'{hook_id}.R',
)
expected = (
expected_cmd,
*expected_opts,
*(expected_hook_expr or (expected_path,)),
*expected_args,
)
assert ret == expected
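# For a plain file-based hook the command assembled and checked above looks roughly like
# ('Rscript', '--no-save', '--no-restore', '--no-site-file', '--no-environ',
#  '<prefix>/parse-file-no-opts-no-args.R'), with '<prefix>' standing in for the hook's
# install prefix (illustrative, not a literal path).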
def test_r_parsing_file_no_opts_no_args(tempdir_factory, store):
hook_id = 'parse-file-no-opts-no-args'
_test_r_parsing(tempdir_factory, store, hook_id)
def test_r_parsing_file_opts_no_args(tempdir_factory, store):
with pytest.raises(ValueError) as excinfo:
r._entry_validate(['Rscript', '--no-init', '/path/to/file'])
msg = excinfo.value.args
assert msg == (
'The only valid syntax is `Rscript -e {expr}`',
'or `Rscript path/to/hook/script`',
)
def test_r_parsing_file_no_opts_args(tempdir_factory, store):
hook_id = 'parse-file-no-opts-args'
expected_args = ['--no-cache']
_test_r_parsing(
tempdir_factory, store, hook_id, expected_args=expected_args,
)
def test_r_parsing_expr_no_opts_no_args1(tempdir_factory, store):
hook_id = 'parse-expr-no-opts-no-args-1'
_test_r_parsing(
tempdir_factory, store, hook_id, expected_hook_expr=('-e', '1+1'),
)
def test_r_parsing_expr_no_opts_no_args2(tempdir_factory, store):
with pytest.raises(ValueError) as execinfo:
r._entry_validate(['Rscript', '-e', '1+1', '-e', 'letters'])
msg = execinfo.value.args
assert msg == ('You can supply at most one expression.',)
def test_r_parsing_expr_opts_no_args2(tempdir_factory, store):
with pytest.raises(ValueError) as execinfo:
r._entry_validate(
[
'Rscript', '--vanilla', '-e', '1+1', '-e', 'letters',
],
)
msg = execinfo.value.args
assert msg == (
'The only valid syntax is `Rscript -e {expr}`',
'or `Rscript path/to/hook/script`',
)
def test_r_parsing_expr_args_in_entry2(tempdir_factory, store):
with pytest.raises(ValueError) as execinfo:
r._entry_validate(['Rscript', '-e', 'expr1', '--another-arg'])
msg = execinfo.value.args
assert msg == ('You can supply at most one expression.',)
def test_r_parsing_expr_non_Rscirpt(tempdir_factory, store):
with pytest.raises(ValueError) as execinfo:
r._entry_validate(['AnotherScript', '-e', '{{}}'])
msg = execinfo.value.args
assert msg == ('entry must start with `Rscript`.',)
def test_r_parsing_file_local(tempdir_factory, store):
path = 'path/to/script.R'
hook_id = 'local-r'
config = {
'repo': 'local',
'hooks': [{
'id': hook_id,
'name': 'local-r',
'entry': f'Rscript {path}',
'language': 'r',
}],
}
_test_r_parsing(
tempdir_factory,
store,
hook_id=hook_id,
expected_hook_expr=(path,),
config=config,
expect_path_prefix=False,
)
| mit | 8,101,526,320,860,956,000 | 27.589147 | 74 | 0.600325 | false |
QISKit/qiskit-sdk-py | test/python/quantum_info/states/test_densitymatrix.py | 1 | 12921 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for DensityMatrix quantum state class."""
import unittest
import logging
import numpy as np
from numpy.testing import assert_allclose
from qiskit.test import QiskitTestCase
from qiskit import QiskitError
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.extensions.standard import HGate
from qiskit.quantum_info.random import random_unitary
from qiskit.quantum_info.states import DensityMatrix, Statevector
from qiskit.quantum_info.operators.operator import Operator
logger = logging.getLogger(__name__)
class TestDensityMatrix(QiskitTestCase):
"""Tests for DensityMatrix class."""
@classmethod
def rand_vec(cls, n, normalize=False):
"""Return complex vector or statevector"""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_vec RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
vec = rng.rand(n) + 1j * rng.rand(n)
if normalize:
vec /= np.sqrt(np.dot(vec, np.conj(vec)))
return vec
@classmethod
def rand_rho(cls, n):
"""Return random pure state density matrix"""
rho = cls.rand_vec(n, normalize=True)
return np.outer(rho, np.conjugate(rho))
def test_init_array_qubit(self):
"""Test subsystem initialization from N-qubit array."""
# Test automatic inference of qubit subsystems
rho = self.rand_rho(8)
for dims in [None, 8]:
state = DensityMatrix(rho, dims=dims)
assert_allclose(state.data, rho)
self.assertEqual(state.dim, 8)
self.assertEqual(state.dims(), (2, 2, 2))
def test_init_array(self):
"""Test initialization from array."""
rho = self.rand_rho(3)
state = DensityMatrix(rho)
assert_allclose(state.data, rho)
self.assertEqual(state.dim, 3)
self.assertEqual(state.dims(), (3,))
rho = self.rand_rho(2 * 3 * 4)
state = DensityMatrix(rho, dims=[2, 3, 4])
assert_allclose(state.data, rho)
self.assertEqual(state.dim, 2 * 3 * 4)
self.assertEqual(state.dims(), (2, 3, 4))
def test_init_array_except(self):
"""Test initialization exception from array."""
rho = self.rand_rho(4)
self.assertRaises(QiskitError, DensityMatrix, rho, dims=[4, 2])
self.assertRaises(QiskitError, DensityMatrix, rho, dims=[2, 4])
self.assertRaises(QiskitError, DensityMatrix, rho, dims=5)
def test_init_densitymatrix(self):
"""Test initialization from DensityMatrix."""
rho1 = DensityMatrix(self.rand_rho(4))
rho2 = DensityMatrix(rho1)
self.assertEqual(rho1, rho2)
def test_init_statevector(self):
"""Test initialization from DensityMatrix."""
vec = self.rand_vec(4)
target = DensityMatrix(np.outer(vec, np.conjugate(vec)))
rho = DensityMatrix(Statevector(vec))
self.assertEqual(rho, target)
def test_from_circuit(self):
"""Test initialization from a circuit."""
# random unitaries
u0 = random_unitary(2).data
u1 = random_unitary(2).data
# add to circuit
qr = QuantumRegister(2)
circ = QuantumCircuit(qr)
circ.unitary(u0, [qr[0]])
circ.unitary(u1, [qr[1]])
target_vec = Statevector(np.kron(u1, u0).dot([1, 0, 0, 0]))
target = DensityMatrix(target_vec)
rho = DensityMatrix.from_instruction(circ)
self.assertEqual(rho, target)
# Test tensor product of 1-qubit gates
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.x(1)
circuit.ry(np.pi / 2, 2)
target = DensityMatrix.from_label('000').evolve(Operator(circuit))
rho = DensityMatrix.from_instruction(circuit)
self.assertEqual(rho, target)
# Test decomposition of Controlled-u1 gate
lam = np.pi / 4
circuit = QuantumCircuit(2)
circuit.h(0)
circuit.h(1)
circuit.cu1(lam, 0, 1)
target = DensityMatrix.from_label('00').evolve(Operator(circuit))
rho = DensityMatrix.from_instruction(circuit)
self.assertEqual(rho, target)
# Test decomposition of controlled-H gate
circuit = QuantumCircuit(2)
        circuit.x(0)
circuit.ch(0, 1)
target = DensityMatrix.from_label('00').evolve(Operator(circuit))
rho = DensityMatrix.from_instruction(circuit)
self.assertEqual(rho, target)
def test_from_instruction(self):
"""Test initialization from an instruction."""
target_vec = Statevector(np.dot(HGate().to_matrix(), [1, 0]))
target = DensityMatrix(target_vec)
rho = DensityMatrix.from_instruction(HGate())
self.assertEqual(rho, target)
def test_from_label(self):
"""Test initialization from a label"""
x_p = DensityMatrix(np.array([[0.5, 0.5], [0.5, 0.5]]))
x_m = DensityMatrix(np.array([[0.5, -0.5], [-0.5, 0.5]]))
y_p = DensityMatrix(np.array([[0.5, -0.5j], [0.5j, 0.5]]))
y_m = DensityMatrix(np.array([[0.5, 0.5j], [-0.5j, 0.5]]))
z_p = DensityMatrix(np.diag([1, 0]))
z_m = DensityMatrix(np.diag([0, 1]))
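        # Single-character label conventions exercised below: '0'/'1' are Z eigenstates,
        # '+'/'-' X eigenstates and 'r'/'l' Y eigenstates, so a label such as '0+r'
        # should correspond to z_p.tensor(x_p).tensor(y_p) built from the matrices above.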
label = '0+r'
target = z_p.tensor(x_p).tensor(y_p)
self.assertEqual(target, DensityMatrix.from_label(label))
label = '-l1'
target = x_m.tensor(y_m).tensor(z_m)
self.assertEqual(target, DensityMatrix.from_label(label))
def test_equal(self):
"""Test __eq__ method"""
for _ in range(10):
rho = self.rand_rho(4)
self.assertEqual(DensityMatrix(rho),
DensityMatrix(rho.tolist()))
def test_rep(self):
"""Test Operator representation string property."""
state = DensityMatrix(self.rand_rho(2))
self.assertEqual(state.rep, 'DensityMatrix')
def test_copy(self):
"""Test DensityMatrix copy method"""
for _ in range(5):
rho = self.rand_rho(4)
orig = DensityMatrix(rho)
cpy = orig.copy()
cpy._data[0] += 1.0
self.assertFalse(cpy == orig)
def test_is_valid(self):
"""Test is_valid method."""
state = DensityMatrix(np.eye(2))
self.assertFalse(state.is_valid())
for _ in range(10):
state = DensityMatrix(self.rand_rho(4))
self.assertTrue(state.is_valid())
def test_to_operator(self):
"""Test to_operator method for returning projector."""
for _ in range(10):
rho = self.rand_rho(4)
target = Operator(rho)
op = DensityMatrix(rho).to_operator()
self.assertEqual(op, target)
def test_evolve(self):
"""Test evolve method for operators."""
for _ in range(10):
op = random_unitary(4)
rho = self.rand_rho(4)
target = DensityMatrix(np.dot(op.data, rho).dot(op.adjoint().data))
evolved = DensityMatrix(rho).evolve(op)
self.assertEqual(target, evolved)
def test_evolve_subsystem(self):
"""Test subsystem evolve method for operators."""
# Test evolving single-qubit of 3-qubit system
for _ in range(5):
rho = self.rand_rho(8)
state = DensityMatrix(rho)
op0 = random_unitary(2)
op1 = random_unitary(2)
op2 = random_unitary(2)
# Test evolve on 1-qubit
op = op0
op_full = Operator(np.eye(4)).tensor(op)
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[0]), target)
# Evolve on qubit 1
op_full = Operator(np.eye(2)).tensor(op).tensor(np.eye(2))
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[1]), target)
# Evolve on qubit 2
op_full = op.tensor(np.eye(4))
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[2]), target)
# Test evolve on 2-qubits
op = op1.tensor(op0)
# Evolve on qubits [0, 2]
op_full = op1.tensor(np.eye(2)).tensor(op0)
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[0, 2]), target)
# Evolve on qubits [2, 0]
op_full = op0.tensor(np.eye(2)).tensor(op1)
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[2, 0]), target)
# Test evolve on 3-qubits
op = op2.tensor(op1).tensor(op0)
# Evolve on qubits [0, 1, 2]
op_full = op
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[0, 1, 2]), target)
# Evolve on qubits [2, 1, 0]
op_full = op0.tensor(op1).tensor(op2)
target = DensityMatrix(np.dot(op_full.data, rho).dot(op_full.adjoint().data))
self.assertEqual(state.evolve(op, qargs=[2, 1, 0]), target)
def test_conjugate(self):
"""Test conjugate method."""
for _ in range(10):
rho = self.rand_rho(4)
target = DensityMatrix(np.conj(rho))
state = DensityMatrix(rho).conjugate()
self.assertEqual(state, target)
def test_expand(self):
"""Test expand method."""
for _ in range(10):
rho0 = self.rand_rho(2)
rho1 = self.rand_rho(3)
target = np.kron(rho1, rho0)
state = DensityMatrix(rho0).expand(DensityMatrix(rho1))
self.assertEqual(state.dim, 6)
self.assertEqual(state.dims(), (2, 3))
assert_allclose(state.data, target)
def test_tensor(self):
"""Test tensor method."""
for _ in range(10):
rho0 = self.rand_rho(2)
rho1 = self.rand_rho(3)
target = np.kron(rho0, rho1)
state = DensityMatrix(rho0).tensor(DensityMatrix(rho1))
self.assertEqual(state.dim, 6)
self.assertEqual(state.dims(), (3, 2))
assert_allclose(state.data, target)
def test_add(self):
"""Test add method."""
for _ in range(10):
rho0 = self.rand_rho(4)
rho1 = self.rand_rho(4)
state0 = DensityMatrix(rho0)
state1 = DensityMatrix(rho1)
self.assertEqual(state0.add(state1), DensityMatrix(rho0 + rho1))
self.assertEqual(state0 + state1, DensityMatrix(rho0 + rho1))
def test_add_except(self):
"""Test add method raises exceptions."""
state1 = DensityMatrix(self.rand_rho(2))
state2 = DensityMatrix(self.rand_rho(3))
self.assertRaises(QiskitError, state1.add, state2)
def test_subtract(self):
"""Test subtract method."""
for _ in range(10):
rho0 = self.rand_rho(4)
rho1 = self.rand_rho(4)
state0 = DensityMatrix(rho0)
state1 = DensityMatrix(rho1)
self.assertEqual(state0.subtract(state1), DensityMatrix(rho0 - rho1))
self.assertEqual(state0 - state1, DensityMatrix(rho0 - rho1))
def test_subtract_except(self):
"""Test subtract method raises exceptions."""
state1 = DensityMatrix(self.rand_rho(2))
state2 = DensityMatrix(self.rand_rho(3))
self.assertRaises(QiskitError, state1.subtract, state2)
def test_multiply(self):
"""Test multiply method."""
for _ in range(10):
rho = self.rand_rho(4)
state = DensityMatrix(rho)
val = np.random.rand() + 1j * np.random.rand()
self.assertEqual(state.multiply(val), DensityMatrix(val * rho))
            self.assertEqual(val * state, DensityMatrix(val * rho))
def test_negate(self):
"""Test negate method"""
for _ in range(10):
rho = self.rand_rho(4)
state = DensityMatrix(rho)
self.assertEqual(-state, DensityMatrix(-1 * rho))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,895,433,002,586,173,000 | 36.452174 | 89 | 0.590512 | false |
kain88-de/mdanalysis | package/MDAnalysis/analysis/encore/similarity.py | 1 | 64696 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
=================================================================================
Ensemble Similarity Calculations --- :mod:`MDAnalysis.analysis.encore.similarity`
=================================================================================
:Author: Matteo Tiberti, Wouter Boomsma, Tone Bengtsen
.. versionadded:: 0.16.0
The module contains implementations of similarity measures between protein
ensembles described in [Lindorff-Larsen2009]_. The implementation and examples
are described in [Tiberti2015]_.
The module includes facilities for handling ensembles and trajectories through
the :class:`Universe` class, performing clustering or dimensionality reduction
of the ensemble space, estimating multivariate probability distributions from
the input data, and more. ENCORE can be used to compare experimental and
simulation-derived ensembles, as well as estimate the convergence of
trajectories from time-dependent simulations.
ENCORE includes three different methods for calculations of similarity measures
between ensembles implemented in individual functions:
+ **Harmonic Ensemble Similarity** : :func:`hes`
+ **Clustering Ensemble Similarity** : :func:`ces`
+ **Dimensional Reduction Ensemble Similarity** : :func:`dres`
as well as two methods to evaluate the convergence of trajectories:
+ **Clustering based convergence evaluation** : :func:`ces_convergence`
+ **Dimensionality-reduction based convergence evaluation** : :func:`dres_convergence`
When using this module in published work please cite [Tiberti2015]_.
References
==========
.. [Lindorff-Larsen2009] Similarity Measures for Protein Ensembles. Lindorff-Larsen, K.; Ferkinghoff-Borg, J. PLoS ONE 2009, 4, e4203.
.. [Tiberti2015] ENCORE: Software for Quantitative Ensemble Comparison. Matteo Tiberti, Elena Papaleo, Tone Bengtsen, Wouter Boomsma, Kresten Lindorff-Larsen. PLoS Comput Biol. 2015, 11
.. _Examples:
Examples
========
The examples show how to use ENCORE to calculate a similarity measurement
of two simple ensembles. The ensembles are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples first execute: ::
>>> from MDAnalysis import Universe
>>> import MDAnalysis.analysis.encore as encore
>>> from MDAnalysis.tests.datafiles import PSF, DCD, DCD2
To calculate the Harmonic Ensemble Similarity (:func:`hes`)
two ensemble objects are first created and then used for calculation: ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> print encore.hes([ens1, ens2])
(array([[ 0. , 38279683.95892926],
[ 38279683.95892926, 0. ]]), None)
Here None is returned in the array as the default details parameter is False.
HES can assume any non-negative value, i.e. no upper bound exists and the
measurement can therefore be used as an absolute scale.
The calculation of the Clustering Ensemble Similarity (:func:`ces`)
is computationally more expensive. It is based on clustering algorithms that in
turn require a similarity matrix between the frames the ensembles are made
of. The similarity matrix is derived from a distance matrix (by default a RMSD
matrix; a full RMSD matrix between each pair of elements needs to be computed).
The RMSD matrix is automatically calculated. ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> CES, details = encore.ces([ens1, ens2])
>>> print CES
[[ 0. 0.68070702]
[ 0.68070702 0. ]]
However, we may want to reuse the RMSD matrix in other calculations e.g.
running CES with different parameters or running DRES. In this
case we first compute the RMSD matrix alone:
>>> rmsd_matrix = encore.get_distance_matrix(
encore.utils.merge_universes([ens1, ens2]),
save_matrix="rmsd.npz")
In the above example the RMSD matrix was also saved in rmsd.npz on disk, and
so can be loaded and re-used at later times, instead of being recomputed:
>>> rmsd_matrix = encore.get_distance_matrix(
encore.utils.merge_universes([ens1, ens2]),
load_matrix="rmsd.npz")
For instance, the rmsd_matrix object can be re-used as input for the
Dimensional Reduction Ensemble Similarity (:func:`dres`) method.
DRES is based on the estimation of the probability density in
a dimensionally-reduced conformational space of the ensembles, obtained from
the original space using either the Stochastic Proximity Embedding algorithm or
the Principal Component Analysis.
As the algorithms require the distance matrix calculated on the original space,
we can reuse the previously-calculated RMSD matrix.
In the following example the dimensions are reduced to 3 using the
saved RMSD matrix and the default SPE dimensional reduction method. : ::
>>> DRES,details = encore.dres([ens1, ens2],
distance_matrix = rmsd_matrix)
>>> print DRES
[[ 0. , 0.67453198]
[ 0.67453198, 0. ]]
In addition to the quantitative similarity estimate, the dimensional reduction
can easily be visualized, see the ``Example`` section in
:mod:`MDAnalysis.analysis.encore.dimensionality_reduction.reduce_dimensionality`.
Due to the stochastic nature of SPE, two identical ensembles will not
necessarily result in an exactly 0 estimate of the similarity, but will be very
close. For the same reason, calculating the similarity with :func:`dres` twice
will not necessarily result in identical values but rather in two very close
values.
It should be noted that both in :func:`ces` and :func:`dres` the similarity is
evaluated using the Jensen-Shannon divergence resulting in an upper bound of
ln(2), which indicates no similarity between the ensembles and a lower bound
of 0.0 signifying two identical ensembles. In contrast, the :func:`hes` function uses
a symmetrized version of the Kullback-Leibler divergence, which is unbounded.
Functions for ensemble comparisons
==================================
.. autofunction:: hes
.. autofunction:: ces
.. autofunction:: dres
Function reference
==================
.. All functions are included via automodule :members:.
"""
from __future__ import print_function, division, absolute_import
from six.moves import range, zip
import MDAnalysis as mda
import numpy as np
import warnings
import logging
try:
from scipy.stats import gaussian_kde
except ImportError:
gaussian_kde = None
msg = "scipy.stats.gaussian_kde could not be imported. " \
"Dimensionality reduction ensemble comparisons will not " \
"be available."
warnings.warn(msg,
category=ImportWarning)
logging.warn(msg)
del msg
from ...coordinates.memory import MemoryReader
from .confdistmatrix import get_distance_matrix
from .bootstrap import (get_distance_matrix_bootstrap_samples,
get_ensemble_bootstrap_samples)
from .clustering.cluster import cluster
from .clustering.ClusteringMethod import AffinityPropagationNative
from .dimensionality_reduction.DimensionalityReductionMethod import (
StochasticProximityEmbeddingNative)
from .dimensionality_reduction.reduce_dimensionality import (
reduce_dimensionality)
from .covariance import (
covariance_matrix, ml_covariance_estimator, shrinkage_covariance_estimator)
from .utils import merge_universes
from .utils import trm_indices_diag, trm_indices_nodiag
# Low boundary value for log() argument - ensure no nans
EPSILON = 1E-15
xlogy = np.vectorize(
lambda x, y: 0.0 if (x <= EPSILON and y <= EPSILON) else x * np.log(y))
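# xlogy(x, y) evaluates x * log(y) but returns 0.0 when both arguments are (close to)
# zero, so that 0 * log(0) terms in the divergences below do not produce NaNs.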
def discrete_kullback_leibler_divergence(pA, pB):
"""Kullback-Leibler divergence between discrete probability distribution.
Notice that since this measure is not symmetric ::
:math:`d_{KL}(p_A,p_B) != d_{KL}(p_B,p_A)`
Parameters
----------
pA : iterable of floats
First discrete probability density function
pB : iterable of floats
Second discrete probability density function
Returns
-------
dkl : float
Discrete Kullback-Liebler divergence
"""
return np.sum(xlogy(pA, pA / pB))
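# Illustrative values (not from the paper): with pA = np.array([0.5, 0.5]) and
# pB = np.array([0.9, 0.1]), discrete_kullback_leibler_divergence(pA, pB) is roughly
# 0.51 while the swapped call gives roughly 0.37, illustrating the asymmetry noted
# in the docstring.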
# discrete dJS
def discrete_jensen_shannon_divergence(pA, pB):
"""Jensen-Shannon divergence between discrete probability distributions.
Parameters
----------
pA : iterable of floats
First discrete probability density function
pB : iterable of floats
Second discrete probability density function
Returns
-------
djs : float
Discrete Jensen-Shannon divergence
"""
return 0.5 * (discrete_kullback_leibler_divergence(pA, (pA + pB) * 0.5) +
discrete_kullback_leibler_divergence(pB, (pA + pB) * 0.5))
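# For the same illustrative arrays, discrete_jensen_shannon_divergence(pA, pB) is
# roughly 0.10; unlike the Kullback-Leibler divergence it is symmetric and bounded
# above by ln(2) ~= 0.693, the value quoted in the module docstring for completely
# dissimilar ensembles.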
# calculate harmonic similarity
def harmonic_ensemble_similarity(sigma1,
sigma2,
x1,
x2):
"""
Calculate the harmonic ensemble similarity measure
as defined in [Tiberti2015]_.
Parameters
----------
sigma1 : numpy.array
Covariance matrix for the first ensemble.
sigma2 : numpy.array
Covariance matrix for the second ensemble.
x1: numpy.array
Mean for the estimated normal multivariate distribution of the first
ensemble.
x2: numpy.array
Mean for the estimated normal multivariate distribution of the second
ensemble.
Returns
-------
dhes : float
harmonic similarity measure
"""
# Inverse covariance matrices
sigma1_inv = np.linalg.pinv(sigma1)
sigma2_inv = np.linalg.pinv(sigma2)
# Difference between average vectors
d_avg = x1 - x2
# Distance measure
trace = np.trace(np.dot(sigma1, sigma2_inv) +
np.dot(sigma2, sigma1_inv)
- 2 * np.identity(sigma1.shape[0]))
d_hes = 0.25 * (np.dot(np.transpose(d_avg),
np.dot(sigma1_inv + sigma2_inv,
d_avg)) + trace)
return d_hes
def clustering_ensemble_similarity(cc, ens1, ens1_id, ens2, ens2_id,
selection="name CA"):
"""Clustering ensemble similarity: calculate the probability densities from
the clusters and calculate discrete Jensen-Shannon divergence.
Parameters
----------
cc : encore.clustering.ClustersCollection
Collection from cluster calculated by a clustering algorithm
(e.g. Affinity propagation)
ens1 : :class:`~MDAnalysis.core.universe.Universe`
First ensemble to be used in comparison
ens1_id : int
First ensemble id as detailed in the ClustersCollection metadata
ens2 : :class:`~MDAnalysis.core.universe.Universe`
Second ensemble to be used in comparison
ens2_id : int
Second ensemble id as detailed in the ClustersCollection metadata
selection : str
Atom selection string in the MDAnalysis format. Default is "name CA".
Returns
-------
djs : float
Jensen-Shannon divergence between the two ensembles, as calculated by
the clustering ensemble similarity method
"""
ens1_coordinates = ens1.trajectory.timeseries(ens1.select_atoms(selection),
format='fac')
ens2_coordinates = ens2.trajectory.timeseries(ens2.select_atoms(selection),
format='fac')
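    # For every cluster, compute the fraction of conformations coming from ensemble 1
    # (tmpA) and from ensemble 2 (tmpB); these two vectors are the discrete
    # probability distributions compared with the Jensen-Shannon divergence below.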
tmpA = np.array([np.where(c.metadata['ensemble_membership'] == ens1_id)[
0].shape[0] / float(ens1_coordinates.shape[0]) for
c in cc])
tmpB = np.array([np.where(c.metadata['ensemble_membership'] == ens2_id)[
0].shape[0] / float(ens2_coordinates.shape[0]) for
c in cc])
# Exclude clusters which have 0 elements in both ensembles
pA = tmpA[tmpA + tmpB > EPSILON]
pB = tmpB[tmpA + tmpB > EPSILON]
return discrete_jensen_shannon_divergence(pA, pB)
def cumulative_clustering_ensemble_similarity(cc, ens1_id, ens2_id,
ens1_id_min=1, ens2_id_min=1):
"""
Calculate clustering ensemble similarity between joined ensembles.
This means that, after clustering has been performed, some ensembles are
merged and the dJS is calculated between the probability distributions of
the two clusters groups. In particular, the two ensemble groups are defined
by their ensembles id: one of the two joined ensembles will comprise all
the ensembles with id [ens1_id_min, ens1_id], and the other ensembles will
comprise all the ensembles with id [ens2_id_min, ens2_id].
Parameters
----------
cc : encore.ClustersCollection
Collection from cluster calculated by a clustering algorithm
(e.g. Affinity propagation)
ens1_id : int
First ensemble id as detailed in the ClustersCollection
metadata
ens2_id : int
Second ensemble id as detailed in the ClustersCollection
metadata
Returns
-------
djs : float
Jensen-Shannon divergence between the two ensembles, as
calculated by the clustering ensemble similarity method
"""
    ensA = [np.where(np.logical_and(
        c.metadata['ensemble_membership'] <= ens1_id,
        c.metadata['ensemble_membership'] >= ens1_id_min))[0].shape[0]
        for c in cc]
    ensB = [np.where(np.logical_and(
        c.metadata['ensemble_membership'] <= ens2_id,
        c.metadata['ensemble_membership'] >= ens2_id_min))[0].shape[0]
        for c in cc]
sizeA = float(np.sum(ensA))
sizeB = float(np.sum(ensB))
tmpA = np.array(ensA) / sizeA
tmpB = np.array(ensB) / sizeB
# Exclude clusters which have 0 elements in both ensembles
pA = tmpA[tmpA + tmpB > EPSILON]
pB = tmpB[tmpA + tmpB > EPSILON]
return discrete_jensen_shannon_divergence(pA, pB)
def gen_kde_pdfs(embedded_space, ensemble_assignment, nensembles,
nsamples):
"""
Generate Kernel Density Estimates (KDE) from embedded spaces and
elaborate the coordinates for later use.
Parameters
----------
embedded_space : numpy.array
Array containing the coordinates of the embedded space
ensemble_assignment : numpy.array
Array containing one int per ensemble conformation. These allow to
distinguish, in the complete embedded space, which conformations
belong to each ensemble. For instance if ensemble_assignment
is [1,1,1,1,2,2], it means that the first four conformations belong
to ensemble 1 and the last two to ensemble 2
nensembles : int
Number of ensembles
nsamples : int
samples to be drawn from the ensembles. Will be required in
a later stage in order to calculate dJS.
Returns
-------
kdes : scipy.stats.gaussian_kde
KDEs calculated from ensembles
resamples : list of numpy.array
For each KDE, draw samples according to the probability distribution
of the KDE mixture model
embedded_ensembles : list of numpy.array
List of numpy.array containing, each one, the elements of the
embedded space belonging to a certain ensemble
"""
kdes = []
embedded_ensembles = []
resamples = []
if gaussian_kde is None:
# hack: if we are running with minimal dependencies then scipy was
# not imported and we have to bail here (see scipy import at top)
raise ImportError("For Kernel Density Estimation functionality you"
"need to import scipy")
for i in range(1, nensembles + 1):
this_embedded = embedded_space.transpose()[
np.where(np.array(ensemble_assignment) == i)].transpose()
embedded_ensembles.append(this_embedded)
kdes.append(gaussian_kde(
this_embedded))
# # Set number of samples
# if not nsamples:
# nsamples = this_embedded.shape[1] * 10
# Resample according to probability distributions
for this_kde in kdes:
resamples.append(this_kde.resample(nsamples))
return (kdes, resamples, embedded_ensembles)
def dimred_ensemble_similarity(kde1, resamples1, kde2, resamples2,
ln_P1_exp_P1=None, ln_P2_exp_P2=None,
ln_P1P2_exp_P1=None, ln_P1P2_exp_P2=None):
"""
Calculate the Jensen-Shannon divergence according the the
Dimensionality reduction method. In this case, we have continuous
probability densities, this we need to integrate over the measurable
space. The aim is to first calculate the Kullback-Liebler divergence, which
is defined as:
.. math::
D_{KL}(P(x) || Q(x)) = \\int_{-\\infty}^{\\infty}P(x_i) ln(P(x_i)/Q(x_i)) = \\langle{}ln(P(x))\\rangle{}_P - \\langle{}ln(Q(x))\\rangle{}_P
where the :math:`\\langle{}.\\rangle{}_P` denotes an expectation calculated
under the distribution P. We can, thus, just estimate the expectation
values of the components to get an estimate of dKL.
Since the Jensen-Shannon distance is actually more complex, we need to
estimate four expectation values:
.. math::
\\langle{}log(P(x))\\rangle{}_P
\\langle{}log(Q(x))\\rangle{}_Q
\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_P
\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_Q
Parameters
----------
kde1 : scipy.stats.gaussian_kde
Kernel density estimation for ensemble 1
resamples1 : numpy.array
Samples drawn according do kde1. Will be used as samples to
calculate the expected values according to 'P' as detailed before.
kde2 : scipy.stats.gaussian_kde
Kernel density estimation for ensemble 2
resamples2 : numpy.array
Samples drawn according do kde2. Will be used as sample to
calculate the expected values according to 'Q' as detailed before.
ln_P1_exp_P1 : float or None
Use this value for :math:`\\langle{}log(P(x))\\rangle{}_P`; if None,
calculate it instead
ln_P2_exp_P2 : float or None
Use this value for :math:`\\langle{}log(Q(x))\\rangle{}_Q`; if
None, calculate it instead
ln_P1P2_exp_P1 : float or None
Use this value for
:math:`\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_P`;
if None, calculate it instead
ln_P1P2_exp_P2 : float or None
Use this value for
:math:`\\langle{}log(0.5*(P(x)+Q(x)))\\rangle{}_Q`;
if None, calculate it instead
Returns
-------
djs : float
Jensen-Shannon divergence calculated according to the dimensionality
reduction method
"""
if not ln_P1_exp_P1 and not ln_P2_exp_P2 and not ln_P1P2_exp_P1 and not \
ln_P1P2_exp_P2:
ln_P1_exp_P1 = np.average(np.log(kde1.evaluate(resamples1)))
ln_P2_exp_P2 = np.average(np.log(kde2.evaluate(resamples2)))
ln_P1P2_exp_P1 = np.average(np.log(
0.5 * (kde1.evaluate(resamples1) + kde2.evaluate(resamples1))))
ln_P1P2_exp_P2 = np.average(np.log(
0.5 * (kde1.evaluate(resamples2) + kde2.evaluate(resamples2))))
return 0.5 * (
ln_P1_exp_P1 - ln_P1P2_exp_P1 + ln_P2_exp_P2 - ln_P1P2_exp_P2)
def cumulative_gen_kde_pdfs(embedded_space, ensemble_assignment, nensembles,
nsamples, ens_id_min=1, ens_id_max=None):
"""
Generate Kernel Density Estimates (KDE) from embedded spaces and
elaborate the coordinates for later use. However, consider more than
one ensemble as the space on which the KDE will be generated. In
particular, will use ensembles with ID [ens_id_min, ens_id_max].
Parameters
----------
embedded_space : numpy.array
Array containing the coordinates of the embedded space
ensemble_assignment : numpy.array
array containing one int per ensemble conformation. These allow
to distinguish, in the complete embedded space, which
conformations belong to each ensemble. For instance if
ensemble_assignment is [1,1,1,1,2,2], it means that the first
four conformations belong to ensemble 1 and the last two
to ensemble 2
nensembles : int
Number of ensembles
nsamples : int
Samples to be drawn from the ensembles. Will be required in a later
stage in order to calculate dJS.
ens_id_min : int
Minimum ID of the ensemble to be considered; see description
ens_id_max : int
Maximum ID of the ensemble to be considered; see description. If None,
it will be set to the maximum possible value given the number of
ensembles.
Returns
-------
kdes : scipy.stats.gaussian_kde
KDEs calculated from ensembles
resamples : list of numpy.array
For each KDE, draw samples according to the probability
distribution of the kde mixture model
embedded_ensembles : list of numpy.array
List of numpy.array containing, each one, the elements of the
embedded space belonging to a certain ensemble
"""
if gaussian_kde is None:
# hack: if we are running with minimal dependencies then scipy was
# not imported and we have to bail here (see scipy import at top)
raise ImportError("For Kernel Density Estimation functionality you"
"need to import scipy")
kdes = []
embedded_ensembles = []
resamples = []
if not ens_id_max:
ens_id_max = nensembles + 1
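    # Each KDE is built on the cumulative set of conformations, i.e. on the
    # union of all ensembles with IDs from ens_id_min up to i, rather than on
    # a single ensemble.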
for i in range(ens_id_min, ens_id_max):
this_embedded = embedded_space.transpose()[np.where(
np.logical_and(ensemble_assignment >= ens_id_min,
ensemble_assignment <= i))].transpose()
embedded_ensembles.append(this_embedded)
kdes.append(
gaussian_kde(this_embedded))
# Resample according to probability distributions
for this_kde in kdes:
resamples.append(this_kde.resample(nsamples))
return (kdes, resamples, embedded_ensembles)
def write_output(matrix, base_fname=None, header="", suffix="",
extension="dat"):
"""
Write output matrix with a nice format, to stdout and optionally a file.
Parameters
----------
matrix : encore.utils.TriangularMatrix
Matrix containing the values to be printed
base_fname : str
Basic filename for output. If None, no files will be written, and
the matrix will be just printed on standard output
header : str
Text to be written just before the matrix
suffix : str
String to be concatenated to basename, in order to get the final
file name
extension : str
Extension for the output file
"""
if base_fname is not None:
fname = base_fname + "-" + suffix + "." + extension
else:
fname = None
matrix.square_print(header=header, fname=fname)
def prepare_ensembles_for_convergence_increasing_window(ensemble,
window_size,
selection="name CA"):
"""
Generate ensembles to be fed to ces_convergence or dres_convergence
from a single ensemble. Basically, the different slices the algorithm
needs are generated here.
Parameters
----------
ensemble : :class:`~MDAnalysis.core.universe.Universe` object
Input ensemble
window_size : int
size of the window (in number of frames) to be used
selection : str
Atom selection string in the MDAnalysis format. Default is "name CA"
Returns
-------
tmp_ensembles :
The original ensemble is divided into different ensembles, each being
a window_size-long slice of the original ensemble. The last
ensemble will be bigger if the length of the input ensemble
is not exactly divisible by window_size.
"""
ens_size = ensemble.trajectory.timeseries(ensemble.select_atoms(selection),
format='fac').shape[0]
rest_slices = ens_size // window_size
residuals = ens_size % window_size
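    # Build the slice boundaries: the first (rest_slices - 1) windows contain
    # exactly window_size frames, while the last window also absorbs the
    # leftover frames.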
slices_n = [0]
tmp_ensembles = []
for rs in range(rest_slices - 1):
slices_n.append(slices_n[-1] + window_size)
slices_n.append(slices_n[-1] + residuals + window_size)
for s,sl in enumerate(slices_n[:-1]):
tmp_ensembles.append(mda.Universe(
ensemble.filename,
ensemble.trajectory.timeseries(format='fac')
[slices_n[s]:slices_n[s + 1], :, :],
format=MemoryReader))
return tmp_ensembles
def hes(ensembles,
selection="name CA",
cov_estimator="shrinkage",
weights='mass',
align=False,
details=False,
estimate_error=False,
bootstrapping_samples=100,
calc_diagonal=False):
"""
Calculates the Harmonic Ensemble Similarity (HES) between ensembles using
the symmetrized version of Kullback-Leibler divergence as described
in [Tiberti2015]_.
Parameters
----------
ensembles : list
List of Universe objects for similarity measurements.
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
cov_estimator : str, optional
Covariance matrix estimator method, either shrinkage, `shrinkage`,
        or Maximum Likelihood, `ml`. Default is shrinkage.
weights : str/array_like, optional
        specify optional weights. If ``mass`` then choose masses of ensemble atoms
align : bool, optional
Whether to align the ensembles before calculating their similarity.
Note: this changes the ensembles in-place, and will thus leave your
ensembles in an altered state.
(default is False)
details : bool, optional
Save the mean and covariance matrix for each
ensemble in a numpy array (default is False).
estimate_error : bool, optional
Whether to perform error estimation (default is False).
bootstrapping_samples : int, optional
Number of times the similarity matrix will be bootstrapped (default
is 100), only if estimate_error is True.
calc_diagonal : bool, optional
Whether to calculate the diagonal of the similarity scores
(i.e. the similarities of every ensemble against itself).
If this is False (default), 0.0 will be used instead.
Returns
-------
numpy.array (bidimensional)
Harmonic similarity measurements between each pair of ensembles.
Notes
-----
The method assumes that each ensemble is derived from a multivariate normal
    distribution. The mean and covariance matrix are, thus, estimated from
    the distribution of each ensemble and used for comparison by the
symmetrized version of Kullback-Leibler divergence defined as:
.. math::
D_{KL}(P(x) || Q(x)) = \\int_{-\\infty}^{\\infty}P(x_i)
ln(P(x_i)/Q(x_i)) = \\langle{}ln(P(x))\\rangle{}_P -
\\langle{}ln(Q(x))\\rangle{}_P
where the :math:`\\langle{}.\\rangle{}_P` denotes an expectation
calculated under the distribution P.
For each ensemble, the mean conformation is estimated as the average over
the ensemble, and the covariance matrix is calculated by default using a
shrinkage estimation method (or by a maximum-likelihood method,
optionally).
Note that the symmetrized version of the Kullback-Leibler divergence has no
upper bound (unlike the Jensen-Shannon divergence used by for instance CES and DRES).
When using this similarity measure, consider whether you want to align
the ensembles first (see example below).
Example
-------
To calculate the Harmonic Ensemble similarity, two ensembles are created
as Universe objects from a topology file and two trajectories. The
topology- and trajectory files used are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples see the module `Examples`_ for how to import the files: ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> HES, details = encore.hes([ens1, ens2])
>>> print HES
[[ 0. 38279683.95892926]
[ 38279683.95892926 0. ]]
You can use the align=True option to align the ensembles first. This will
align everything to the current timestep in the first ensemble. Note that
this changes the ens1 and ens2 objects:
>>> print encore.hes([ens1, ens2], align=True)[0]
[[ 0. 6880.34140106]
[ 6880.34140106 0. ]]
Alternatively, for greater flexibility in how the alignment should be done
you can call use an AlignTraj object manually:
>>> from MDAnalysis.analysis import align
>>> align.AlignTraj(ens1, ens1, select="name CA", in_memory=True).run()
>>> align.AlignTraj(ens2, ens1, select="name CA", in_memory=True).run()
>>> print encore.hes([ens1, ens2])[0]
[[ 0. 7032.19607004]
[ 7032.19607004 0. ]]
"""
if not isinstance(weights, (list, tuple, np.ndarray)) and weights == 'mass':
weights = ['mass' for _ in range(len(ensembles))]
elif weights is not None:
if len(weights) != len(ensembles):
raise ValueError("need weights for every ensemble")
else:
weights = [None for _ in range(len(ensembles))]
# Ensure in-memory trajectories either by calling align
# with in_memory=True or by directly calling transfer_to_memory
# on the universe.
if align:
for e, w in zip(ensembles, weights):
mda.analysis.align.AlignTraj(e, ensembles[0],
select=selection,
weights=w,
in_memory=True).run()
else:
for ensemble in ensembles:
ensemble.transfer_to_memory()
if calc_diagonal:
pairs_indices = list(trm_indices_diag(len(ensembles)))
else:
pairs_indices = list(trm_indices_nodiag(len(ensembles)))
logging.info("Chosen metric: Harmonic similarity")
if cov_estimator == "shrinkage":
covariance_estimator = shrinkage_covariance_estimator
logging.info(" Covariance matrix estimator: Shrinkage")
elif cov_estimator == "ml":
covariance_estimator = ml_covariance_estimator
logging.info(" Covariance matrix estimator: Maximum Likelihood")
else:
logging.error(
"Covariance estimator {0} is not supported. "
"Choose between 'shrinkage' and 'ml'.".format(cov_estimator))
return None
out_matrix_eln = len(ensembles)
xs = []
sigmas = []
if estimate_error:
data = []
ensembles_list = []
for i, ensemble in enumerate(ensembles):
ensembles_list.append(
get_ensemble_bootstrap_samples(
ensemble,
samples=bootstrapping_samples))
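        # For every bootstrap replicate, recompute the mean and covariance of
        # each resampled ensemble and rebuild the full similarity matrix; the
        # final estimate is the average (and standard deviation) over replicates.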
for t in range(bootstrapping_samples):
logging.info("The coordinates will be bootstrapped.")
xs = []
sigmas = []
values = np.zeros((out_matrix_eln, out_matrix_eln))
for i, e_orig in enumerate(ensembles):
xs.append(np.average(
ensembles_list[i][t].trajectory.timeseries(
e_orig.select_atoms(selection),
format=('fac')),
axis=0).flatten())
sigmas.append(covariance_matrix(ensembles_list[i][t],
weights=weights[i],
estimator=covariance_estimator,
selection=selection))
for pair in pairs_indices:
value = harmonic_ensemble_similarity(x1=xs[pair[0]],
x2=xs[pair[1]],
sigma1=sigmas[pair[0]],
sigma2=sigmas[pair[1]])
values[pair[0], pair[1]] = value
values[pair[1], pair[0]] = value
data.append(values)
avgs = np.average(data, axis=0)
stds = np.std(data, axis=0)
return (avgs, stds)
# Calculate the parameters for the multivariate normal distribution
# of each ensemble
values = np.zeros((out_matrix_eln, out_matrix_eln))
for e, w in zip(ensembles, weights):
# Extract coordinates from each ensemble
coordinates_system = e.trajectory.timeseries(e.select_atoms(selection),
format='fac')
# Average coordinates in each system
xs.append(np.average(coordinates_system, axis=0).flatten())
# Covariance matrices in each system
sigmas.append(covariance_matrix(e,
weights=w,
estimator=covariance_estimator,
selection=selection))
for i, j in pairs_indices:
value = harmonic_ensemble_similarity(x1=xs[i],
x2=xs[j],
sigma1=sigmas[i],
sigma2=sigmas[j])
values[i, j] = value
values[j, i] = value
# Save details as required
if details:
kwds = {}
for i in range(out_matrix_eln):
kwds['ensemble{0:d}_mean'.format(i + 1)] = xs[i]
kwds['ensemble{0:d}_covariance_matrix'.format(i + 1)] = sigmas[i]
details = np.array(kwds)
else:
details = None
return values, details
def ces(ensembles,
selection="name CA",
clustering_method=AffinityPropagationNative(
preference=-1.0,
max_iter=500,
convergence_iter=50,
damping=0.9,
add_noise=True),
distance_matrix=None,
estimate_error=False,
bootstrapping_samples=10,
ncores=1,
calc_diagonal=False,
allow_collapsed_result=True):
"""
Calculates the Clustering Ensemble Similarity (CES) between ensembles
using the Jensen-Shannon divergence as described in
[Tiberti2015]_.
Parameters
----------
ensembles : list
List of ensemble objects for similarity measurements
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
clustering_method :
A single or a list of instances of the
:class:`MDAnalysis.analysis.encore.clustering.ClusteringMethod` classes
from the clustering module. Different parameters for the same clustering
method can be explored by adding different instances of the same
clustering class. Clustering methods options are the
Affinity Propagation (default), the DBSCAN and the KMeans. The latter
two methods need the sklearn python module installed.
distance_matrix : encore.utils.TriangularMatrix
Distance matrix clustering methods. If this parameter
is not supplied the matrix will be calculated on the fly.
estimate_error : bool, optional
Whether to perform error estimation (default is False).
Only bootstrapping mode is supported.
bootstrapping_samples : int, optional
number of samples to be used for estimating error.
ncores : int, optional
Maximum number of cores to be used (default is 1).
calc_diagonal : bool, optional
Whether to calculate the diagonal of the similarity scores
(i.e. the similarities of every ensemble against itself).
If this is False (default), 0.0 will be used instead.
allow_collapsed_result: bool, optional
Whether a return value of a list of one value should be collapsed
into just the value.
Returns
-------
ces, details : numpy.array, numpy.array
ces contains the similarity values, arranged in a numpy.array.
If only one clustering_method is provided the output will be a
2-dimensional square symmetrical numpy.array. The order of the matrix
elements depends on the order of the input ensembles: for instance, if
ensemble = [ens1, ens2, ens3]
the matrix elements [0,2] and [2,0] will both contain the similarity
value between ensembles ens1 and ens3.
Elaborating on the previous example, if *n* ensembles are given and *m*
clustering_methods are provided the output will be a list of *m* arrays
ordered by the input sequence of methods, each with a *n*x*n*
symmetrical similarity matrix.
details contains information on the clustering: the individual size of
each cluster, the centroids and the frames associated with each cluster.
Notes
-----
In the Jensen-Shannon divergence the upper bound of ln(2) signifies
    no similarity between the two ensembles, while the lower bound, 0.0,
signifies identical ensembles.
To calculate the CES, the affinity propagation method (or others, if
specified) is used to partition the whole space of conformations. The
population of each ensemble in each cluster is then taken as a probability
density function. Different probability density functions from each
ensemble are finally compared using the Jensen-Shannon divergence measure.
Examples
--------
To calculate the Clustering Ensemble similarity, two ensembles are
created as Universe object using a topology file and two trajectories. The
topology- and trajectory files used are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples see the module `Examples`_ for how to import the files.
Here the simplest case of just two instances of :class:`Universe` is illustrated: ::
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> CES,details = encore.ces([ens1,ens2])
>>> print CES
[[ 0. 0.68070702]
[ 0.68070702 0. ]]
To use a different clustering method, set the parameter clustering_method
(Note that the sklearn module must be installed). Likewise, different parameters
for the same clustering method can be explored by adding different
instances of the same clustering class: ::
>>> CES, details = encore.ces([ens1,ens2],
clustering_method = [encore.DBSCAN(eps=0.45),
encore.DBSCAN(eps=0.50)])
>>> print "eps=0.45: ", CES[0]
eps=0.45: [[ 0. 0.20447236]
[ 0.20447236 0. ]]
>>> print "eps=0.5: ", CES[1]
eps=0.5: [[ 0. 0.25331629]
     [ 0.25331629  0.        ]]
"""
for ensemble in ensembles:
ensemble.transfer_to_memory()
if calc_diagonal:
pairs_indices = list(trm_indices_diag(len(ensembles)))
else:
pairs_indices = list(trm_indices_nodiag(len(ensembles)))
clustering_methods = clustering_method
if not hasattr(clustering_method, '__iter__'):
clustering_methods = [clustering_method]
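    # Record whether the chosen clustering methods can work on a precomputed
    # conformational distance matrix: such methods reuse (and bootstrap) the
    # RMSD matrix, while the others need the trajectories themselves resampled.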
any_method_accept_distance_matrix = \
np.any([method.accepts_distance_matrix for method in clustering_methods])
all_methods_accept_distance_matrix = \
np.all([method.accepts_distance_matrix for method in clustering_methods])
# Register which ensembles the samples belong to
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
# Calculate distance matrix if not provided
if any_method_accept_distance_matrix and not distance_matrix:
distance_matrix = get_distance_matrix(merge_universes(ensembles),
selection=selection,
ncores=ncores)
if estimate_error:
if any_method_accept_distance_matrix:
distance_matrix = \
get_distance_matrix_bootstrap_samples(
distance_matrix,
ensemble_assignment,
samples=bootstrapping_samples,
ncores=ncores)
if not all_methods_accept_distance_matrix:
ensembles_list = []
for i, ensemble in enumerate(ensembles):
ensembles_list.append(
get_ensemble_bootstrap_samples(
ensemble,
samples=bootstrapping_samples))
ensembles = []
for j in range(bootstrapping_samples):
ensembles.append([])
for i, e in enumerate(ensembles_list):
ensembles[-1].append(e[j])
else:
# if all methods accept distances matrices, duplicate
# ensemble so that it matches size of distance matrices
# (no need to resample them since they will not be used)
ensembles = [ensembles]*bootstrapping_samples
# Call clustering procedure
ccs = cluster(ensembles,
                  method=clustering_methods,
                  selection=selection,
                  distance_matrix=distance_matrix,
                  ncores=ncores,
allow_collapsed_result=False)
# Do error analysis
if estimate_error:
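        # cluster() returned one result per (method, bootstrap replicate) pair,
        # flattened into ccs; k walks through that flat list while the values
        # are regrouped per method before averaging.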
k = 0
values = {}
avgs = []
stds = []
for i, p in enumerate(clustering_methods):
failed_runs = 0
values[i] = []
for j in range(bootstrapping_samples):
if ccs[k].clusters is None:
failed_runs += 1
k += 1
continue
values[i].append(np.zeros((len(ensembles[j]),
len(ensembles[j]))))
for pair in pairs_indices:
# Calculate dJS
this_djs = \
clustering_ensemble_similarity(ccs[k],
ensembles[j][
pair[0]],
pair[0] + 1,
ensembles[j][
pair[1]],
pair[1] + 1,
selection=selection)
values[i][-1][pair[0], pair[1]] = this_djs
values[i][-1][pair[1], pair[0]] = this_djs
k += 1
outs = np.array(values[i])
avgs.append(np.average(outs, axis=0))
stds.append(np.std(outs, axis=0))
if hasattr(clustering_method, '__iter__'):
pass
else:
avgs = avgs[0]
stds = stds[0]
return avgs, stds
values = []
details = {}
for i, p in enumerate(clustering_methods):
if ccs[i].clusters is None:
continue
else:
values.append(np.zeros((len(ensembles), len(ensembles))))
for pair in pairs_indices:
# Calculate dJS
this_val = \
clustering_ensemble_similarity(ccs[i],
ensembles[pair[0]],
pair[0] + 1,
ensembles[pair[1]],
pair[1] + 1,
selection=selection)
values[-1][pair[0], pair[1]] = this_val
values[-1][pair[1], pair[0]] = this_val
details['clustering'] = ccs
if allow_collapsed_result and not hasattr(clustering_method, '__iter__'):
values = values[0]
return values, details
def dres(ensembles,
selection="name CA",
dimensionality_reduction_method = StochasticProximityEmbeddingNative(
dimension=3,
distance_cutoff = 1.5,
min_lam=0.1,
max_lam=2.0,
ncycle=100,
nstep=10000),
distance_matrix=None,
nsamples=1000,
estimate_error=False,
bootstrapping_samples=100,
ncores=1,
calc_diagonal=False,
allow_collapsed_result=True):
"""
Calculates the Dimensional Reduction Ensemble Similarity (DRES) between
ensembles using the Jensen-Shannon divergence as described in
[Tiberti2015]_.
Parameters
----------
ensembles : list
List of ensemble objects for similarity measurements
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
dimensionality_reduction_method :
A single or a list of instances of the DimensionalityReductionMethod
classes from the dimensionality_reduction module. Different parameters
for the same method can be explored by adding different instances of
the same dimensionality reduction class. Provided methods are the
Stochastic Proximity Embedding (default) and the Principal Component
Analysis.
distance_matrix : encore.utils.TriangularMatrix
        conformational distance matrix. It will be calculated on the fly
from the ensemble data if it is not provided.
nsamples : int, optional
Number of samples to be drawn from the ensembles (default is 1000).
This is used to resample the density estimates and calculate the
Jensen-Shannon divergence between ensembles.
estimate_error : bool, optional
Whether to perform error estimation (default is False)
bootstrapping_samples : int, optional
number of samples to be used for estimating error.
ncores : int, optional
Maximum number of cores to be used (default is 1).
calc_diagonal : bool, optional
Whether to calculate the diagonal of the similarity scores
        (i.e. the similarities of every ensemble against itself).
If this is False (default), 0.0 will be used instead.
allow_collapsed_result: bool, optional
Whether a return value of a list of one value should be collapsed
into just the value.
Returns
-------
dres, details : numpy.array, numpy.array
dres contains the similarity values, arranged in numpy.array.
If one number of dimensions is provided as an integer,
the output will be a 2-dimensional square symmetrical numpy.array.
The order of the matrix elements depends on the order of the
input ensemble: for instance, if
ensemble = [ens1, ens2, ens3]
then the matrix elements [0,2] and [2,0] will both contain the
similarity value between ensembles ens1 and ens3.
Elaborating on the previous example, if *n* ensembles are given and *m*
methods are provided the output will be a list of *m* arrays
ordered by the input sequence of methods, each with a *n*x*n*
symmetrical similarity matrix.
details provide an array of the reduced_coordinates.
Notes
-----
To calculate the similarity, the method first projects the ensembles into
lower dimensions by using the Stochastic Proximity Embedding (or others)
algorithm. A gaussian kernel-based density estimation method is then used
to estimate the probability density for each ensemble which is then used
to compute the Jensen-Shannon divergence between each pair of ensembles.
In the Jensen-Shannon divergence the upper bound of ln(2) signifies
    no similarity between the two ensembles, while the lower bound, 0.0,
signifies identical ensembles. However, due to the stochastic nature of
the dimensional reduction in :func:`dres`, two identical ensembles will
not necessarily result in an exact 0.0 estimate of the similarity but
will be very close. For the same reason, calculating the similarity with
the :func:`dres` twice will not result in two identical numbers; small
differences have to be expected.
Examples
--------
To calculate the Dimensional Reduction Ensemble similarity, two ensembles
are created as Universe objects from a topology file and two trajectories.
The topology- and trajectory files used are obtained from the MDAnalysis
test suite for two different simulations of the protein AdK. To run the
examples see the module `Examples`_ for how to import the files.
Here the simplest case of comparing just two instances of :class:`Universe` is
illustrated: ::
>>> ens1 = Universe(PSF,DCD)
>>> ens2 = Universe(PSF,DCD2)
>>> DRES, details = encore.dres([ens1,ens2])
>>> print DRES
[[ 0. 0.67996043]
[ 0.67996043 0. ]]
In addition to the quantitative similarity estimate, the dimensional
reduction can easily be visualized, see the ``Example`` section in
:mod:`MDAnalysis.analysis.encore.dimensionality_reduction.reduce_dimensionality``
To use a different dimensional reduction methods, simply set the
parameter dimensionality_reduction_method. Likewise, different parameters
for the same clustering method can be explored by adding different
instances of the same method class: ::
>>> DRES, details = encore.dres([ens1,ens2],
dimensionality_reduction_method = encore.PrincipalComponentAnalysis(dimension=2))
>>> print DRES
[[ 0. 0.69314718]
[ 0.69314718 0. ]]
"""
for ensemble in ensembles:
ensemble.transfer_to_memory()
if calc_diagonal:
pairs_indices = list(trm_indices_diag(len(ensembles)))
else:
pairs_indices = list(trm_indices_nodiag(len(ensembles)))
dimensionality_reduction_methods = dimensionality_reduction_method
if not hasattr(dimensionality_reduction_method, '__iter__'):
dimensionality_reduction_methods = [dimensionality_reduction_method]
any_method_accept_distance_matrix = \
np.any([method.accepts_distance_matrix for method in dimensionality_reduction_methods])
all_methods_accept_distance_matrix = \
np.all([method.accepts_distance_matrix for method in dimensionality_reduction_methods])
# Register which ensembles the samples belong to
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
# Calculate distance matrix if not provided
if any_method_accept_distance_matrix and not distance_matrix:
distance_matrix = get_distance_matrix(merge_universes(ensembles),
selection=selection,
ncores=ncores)
if estimate_error:
if any_method_accept_distance_matrix:
distance_matrix = \
get_distance_matrix_bootstrap_samples(
distance_matrix,
ensemble_assignment,
samples=bootstrapping_samples,
ncores=ncores)
if not all_methods_accept_distance_matrix:
ensembles_list = []
for i, ensemble in enumerate(ensembles):
ensembles_list.append(
get_ensemble_bootstrap_samples(
ensemble,
samples=bootstrapping_samples))
ensembles = []
for j in range(bootstrapping_samples):
                ensembles.append([ensembles_list[i][j] for i
                                  in range(len(ensembles_list))])
else:
# if all methods accept distances matrices, duplicate
# ensemble so that it matches size of distance matrices
# (no need to resample them since they will not be used)
ensembles = [ensembles] * bootstrapping_samples
# Call dimensionality reduction procedure
coordinates, dim_red_details = reduce_dimensionality(
ensembles,
method=dimensionality_reduction_methods,
selection=selection,
        distance_matrix=distance_matrix,
        ncores=ncores,
        allow_collapsed_result=False)
details = {}
details["reduced_coordinates"] = coordinates
details["dimensionality_reduction_details"] = details
if estimate_error:
k = 0
values = {}
avgs = []
stds = []
for i,method in enumerate(dimensionality_reduction_methods):
values[i] = []
for j in range(bootstrapping_samples):
values[i].append(np.zeros((len(ensembles[j]),
len(ensembles[j]))))
kdes, resamples, embedded_ensembles = gen_kde_pdfs(
coordinates[k],
ensemble_assignment,
len(ensembles[j]),
nsamples=nsamples)
for pair in pairs_indices:
this_value = dimred_ensemble_similarity(kdes[pair[0]],
resamples[pair[0]],
kdes[pair[1]],
resamples[pair[1]])
values[i][-1][pair[0], pair[1]] = this_value
values[i][-1][pair[1], pair[0]] = this_value
k += 1
outs = np.array(values[i])
avgs.append(np.average(outs, axis=0))
stds.append(np.std(outs, axis=0))
if hasattr(dimensionality_reduction_method, '__iter__'):
pass
else:
avgs = avgs[0]
stds = stds[0]
return avgs, stds
values = []
for i,method in enumerate(dimensionality_reduction_methods):
values.append(np.zeros((len(ensembles), len(ensembles))))
kdes, resamples, embedded_ensembles = gen_kde_pdfs(coordinates[i],
ensemble_assignment,
len(ensembles),
nsamples=nsamples)
for pair in pairs_indices:
this_value = dimred_ensemble_similarity(kdes[pair[0]],
resamples[pair[0]],
kdes[pair[1]],
resamples[pair[1]])
values[-1][pair[0], pair[1]] = this_value
values[-1][pair[1], pair[0]] = this_value
if allow_collapsed_result and not hasattr(dimensionality_reduction_method,
'__iter__'):
values = values[0]
return values, details
def ces_convergence(original_ensemble,
window_size,
selection="name CA",
clustering_method=AffinityPropagationNative(
preference=-1.0,
max_iter=500,
convergence_iter=50,
damping=0.9,
add_noise=True),
ncores=1):
"""
Use the CES to evaluate the convergence of the ensemble/trajectory.
CES will be calculated between the whole trajectory contained in an
ensemble and windows of such trajectory of increasing sizes, so that
the similarity values should gradually drop to zero. The rate at which
    the values reach zero will be indicative of how much the trajectory
keeps on resampling the same regions of the conformational space, and
therefore of convergence.
Parameters
----------
original_ensemble : :class:`~MDAnalysis.core.universe.Universe` object
ensemble containing the trajectory whose convergence has to estimated
window_size : int
Size of window to be used, in number of frames
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
clustering_method : MDAnalysis.analysis.encore.clustering.ClusteringMethod
A single or a list of instances of the ClusteringMethod classes from
the clustering module. Different parameters for the same clustering
method can be explored by adding different instances of the same
clustering class.
ncores : int, optional
Maximum number of cores to be used (default is 1).
Returns
-------
out : np.array
array of shape (number_of_frames / window_size, preference_values).
Example
--------
To calculate the convergence of a trajectory using the clustering ensemble
similarity method a Universe object is created from a topology file and the
trajectory. The topology- and trajectory files used are obtained from the
MDAnalysis test suite for two different simulations of the protein AdK.
To run the examples see the module `Examples`_ for how to import the files.
Here the simplest case of evaluating the convergence is illustrated by
splitting the trajectory into a window_size of 10 frames : ::
>>> ens1 = Universe(PSF,DCD)
>>> ces_conv = encore.ces_convergence(ens1, 10)
>>> print ces_conv
[[ 0.48194205]
[ 0.40284672]
[ 0.31699026]
[ 0.25220447]
[ 0.19829817]
[ 0.14642725]
[ 0.09911411]
[ 0.05667391]
[ 0. ]]
"""
ensembles = prepare_ensembles_for_convergence_increasing_window(
original_ensemble, window_size, selection=selection)
ccs = cluster(ensembles,
selection=selection,
method=clustering_method,
allow_collapsed_result=False,
ncores=ncores)
out = []
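    # For every clustering, compare the full trajectory against each cumulative
    # slice; as the slice grows towards the whole trajectory the divergence
    # should drop towards zero.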
for cc in ccs:
if cc.clusters is None:
continue
out.append(np.zeros(len(ensembles)))
for j, ensemble in enumerate(ensembles):
out[-1][j] = cumulative_clustering_ensemble_similarity(
cc,
len(ensembles),
j + 1)
out = np.array(out).T
return out
def dres_convergence(original_ensemble,
window_size,
selection="name CA",
dimensionality_reduction_method = \
StochasticProximityEmbeddingNative(
dimension=3,
distance_cutoff=1.5,
min_lam=0.1,
max_lam=2.0,
ncycle=100,
nstep=10000
),
nsamples=1000,
ncores=1):
"""
Use the DRES to evaluate the convergence of the ensemble/trajectory.
DRES will be calculated between the whole trajectory contained in an
ensemble and windows of such trajectory of increasing sizes, so that
the similarity values should gradually drop to zero. The rate at which
    the values reach zero will be indicative of how much the trajectory
    keeps on resampling the same areas of the conformational space, and
therefore of convergence.
Parameters
----------
original_ensemble : :class:`~MDAnalysis.core.universe.Universe` object
ensemble containing the trajectory whose convergence has to estimated
window_size : int
Size of window to be used, in number of frames
selection : str, optional
Atom selection string in the MDAnalysis format. Default is "name CA"
dimensionality_reduction_method :
A single or a list of instances of the DimensionalityReductionMethod
classes from the dimensionality_reduction module. Different parameters
for the same method can be explored by adding different instances of
the same dimensionality reduction class.
nsamples : int, optional
Number of samples to be drawn from the ensembles (default is 1000).
This is akin to the nsamples parameter of dres().
ncores : int, optional
Maximum number of cores to be used (default is 1).
Returns
-------
out : np.array
array of shape (number_of_frames / window_size, preference_values).
Example
--------
To calculate the convergence of a trajectory using the DRES
method, a Universe object is created from a topology file and the
trajectory. The topology- and trajectory files used are obtained from the
MDAnalysis test suite for two different simulations of the protein AdK.
To run the examples see the module `Examples`_ for how to import the files.
Here the simplest case of evaluating the convergence is illustrated by
splitting the trajectory into a window_size of 10 frames : ::
>>> ens1 = Universe(PSF,DCD)
>>> dres_conv = encore.dres_convergence(ens1, 10)
>>> print dres_conv
[[ 0.5295528 ]
[ 0.40716539]
[ 0.31158669]
[ 0.25314041]
[ 0.20447271]
[ 0.13212364]
[ 0.06979114]
[ 0.05214759]
[ 0. ]]
Here, the rate at which the values reach zero will be indicative of how
    much the trajectory keeps on resampling the same areas of the conformational
space, and therefore of convergence.
"""
ensembles = prepare_ensembles_for_convergence_increasing_window(
original_ensemble, window_size, selection=selection)
coordinates, dimred_details = \
reduce_dimensionality(
ensembles,
selection=selection,
method=dimensionality_reduction_method,
allow_collapsed_result=False,
ncores=ncores)
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
ensemble_assignment = np.array(ensemble_assignment)
out = []
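    # The last KDE (kdes[-1], built on all windows) serves as the reference for
    # the full trajectory; every cumulative KDE kdes[j] is compared against it.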
for i, _ in enumerate(coordinates):
out.append(np.zeros(len(ensembles)))
kdes, resamples, embedded_ensembles = \
cumulative_gen_kde_pdfs(
coordinates[i],
ensemble_assignment=ensemble_assignment,
nensembles=len(ensembles),
nsamples=nsamples)
for j, ensemble in enumerate(ensembles):
out[-1][j] = dimred_ensemble_similarity(kdes[-1],
resamples[-1],
kdes[j],
resamples[j])
out = np.array(out).T
return out
| gpl-2.0 | -8,989,456,600,809,411,000 | 36.353349 | 190 | 0.612341 | false |
grnet/synnefo | snf-cyclades-app/synnefo/volume/management/commands/volume-detach.py | 1 | 3271 | # Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import distutils
from optparse import make_option
from django.core.management.base import CommandError
from synnefo.volume import volumes
from synnefo.management import common
from snf_django.management.utils import parse_bool
from snf_django.management.commands import SynnefoCommand
from snf_django.lib.api import Credentials
HELP_MSG = "Detach a volume from a server"
class Command(SynnefoCommand):
# umask = 0o007
can_import_settings = True
args = "<Volume ID> [<Volume ID> ...]"
option_list = SynnefoCommand.option_list + (
make_option(
"--wait",
dest="wait",
default="True",
choices=["True", "False"],
metavar="True|False",
help="Wait for Ganeti jobs to complete."),
make_option(
"-f", "--force",
dest="force",
action="store_true",
default=False,
help="Do not prompt for confirmation"),
)
def confirm_detachment(self, force, resource='', args=''):
if force is True:
return True
ids = ', '.join(args)
self.stdout.write("Are you sure you want to detach %s %s?"
" [Y/N] " % (resource, ids))
try:
answer = distutils.util.strtobool(raw_input())
if answer != 1:
raise CommandError("Aborting detachment")
except ValueError:
raise CommandError("Unaccepted input value. Please choose yes/no"
" (y/n).")
@common.convert_api_faults
def handle(self, *args, **options):
if not args:
raise CommandError("Please provide a volume ID")
force = options['force']
message = "volumes" if len(args) > 1 else "volume"
self.confirm_detachment(force, message, args)
credentials = Credentials("snf-manage", is_admin=True)
for volume_id in args:
self.stdout.write("\n")
try:
volume = volumes.detach(volume_id, credentials)
wait = parse_bool(options["wait"])
if volume.machine is not None:
volume.machine.task_job_id = volume.backendjobid
common.wait_server_task(volume.machine, wait,
stdout=self.stdout)
else:
self.stdout.write("Successfully detached volume %s\n"
% volume)
except CommandError as e:
self.stdout.write("Error -- %s\n" % e.message)
| gpl-3.0 | 2,331,411,874,501,529,600 | 35.752809 | 77 | 0.593397 | false |
princeofdarkness76/libcmaes | python/cma_multiplt.py | 1 | 3443 | #!/usr/bin/env python
"""In a OS shell::
python cma_multiplt.py data_file_name
or in a python shell::
import cma_multiplt as lcmaplt
lcmaplt.plot(data_file_name)
"""
# CMA-ES, Covariance Matrix Adaptation Evolution Strategy
# Copyright (c) 2014 Inria
# Author: Emmanuel Benazera <[email protected]>
#
# This file is part of libcmaes.
#
# libcmaes is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libcmaes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with libcmaes. If not, see <http://www.gnu.org/licenses/>.
##
import sys, pylab, csv
import numpy as np
from matplotlib.pylab import figure, ioff, ion, subplot, semilogy, hold, grid, axis, title, text, xlabel, isinteractive, draw, gcf
# TODO: the above direct imports clutter the interface in a Python shell
# number of static variables at the head of every line (i.e. independent of problem dimension)
single_values = 4
def plot(filename):
# read data into numpy array
dat = np.loadtxt(filename,dtype=float)
dim = int(np.ceil(np.shape(dat)[1] - single_values) / 3) # we estimate the problem dimension from the data
#print dim
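    # Assumed column layout per row: |f-value|, evaluations, sigma, kappa,
    # followed by dim eigenvalues, dim coordinate-wise standard deviations and
    # the dim components of xmean (hence the 3*dim estimate above).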
fvalue = np.absolute(dat[:,0])
fevals = dat[:,1]
sigma = dat[:,2]
kappa = dat[:,3]
if dim > 0:
eigenvc = []
for c in range(single_values,single_values+dim):
eigenvc.append(c)
eigenv = dat[:,eigenvc]
stdsc = []
for c in range(single_values+dim,single_values+2*dim):
stdsc.append(c)
stds = dat[:,stdsc]
minstds = np.amin(stds,axis=1)
maxstds = np.amax(stds,axis=1)
xmeanc = []
for c in range(single_values+2*dim,single_values+3*dim):
xmeanc.append(c)
xmean = dat[:,xmeanc]
# plot data.
pylab.rcParams['font.size'] = 10
xlab = "function evaluations"
# plot fvalue, sigma, kappa
if dim > 0:
subplot(221)
semilogy(fevals,fvalue,'b')
semilogy(fevals,sigma,'g')
semilogy(fevals,kappa,'r')
if dim > 0:
semilogy(fevals,sigma*minstds,'y')
semilogy(fevals,sigma*maxstds,'y')
title('f-value (blue), sigma (green), kappa (red)')
grid(True)
if dim == 0:
pylab.xlabel(xlab)
pylab.show();
msg = ' --- press return to continue --- '
raw_input(msg) if sys.version < '3' else input(msg)
sys.exit(1)
# plot xmean
subplot(222)
pylab.plot(fevals,xmean)
title('Object Variables (mean, ' + str(dim) + '-D)')
grid(True)
# plot eigenvalues
subplot(223)
semilogy(fevals,eigenv,'-b')
pylab.xlabel(xlab)
title('Eigenvalues')
grid(True)
# plot std deviations
subplot(224)
semilogy(fevals,stds)
pylab.xlabel(xlab)
title('Standard Deviation in all coordinates')
grid(True)
pylab.show()
if __name__ == "__main__":
plot(sys.argv[1])
msg = ' --- press return to continue --- '
raw_input(msg) if sys.version < '3' else input(msg)
| lgpl-3.0 | 8,315,465,058,020,034,000 | 28.93913 | 130 | 0.641301 | false |
eljost/pysisyphus | tests_staging/test_prfo/prfo.py | 1 | 3942 | #!/usr/bin/env python3
# Johannes Steinmetzer, April 2019
# See [1] https://pubs.acs.org/doi/pdf/10.1021/j100247a015
# Banerjee, 1985
# [2]
#
import matplotlib.pyplot as plt
import numpy as np
import sympy as sym
def make_funcs():
x, y, = sym.symbols("x y")
f_ = (1 - y)*x**2*sym.exp(-x**2) + (1/2)*y**2
f = sym.lambdify((x, y), f_)
g_ = sym.derive_by_array(f_, (x, y))
g = sym.lambdify((x, y), g_)
H_ = sym.derive_by_array(g_, (x, y))
H = sym.lambdify((x, y), H_)
return f, g, H
def plot(f, g, H, xs, ys):
X, Y = np.meshgrid(xs, ys)
Z = f(X, Y)
levels = np.linspace(0, 2, 75)
# fig, ax = plt.subplots(figsize=(12, 8))
# cf = ax.contour(X, Y, Z, levels=levels)
# fig.colorbar(cf)
# plt.show()
neg_eigvals = list()
grads = list()
for x_ in xs:
for y_ in ys:
hess = H(x_, y_)
eigvals = np.linalg.eigvals(hess)
if eigvals.min() < 0:
neg_eigvals.append((x_, y_))
grad = np.linalg.norm(g(x_, y_))
grads.append(grad)
neg_eigvals = np.array(neg_eigvals)
grads = np.array(grads)
fig, ax = plt.subplots(figsize=(12, 8))
cf = ax.contour(X, Y, Z, levels=levels)
ax.scatter(*neg_eigvals.T, c="r", s=15, label="neg. eigval.")
ax.scatter(X.T, Y.T, c="b", s=5*grads, label="norm(grad)")
ax.legend()
fig.colorbar(cf)
plt.show()
def prfo(x, H_getter, grad_getter):
fg = lambda x: -np.array(grad_getter(*x))
Hg = lambda x: np.array(H_getter(*x))
f = fg(x)
H = Hg(x)
eigvals, eigvecs = np.linalg.eigh(H)
neg_eigvals = eigvals < 0
assert neg_eigvals.sum() >= 1
print(f"found {neg_eigvals.sum()} negative eigenvalues")
# Transform to eigensystem of hessian
f_trans = eigvecs.T.dot(f)
mu = 0
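    # Partitioned RFO (see [1]): maximize along the lowest Hessian eigenmode via
    # a 2x2 augmented Hessian (max_mat) and minimize along the remaining modes
    # via the corresponding augmented Hessian (min_mat).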
max_mat = np.array(((eigvals[mu], -f_trans[mu]),
(-f_trans[mu], 0)))
min_mat = np.bmat((
(np.diag(eigvals[1:]), -f_trans[1:,None]),
(-f_trans[None,1:], [[0]])
))
# Scale eigenvectors of the largest (smallest) eigenvector
# of max_mat (min_mat) so the last item is 1.
max_evals, max_evecs = np.linalg.eigh(max_mat)
    # Eigenvalues and -vectors are sorted, so we just use the last
# eigenvector corresponding to the biggest eigenvalue.
max_step = max_evecs.T[-1]
lambda_max = max_step[-1]
max_step = max_step[:-1] / lambda_max
min_evals, min_evecs = np.linalg.eigh(min_mat)
    # Again, as everything is sorted we use the first eigenvector, which
    # corresponds to the smallest eigenvalue.
min_step = np.asarray(min_evecs.T[0]).flatten()
lambda_min = min_step[-1]
min_step = min_step[:-1] / lambda_min
# Still in the hessian eigensystem
prfo_step = np.zeros_like(f)
prfo_step[0] = max_step[0]
prfo_step[1:] = min_step
# Transform back
step = eigvecs.dot(prfo_step)
norm = np.linalg.norm(step)
if norm > 0.1:
step = 0.1 * step / norm
return step
def run():
x0 = (0.5, 0.2)
f, g, H = make_funcs()
xs = np.linspace(-1.3, 1.3, 50)
ys = np.linspace(-0.7, 1.9, 50)
plot(f, g, H, xs, ys)
prfo(x0, H, g)
fq = -np.array(g(*x0))
hess = np.array(H(*x0))
x = x0
opt_xs = [x, ]
for i in range(15):
step = prfo(x, H, g)
print("norm(step)", np.linalg.norm(step))
grad = g(*x)
gn = np.linalg.norm(grad)
if gn < 1e-5:
print("Converged")
break
x_new = x + step
opt_xs.append(x_new)
x = x_new
opt_xs = np.array(opt_xs)
X, Y = np.meshgrid(xs, ys)
Z = f(X, Y)
levels = np.linspace(0, 2, 75)
fig, ax = plt.subplots()
cf = ax.contour(X, Y, Z, levels=levels)
ax.plot(*opt_xs.T, "ro-", label="TSopt")
fig.colorbar(cf)
ax.set_xlim(xs.min(), xs.max())
ax.set_ylim(ys.min(), ys.max())
plt.show()
if __name__ == "__main__":
run() | gpl-3.0 | -8,698,081,270,007,493,000 | 26.381944 | 76 | 0.538305 | false |
leaffan/pynhldb | _check_player_stats.py | 1 | 3669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from operator import attrgetter
from sqlalchemy import and_, String, cast
from db.common import session_scope
from db.player import Player
from db.team import Team
from db.player_game import PlayerGame
from db.player_season import PlayerSeason
# TODO: command line arguments, comparison of all applicable stat values
season = 2016
season_type = 'RS'
stat_criterion = 'assists'
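# Maps player-season attribute names to their differently named counterparts in
# the per-game records before the per-game values are summed up.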
PS_PG_MAPPING = {
'shots': 'shots_on_goal',
'shifts': 'no_shifts',
'toi': 'toi_overall'
}
if __name__ == '__main__':
# retrieving arguments specified on command line
parser = argparse.ArgumentParser(
        description='Check NHL player statistics against game data.')
parser.add_argument(
'-s', '--season', dest='season', required=False,
metavar='season to check stats for',
help="Season for which stats data will be checked")
parser.add_argument(
'-t', '--type', dest='season_type', required=False,
metavar='season type', choices=['RS', 'PO'],
help="Season type, e.g. regular season (RS) or playoffs (PO)")
parser.add_argument(
'-c', '--criterion', dest='stat_criterion', required=False,
choices=[
'goals', 'assists', 'points', 'pim', 'plus_minus', 'shots',
'hits', 'blocks', 'shifts', 'toi'
],
metavar='statistics criterion to be checked',
help="Statistics criterion to be checked")
args = parser.parse_args()
if args.season is not None:
season = int(args.season)
else:
season = 2017
if args.stat_criterion is not None:
stat_criterion = args.stat_criterion
else:
stat_criterion = 'goals'
if args.season_type is not None:
season_type = args.season_type
else:
season_type = 'RS'
with session_scope() as session:
# retrieving player seasons for specified season and season type
pseasons = session.query(PlayerSeason).filter(
and_(
PlayerSeason.season == season,
PlayerSeason.season_type == season_type
)
).all()
print("+ %d individual season statlines retrieved" % len(pseasons))
for pseason in sorted(pseasons)[:]:
plr = Player.find_by_id(pseason.player_id)
# retrieving individual player games for specified player
# TODO: group by team, i.e. for players with multiple stints with
# a team in one season
pgames = session.query(PlayerGame).filter(
and_(
PlayerGame.player_id == pseason.player_id,
cast(PlayerGame.game_id, String).like("%d02%%" % season),
PlayerGame.team_id == pseason.team_id
)
).all()
if stat_criterion in PS_PG_MAPPING:
stats_value = sum(
map(attrgetter(PS_PG_MAPPING[stat_criterion]), pgames))
else:
stats_value = sum(map(attrgetter(stat_criterion), pgames))
team = Team.find_by_id(pseason.team_id)
# print(plr, stats_value, getattr(pseason, stat_criterion))
try:
assert stats_value == getattr(pseason, stat_criterion)
except Exception as e:
print(plr)
print("\t %s in player games for %s: %d" % (
stat_criterion.capitalize(), team, stats_value))
print("\t %s in player season stats for %s: %d" % (
stat_criterion.capitalize(), team,
getattr(pseason, stat_criterion)))
| mit | -6,396,806,094,362,557,000 | 32.972222 | 77 | 0.579722 | false |
pmorerio/curriculum-dropout | double_mnist/DataSet.py | 1 | 3260 | """Functions for reading MNIST data."""
import numpy as np
from load import doubleMnist
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
class DataSet(object):
def __init__(self,
images,
labels,
dtype=dtypes.float32,
reshape=True):
"""Construct a DataSet.
`dtype` can be either `uint8` to leave the input as `[0, 255]`,
or `float32` to rescale into `[0, 1]`.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
#assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(np.float32)
#images = np.multiply(images, 1.0 / 255.0)
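    # Shift the data by 0.5 so that [0, 1]-scaled inputs end up centered around
    # zero (note that the explicit 1/255 rescaling above is left commented out).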
self._images = images-0.5
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(data_dir='/data/datasets/',
dtype=dtypes.float32,
reshape=True,
validation_size=1000):
train_images, train_labels, test_images, test_labels = doubleMnist(data_dir)
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
validation = DataSet(validation_images,
validation_labels,
dtype=dtype,
reshape=reshape)
test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)
return base.Datasets(train=train, validation=validation, test=test)
| gpl-3.0 | -1,294,835,172,818,895,000 | 31.277228 | 81 | 0.618098 | false |
metpy/MetPy | metpy/cbook.py | 1 | 3238 | # Copyright (c) 2008,2015,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Collection of generally useful utility code from the cookbook."""
import os
import numpy as np
import pooch
from . import __version__
try:
string_type = basestring
except NameError:
string_type = str
# TODO: This can go away when we remove Python 2
def is_string_like(s):
"""Check if an object is a string."""
return isinstance(s, string_type)
POOCH = pooch.create(
path=pooch.os_cache('metpy'),
base_url='https://github.com/Unidata/MetPy/raw/{version}/staticdata/',
version='v' + __version__,
version_dev='master',
env='TEST_DATA_DIR')
# Check if we're running from a git clone and if so, bash the path attribute with the path
# to git's local data store (un-versioned)
# Look for the staticdata directory (i.e. this is a git checkout)
if os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'staticdata')):
POOCH.path = os.path.join(os.path.dirname(__file__), '..', 'staticdata')
POOCH.load_registry(os.path.join(os.path.dirname(__file__), 'static-data-manifest.txt'))
def get_test_data(fname, as_file_obj=True):
"""Access a file from MetPy's collection of test data."""
path = POOCH.fetch(fname)
# If we want a file object, open it, trying to guess whether this should be binary mode
# or not
if as_file_obj:
return open(path, 'rb')
return path
class Registry(object):
"""Provide a generic function registry.
This provides a class to instantiate, which then has a `register` method that can
be used as a decorator on functions to register them under a particular name.
"""
def __init__(self):
"""Initialize an empty registry."""
self._registry = {}
def register(self, name):
"""Register a callable with the registry under a particular name.
Parameters
----------
name : str
The name under which to register a function
Returns
-------
dec : callable
A decorator that takes a function and will register it under the name.
"""
def dec(func):
self._registry[name] = func
return func
return dec
def __getitem__(self, name):
"""Return any callable registered under name."""
return self._registry[name]
def broadcast_indices(x, minv, ndim, axis):
"""Calculate index values to properly broadcast index array within data array.
See usage in interp.
"""
ret = []
for dim in range(ndim):
if dim == axis:
ret.append(minv)
else:
broadcast_slice = [np.newaxis] * ndim
broadcast_slice[dim] = slice(None)
dim_inds = np.arange(x.shape[dim])
ret.append(dim_inds[tuple(broadcast_slice)])
return tuple(ret)
def iterable(value):
"""Determine if value can be iterated over."""
# Special case for pint Quantities
if hasattr(value, 'magnitude'):
value = value.magnitude
return np.iterable(value)
__all__ = ('Registry', 'broadcast_indices', 'get_test_data', 'is_string_like', 'iterable')
| bsd-3-clause | 2,880,880,812,459,009,000 | 27.910714 | 91 | 0.633416 | false |
catmaid/CATMAID | django/applications/catmaid/control/label.py | 1 | 18054 | # -*- coding: utf-8 -*-
from collections import defaultdict
import json
from typing import Any, DefaultDict, List, Optional, Union
from django.db import connection
from django.http import HttpRequest, Http404, JsonResponse, HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from catmaid.models import Project, Class, ClassInstance, Relation, Connector, \
ConnectorClassInstance, UserRole, Treenode, TreenodeClassInstance, \
ChangeRequest
from catmaid.control.authentication import (requires_user_role, can_edit_or_fail,
PermissionError)
from catmaid.control.common import get_request_bool
from catmaid.fields import Double3D
SKELETON_LABEL_CARDINALITY = {
'soma': 1,
}
"""
The maximum number of relationships specific labels should have with nodes of a
single skeleton. This is only used to generate warnings, not enforced.
"""
def get_link_model(node_type:str) -> Union[ConnectorClassInstance, TreenodeClassInstance]:
""" Return the model class that represents the a label link for nodes of
the given node type.
"""
if node_type == 'treenode':
return TreenodeClassInstance
elif node_type == 'connector':
return ConnectorClassInstance
else:
raise Exception(f'Unknown node type: "{node_type}"')
@requires_user_role(UserRole.Annotate)
def label_remove(request:HttpRequest, project_id=None) -> JsonResponse:
label_id = int(request.POST['label_id'])
if request.user.is_superuser:
try:
label = ClassInstance.objects.get(id=label_id,
class_column__class_name='label')
except ClassInstance.DoesNotExist:
raise ValueError("Could not find label with ID %s" % label_id)
is_referenced = TreenodeClassInstance.objects.filter(
class_instance_id=label_id).exists()
if is_referenced:
raise ValueError("Only unreferenced labels are allowed to be removed")
else:
label.delete()
return JsonResponse({
'deleted_labels': [label_id],
'message': 'success'
})
raise PermissionError('Only super users can delete unreferenced labels')
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def labels_all(request:HttpRequest, project_id=None) -> JsonResponse:
"""List all labels (front-end node *tags*) in use.
---
parameters:
- name: project_id
description: Project containing node of interest
required: true
type:
- type: array
items:
type: string
description: Labels used in this project
required: true
"""
cursor = connection.cursor()
cursor.execute("""
SELECT COALESCE(json_agg(name ORDER BY name), '[]'::json)::text
FROM class_instance
WHERE project_id = %(project_id)s
AND class_id = (
SELECT id
FROM class
WHERE class_name = 'label'
AND project_id = %(project_id)s
)
""", {
'project_id': project_id,
})
return HttpResponse(cursor.fetchone()[0], content_type='text/json')
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def labels_all_detail(request:HttpRequest, project_id=None) -> JsonResponse:
"""List all labels (front-end node *tags*) in use alongside their IDs.
---
parameters:
- name: project_id
description: Project containing node of interest
required: true
type:
- type: array
items:
type: string
description: Labels used in this project
required: true
"""
cursor = connection.cursor()
cursor.execute("""
SELECT COALESCE(json_agg(json_build_object('id', id, 'name', name) ORDER BY name), '[]'::json)::text
FROM class_instance
WHERE project_id = %(project_id)s
AND class_id = (
SELECT id
FROM class
WHERE class_name = 'label'
AND project_id = %(project_id)s
)
""", {
'project_id': project_id,
})
return HttpResponse(cursor.fetchone()[0], content_type='text/json')
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def get_label_stats(request:HttpRequest, project_id=None) -> JsonResponse:
"""Get usage statistics of node labels.
---
parameters:
- name: project_id
description: Project from which to get label stats
required: true
type:
- type: array
items:
type: array
items:
type: string
description: [labelID, labelName, skeletonID, treenodeID]
description: Labels used in this project
required: true
"""
labeled_as_relation = Relation.objects.get(project=project_id, relation_name='labeled_as')
cursor = connection.cursor()
cursor.execute("""
SELECT ci.id, ci.name, t.skeleton_id, t.id
FROM class_instance ci
JOIN treenode_class_instance tci
ON tci.class_instance_id = ci.id
JOIN treenode t
ON tci.treenode_id = t.id
WHERE ci.project_id = %s
AND tci.relation_id = %s;
""", [project_id, labeled_as_relation.id])
return JsonResponse(cursor.fetchall(), safe=False)
@api_view(['GET'])
@requires_user_role(UserRole.Browse)
def labels_for_node(request:HttpRequest, project_id=None, node_type:Optional[str]=None, node_id=None) -> JsonResponse:
"""List all labels (front-end node *tags*) attached to a particular node.
---
parameters:
- name: project_id
description: Project containing node of interest
required: true
- name: node_type
description: Either 'connector', 'treenode' or 'location'
required: true
- name: node_id
description: ID of node to list labels for
required: true
type:
- type: array
items:
type: string
description: Labels used on a particular node
required: true
"""
if node_type == 'treenode':
qs = TreenodeClassInstance.objects.filter(
relation__relation_name='labeled_as',
class_instance__class_column__class_name='label',
treenode=node_id,
project=project_id).select_related('class_instance')
elif node_type == 'location' or node_type == 'connector':
qs = ConnectorClassInstance.objects.filter(
relation__relation_name='labeled_as',
class_instance__class_column__class_name='label',
connector=node_id,
project=project_id).select_related('class_instance')
else:
raise Http404(f'Unknown node type: "{node_type}"')
return JsonResponse([link.class_instance.name for link in qs], safe=False)
@requires_user_role(UserRole.Browse)
def labels_for_nodes(request:HttpRequest, project_id=None) -> JsonResponse:
    # Two POST variables, each a list of integer IDs strung together into a
    # single comma-separated string.
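    # Illustrative sketch (editor's addition, hypothetical IDs/labels): a request
    # with treenode_ids="101,102" and connector_ids="201", where only node 101
    # carries "soma" and connector 201 carries "uncertain end", yields a JSON
    # object like {"101": ["soma"], "201": ["uncertain end"]}; nodes without
    # labels are simply absent from the result built below.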
treenode_ids = request.POST.get('treenode_ids', '').strip()
connector_ids = request.POST.get('connector_ids', '').strip()
result:DefaultDict[Any, List] = defaultdict(list)
cursor = connection.cursor()
if treenode_ids:
# Could use treenode_ids directly as a string, but it is good to sanitize arguments
cursor.execute('''
SELECT treenode.id, class_instance.name
FROM treenode, class_instance, treenode_class_instance, relation
WHERE relation.id = treenode_class_instance.relation_id
AND relation.relation_name = 'labeled_as'
AND treenode_class_instance.treenode_id = treenode.id
AND class_instance.id = treenode_class_instance.class_instance_id
AND treenode.id IN (%s)
''' % ','.join(str(int(x)) for x in treenode_ids.split(','))) # convoluted to sanitize
for row in cursor.fetchall():
result[row[0]].append(row[1])
if connector_ids:
cursor.execute('''
SELECT connector.id, class_instance.name
FROM connector, class_instance, connector_class_instance, relation
WHERE relation.id = connector_class_instance.relation_id
AND relation.relation_name = 'labeled_as'
AND connector_class_instance.connector_id = connector.id
AND class_instance.id = connector_class_instance.class_instance_id
AND connector.id IN (%s)
''' % ','.join(str(int(x)) for x in connector_ids.split(','))) # convoluted to sanitize
for row in cursor.fetchall():
result[row[0]].append(row[1])
return JsonResponse(result)
@requires_user_role(UserRole.Annotate)
def label_update(request:HttpRequest, project_id, location_id, ntype:str) -> JsonResponse:
""" location_id is the ID of a treenode or connector.
ntype is either 'treenode' or 'connector'. """
labeled_as_relation = Relation.objects.get(project=project_id, relation_name='labeled_as')
p = get_object_or_404(Project, pk=project_id)
    # TODO: this will FAIL when a tag name itself contains a comma
new_tags = request.POST['tags'].split(',')
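    # Editor's sketch (hypothetical values): tags="soma,uncertain end" yields
    # new_tags == ['soma', 'uncertain end']; a single tag literally named
    # "left,axon" would wrongly be split into 'left' and 'axon' here, which is
    # the failure mode the TODO above refers to.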
delete_existing_labels = get_request_bool(request.POST, 'delete_existing', True)
kwargs = {'relation': labeled_as_relation,
'class_instance__class_column__class_name': 'label'}
table = get_link_model(ntype)
if 'treenode' == ntype:
kwargs['treenode__id'] = location_id
node = Treenode.objects.get(id=location_id)
elif 'connector' == ntype:
kwargs['connector__id'] = location_id
node = Connector.objects.get(id=location_id)
if not table:
raise Http404(f'Unknown node type: "{ntype}"')
# Get the existing list of tags for the tree node/connector and delete any
# that are not in the new list.
existing_labels = table.objects.filter(**kwargs).select_related('class_instance')
existing_names = set(ele.class_instance.name for ele in existing_labels)
duplicate_labels = table.objects.filter(**kwargs).exclude(class_instance__name__in=new_tags).select_related('class_instance')
other_labels = []
deleted_labels = []
if delete_existing_labels:
# Iterate over all labels that should get deleted to check permission
# on each one. Remember each label that couldn't be deleted in the
# other_labels array.
for label in duplicate_labels:
try:
can_edit_or_fail(request.user, label.id, table._meta.db_table)
if remove_label(label.id, ntype):
deleted_labels.append(label)
else:
other_labels.append(label)
except:
other_labels.append(label)
    # Create change requests for labels associated with the treenode by other users
for label in other_labels:
change_request_params = {
'type': 'Remove Tag',
'project': p,
'user': request.user,
'recipient': node.user,
'location': Double3D(node.location_x, node.location_y, node.location_z),
ntype: node,
'description': "Remove tag '%s'" % label.class_instance.name,
'validate_action': 'from catmaid.control.label import label_exists\n' +
'is_valid = label_exists(%s, "%s")' % (str(label.id), ntype),
'approve_action': 'from catmaid.control.label import remove_label\n' +
'remove_label(%s, "%s")' % (str(label.id), ntype)
}
ChangeRequest(**change_request_params).save()
# Add any new labels.
label_class = Class.objects.get(project=project_id, class_name='label')
kwargs = {'user': request.user,
'project': p,
'relation': labeled_as_relation,
ntype: node}
new_labels = []
for tag_name in new_tags:
if len(tag_name) > 0 and tag_name not in existing_names:
# Make sure the tag instance exists
existing_tags = tuple(ClassInstance.objects.filter(
project=p,
name=tag_name,
class_column=label_class))
if len(existing_tags) < 1:
tag = ClassInstance(
project=p,
name=tag_name,
user=request.user,
class_column=label_class)
tag.save()
else:
tag = existing_tags[0]
# Associate the tag with the treenode/connector.
kwargs['class_instance'] = tag
tci = table(**kwargs) # creates new TreenodeClassInstance or ConnectorClassInstance
tci.save()
new_labels.append(tag_name)
if node.user != request.user:
# Inform the owner of the node that the tag was added and give them the option of removing it.
change_request_params = {
'type': 'Add Tag',
'description': 'Added tag \'' + tag_name + '\'',
'project': p,
'user': request.user,
'recipient': node.user,
'location': Double3D(node.location_x, node.location_y, node.location_z),
ntype: node,
'validate_action': 'from catmaid.control.label import label_exists\n' +
'is_valid = label_exists(%s, "%s")' % (str(tci.id), ntype),
'reject_action': 'from catmaid.control.label import remove_label\n' +
'remove_label(%s, "%s")' % (str(tci.id), ntype)
}
ChangeRequest(**change_request_params).save()
response = {
'message': 'success',
'new_labels': new_labels,
'duplicate_labels': [label.class_instance.name for label in duplicate_labels
if label not in deleted_labels],
'deleted_labels': [label.class_instance.name for label in deleted_labels],
}
# Check if any labels on this node violate cardinality restrictions on
# its skeleton.
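    # For example (editor's addition, hypothetical setting): if
    # SKELETON_LABEL_CARDINALITY were {'soma': 1} and 'soma' is among the new
    # tags while another treenode of the same skeleton is already tagged 'soma',
    # the query below returns a row and a warning is added to the response.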
if 'treenode' == ntype:
limited_labels = {label: SKELETON_LABEL_CARDINALITY[label] \
for label in new_tags if label in SKELETON_LABEL_CARDINALITY}
if limited_labels:
ll_names, ll_maxes = zip(*limited_labels.items())
cursor = connection.cursor()
cursor.execute("""
SELECT
ll.name,
COUNT(tci.treenode_id),
ll.max
FROM
class_instance ci,
treenode_class_instance tci,
treenode tn,
unnest(%s::text[], %s::bigint[]) AS ll (name, max)
WHERE ci.name = ll.name
AND ci.project_id = %s
AND ci.class_id = %s
AND tci.class_instance_id = ci.id
AND tci.relation_id = %s
AND tn.id = tci.treenode_id
AND tn.skeleton_id = %s
GROUP BY
ll.name, ll.max
HAVING
COUNT(tci.treenode_id) > ll.max
""", (
list(ll_names),
list(ll_maxes),
p.id,
label_class.id,
labeled_as_relation.id,
node.skeleton_id))
if cursor.rowcount:
response['warning'] = 'The skeleton has too many of the following tags: ' + \
', '.join('{0} ({1}, max. {2})'.format(*row) for row in cursor.fetchall())
return JsonResponse(response)
def label_exists(label_id, node_type) -> bool:
    # This checks whether the exact instance (link) of the tag applied to a node/connector still exists.
    # If the tag was removed and added again, this will return False for the old link.
table = get_link_model(node_type)
try:
label = table.objects.get(pk=label_id)
return True
except table.DoesNotExist:
return False
@requires_user_role(UserRole.Annotate)
def remove_label_link(request:HttpRequest, project_id, ntype:str, location_id) -> JsonResponse:
label = request.POST.get('tag', None)
if not label:
raise ValueError("No label parameter given")
table = get_link_model(ntype)
try:
if 'treenode' == ntype:
link_id = table.objects.get(treenode_id=location_id, class_instance__name=label).id
elif 'connector' == ntype:
link_id = table.objects.get(connector_id=location_id, class_instance__name=label).id
except TreenodeClassInstance.DoesNotExist:
raise ValueError("Node %s does not have a label with name \"%s\"." %
(location_id, label))
except ConnectorClassInstance.DoesNotExist:
raise ValueError("Connector %s does not have a label with name \"%s\"." %
(location_id, label))
if remove_label(link_id, ntype):
return JsonResponse({
'deleted_link': link_id,
'message': 'success'
})
else:
return JsonResponse({
'error': 'Could not remove label'
})
def remove_label(label_id, node_type:str) -> bool:
    # This removes an exact instance (link) of a tag applied to a node/connector; it does not look up the tag by name.
# If the tag was removed and added again then this will do nothing and the tag will remain.
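    # Hypothetical example (editor's addition): if link 42 attached the label
    # "soma" to a treenode and the label was later removed and re-added, the new
    # link gets a different ID (say 43); remove_label(42, 'treenode') then
    # removes nothing and returns False, while remove_label(43, 'treenode')
    # removes the current link and returns True.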
table = get_link_model(node_type)
try:
label_link = table.objects.get(pk=label_id)
label = label_link.class_instance
label_link.delete()
# Remove class instance for the deleted label if it is no longer linked
# to any nodes.
if 0 == label.treenodeclassinstance_set.count() + label.connectorclassinstance_set.count():
label.delete()
return True
except table.DoesNotExist:
return False
| gpl-3.0 | -8,427,443,500,740,408,000 | 37.742489 | 129 | 0.597153 | false |
carragom/modoboa | modoboa/admin/tests/test_mapfiles.py | 1 | 2357 | """Test map files generation."""
import os
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from modoboa.core.utils import parse_map_file
from modoboa.lib.test_utils import MapFilesTestCaseMixin
class MapFilesTestCase(MapFilesTestCaseMixin, TestCase):
"""Test case for admin."""
MAP_FILES = [
"sql-domains.cf", "sql-domain-aliases.cf", "sql-aliases.cf",
"sql-maintain.cf", "sql-sender-login-mailboxes.cf",
"sql-sender-login-mailboxes-extra.cf", "sql-sender-login-aliases.cf"
]
def test_map_upgrade(self):
"""Check that map content is used."""
dburl = "postgres://user:password@localhost/testdb"
call_command(
"generate_postfix_maps",
"--dburl", dburl, "--destdir", self.workdir)
# Now upgrade files. Map credentials should be preserved.
call_command("generate_postfix_maps", "--destdir", self.workdir)
for f in self.MAP_FILES:
mapcontent = parse_map_file(os.path.join(self.workdir, f))
self.assertEqual(mapcontent["user"], "user")
self.assertEqual(mapcontent["password"], "password")
self.assertEqual(mapcontent["dbname"], "testdb")
# Now force overwrite, credentials should be different
call_command(
"generate_postfix_maps", "--destdir", self.workdir,
"--force-overwrite")
dbsettings = settings.DATABASES["default"]
for f in self.MAP_FILES:
mapcontent = parse_map_file(os.path.join(self.workdir, f))
if dbsettings["ENGINE"] == "django.db.backends.sqlite3":
self.assertEqual(mapcontent["dbpath"], dbsettings["NAME"])
else:
self.assertEqual(mapcontent["user"], dbsettings["USER"])
self.assertEqual(
mapcontent["password"], dbsettings["PASSWORD"])
self.assertEqual(mapcontent["dbname"], dbsettings["NAME"])
# Now modify a file manually
path = os.path.join(self.workdir, "sql-domains.cf")
with open(path, "a") as fp:
fp.write("pouet")
call_command("generate_postfix_maps", "--destdir", self.workdir)
with open(path) as fp:
content = fp.read()
self.assertIn("pouet", content)
| isc | 7,982,682,497,764,823,000 | 38.283333 | 76 | 0.616886 | false |
lzw120/django | mysite/mysite/books/models.py | 1 | 1105 | from django.db import models
# Create your models here.
class Publisher(models.Model):
name = models.CharField(max_length = 30)
address = models.CharField(max_length = 50)
city = models.CharField(max_length = 60)
state_province = models.CharField(max_length = 30)
country = models.CharField(max_length = 50)
website = models.URLField()
def __str__(self):
return '%s, %s, %s'%(self.name, self.address, self.country)
class Author(models.Model):
salutation = models.CharField(max_length = 10)
first_name = models.CharField(max_length = 30)
last_name = models.CharField(max_length = 40)
email = models.EmailField()
headshot = models.ImageField(upload_to = '/tmp')
def __str__(self):
return '%s %s'%(self.first_name, self.last_name)
class Book(models.Model):
title = models.CharField(max_length = 100)
authors = models.ManyToManyField(Author)
publisher = models.ForeignKey(Publisher)
publication_date = models.DateField()
def __str__(self):
return self.title
| bsd-3-clause | -6,974,113,854,544,279,000 | 28.864865 | 67 | 0.638009 | false |
sunlightlabs/sitegeist | sitegeist/data/census/migrations/0003_auto__add_field_tract_B08301_001E__add_field_tract_B08301_002E__add_fi.py | 1 | 39582 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Tract.B08301_001E'
db.add_column('census_tract', 'B08301_001E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_002E'
db.add_column('census_tract', 'B08301_002E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_003E'
db.add_column('census_tract', 'B08301_003E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_004E'
db.add_column('census_tract', 'B08301_004E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_005E'
db.add_column('census_tract', 'B08301_005E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_006E'
db.add_column('census_tract', 'B08301_006E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_007E'
db.add_column('census_tract', 'B08301_007E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_008E'
db.add_column('census_tract', 'B08301_008E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_009E'
db.add_column('census_tract', 'B08301_009E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_010E'
db.add_column('census_tract', 'B08301_010E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_011E'
db.add_column('census_tract', 'B08301_011E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_012E'
db.add_column('census_tract', 'B08301_012E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_013E'
db.add_column('census_tract', 'B08301_013E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_014E'
db.add_column('census_tract', 'B08301_014E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_015E'
db.add_column('census_tract', 'B08301_015E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_016E'
db.add_column('census_tract', 'B08301_016E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_017E'
db.add_column('census_tract', 'B08301_017E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_018E'
db.add_column('census_tract', 'B08301_018E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_019E'
db.add_column('census_tract', 'B08301_019E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_020E'
db.add_column('census_tract', 'B08301_020E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_021E'
db.add_column('census_tract', 'B08301_021E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Tract.B08301_001E'
db.delete_column('census_tract', 'B08301_001E')
# Deleting field 'Tract.B08301_002E'
db.delete_column('census_tract', 'B08301_002E')
# Deleting field 'Tract.B08301_003E'
db.delete_column('census_tract', 'B08301_003E')
# Deleting field 'Tract.B08301_004E'
db.delete_column('census_tract', 'B08301_004E')
# Deleting field 'Tract.B08301_005E'
db.delete_column('census_tract', 'B08301_005E')
# Deleting field 'Tract.B08301_006E'
db.delete_column('census_tract', 'B08301_006E')
# Deleting field 'Tract.B08301_007E'
db.delete_column('census_tract', 'B08301_007E')
# Deleting field 'Tract.B08301_008E'
db.delete_column('census_tract', 'B08301_008E')
# Deleting field 'Tract.B08301_009E'
db.delete_column('census_tract', 'B08301_009E')
# Deleting field 'Tract.B08301_010E'
db.delete_column('census_tract', 'B08301_010E')
# Deleting field 'Tract.B08301_011E'
db.delete_column('census_tract', 'B08301_011E')
# Deleting field 'Tract.B08301_012E'
db.delete_column('census_tract', 'B08301_012E')
# Deleting field 'Tract.B08301_013E'
db.delete_column('census_tract', 'B08301_013E')
# Deleting field 'Tract.B08301_014E'
db.delete_column('census_tract', 'B08301_014E')
# Deleting field 'Tract.B08301_015E'
db.delete_column('census_tract', 'B08301_015E')
# Deleting field 'Tract.B08301_016E'
db.delete_column('census_tract', 'B08301_016E')
# Deleting field 'Tract.B08301_017E'
db.delete_column('census_tract', 'B08301_017E')
# Deleting field 'Tract.B08301_018E'
db.delete_column('census_tract', 'B08301_018E')
# Deleting field 'Tract.B08301_019E'
db.delete_column('census_tract', 'B08301_019E')
# Deleting field 'Tract.B08301_020E'
db.delete_column('census_tract', 'B08301_020E')
# Deleting field 'Tract.B08301_021E'
db.delete_column('census_tract', 'B08301_021E')
models = {
'census.tract': {
'B01001_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_018E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_019E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_020E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_021E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_022E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_023E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_024E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_025E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_026E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_027E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_028E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_029E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_030E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_031E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_032E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_033E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_034E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_035E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_036E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_037E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_038E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_039E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_040E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_041E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_042E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_043E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_044E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_045E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_046E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_047E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_048E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_049E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01002_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01003_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_018E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_019E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_020E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_021E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_022E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_023E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_024E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_025E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_026E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_027E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_028E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_029E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_030E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_031E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_032E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_033E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_034E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_035E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_036E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_037E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_038E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_039E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_040E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_041E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_042E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_043E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_044E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_045E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_046E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_047E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_048E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_049E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_050E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_051E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_052E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_053E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_054E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_055E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_056E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_057E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_058E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_059E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_060E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_061E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_062E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_063E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_064E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_065E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_066E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_067E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_068E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_069E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_070E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_071E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_072E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_073E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_074E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_075E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_076E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_077E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_078E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_079E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_080E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_081E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_082E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_083E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_084E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_085E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_086E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_087E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_088E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_089E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_090E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_091E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_092E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_093E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_094E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_095E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_096E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_097E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_098E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_099E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_100E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_101E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_102E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_103E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_104E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_105E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_106E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_107E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_108E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_018E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_019E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_020E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_021E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B11005_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B11005_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19013_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25003_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25003_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25003_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25058_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25064_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25077_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'Meta': {'ordering': "('state', 'county', 'tract')", 'object_name': 'Tract'},
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tract': ('django.db.models.fields.CharField', [], {'max_length': '12'})
}
}
    complete_apps = ['census']
| bsd-3-clause | -209,414,603,140,993,950 | 98.454774 | 150 | 0.562958 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_hasnoteregexp.py | 1 | 1683 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnoteregexbase import HasNoteRegexBase
#-------------------------------------------------------------------------
# "People having notes that contain a substring"
#-------------------------------------------------------------------------
class HasNoteRegexp(HasNoteRegexBase):
name = _('People having notes containing <regular expression>')
description = _("Matches people whose notes contain text "
"matching a regular expression")
| gpl-2.0 | 4,759,022,016,701,117,000 | 36.4 | 75 | 0.537136 | false |
Anaethelion/django-mapentity | mapentity/urls.py | 1 | 1523 | from django.conf import settings
from django.conf.urls import patterns, url, include
from . import app_settings
from . import registry
from .views import (map_screenshot, history_delete,
serve_attachment, JSSettings, Convert)
if app_settings['ACTION_HISTORY_ENABLED']:
from .models import LogEntry
_MEDIA_URL = settings.MEDIA_URL.replace(app_settings['ROOT_URL'], '')
if _MEDIA_URL.startswith('/'):
_MEDIA_URL = _MEDIA_URL[1:]
if _MEDIA_URL.endswith('/'):
_MEDIA_URL = _MEDIA_URL[:-1]
urlpatterns = patterns(
'',
url(r'^map_screenshot/$', map_screenshot, name='map_screenshot'),
url(r'^convert/$', Convert.as_view(), name='convert'),
url(r'^history/delete/$', history_delete, name='history_delete'),
url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
# See default value in app_settings.JS_SETTINGS.
    # It will most probably be overridden.
url(r'^api/settings.json$', JSSettings.as_view(), name='js_settings'),
)
if settings.DEBUG or app_settings['SENDFILE_HTTP_HEADER']:
urlpatterns += patterns(
'',
url(r'^%s/(?P<path>paperclip/(?P<app_label>.+)_(?P<model_name>.+)/(?P<pk>\d+)/.+)$' % _MEDIA_URL, serve_attachment),
)
if app_settings['ACTION_HISTORY_ENABLED']:
from mapentity.registry import MapEntityOptions
class LogEntryOptions(MapEntityOptions):
menu = False
dynamic_views = ['List', 'JsonList', 'Layer']
urlpatterns += registry.register(LogEntry, LogEntryOptions)
| bsd-3-clause | -6,971,764,326,602,294,000 | 32.844444 | 124 | 0.662508 | false |
tanmoy7989/candidacy_plot_scripts | plot_transferability.py | 1 | 3441 | import os, sys
import numpy as np
import pickle
import matplotlib
import matplotlib.cm as cm
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as plt
# Build dependencies
import setup
axlbls = setup.lbl_dict
units = setup.units_dict
matplotlib.rcParams.update(setup.params)
# Data and target locations
c25_dir = os.path.expanduser('~/c25ld/data/analysis/feb15_runs_fsw')
c40_dir = os.path.expanduser('~/c25ld/data/analysis/transferability/c40')
c50_dir = os.path.expanduser('~/c25ld/data/analysis/transferability/c50')
c18_dir = os.path.expanduser('~/c25ld/data/analysis/transferability/c18')
c12_dir = os.path.expanduser('~/c25ld/data/analysis/transferability/c12')
fftypes = ['lj', 'wca']
### ------------------- Plot_dicts----------------------------------------------
ncases = 4
case_dirs = {1: c12_dir, 2: c18_dir, 3: c40_dir, 4: c50_dir}
case_titles = {1: 'c-12 X 3', 2: 'c-18 X 2', 3: 'c-40', 4: 'c-50'}
cgtypes = ['SP', 'SPLD', 'LD']
clrs = {'AA': 'red', 'CG': 'blue'}
#------------------------ Plotting ---------------------------------------------
def make1Dplot(geom_prop, fftype):
fig = plt.figure(figsize = (8,4))
axlist = []
nrows = len(cgtypes)
ncols = ncases
for i, cgtype in enumerate(cgtypes):
AA_pickle_name = 'AA_%s_hist1D_%s.pickle' % (fftype, geom_prop)
CG_pickle_name = 'CG_%s_%s_hist1D_%s.pickle' % (fftype, cgtype, geom_prop)
for case in range(ncases):
target_dir = case_dirs[case+1]
AA_pickle = os.path.join(target_dir, AA_pickle_name); print AA_pickle
CG_pickle = os.path.join(target_dir, CG_pickle_name); print cgtype, CG_pickle
AA_data = pickle.load(open(AA_pickle, 'r'))
CG_data = pickle.load(open(CG_pickle, 'r'))
ax = fig.add_subplot(nrows, ncols, i*ncases + case+1)
if not case == 3:
ax.plot(AA_data['bin_centers']+3*case, AA_data['bin_vals'], linestyle = 'solid', color = clrs['AA'], label = 'AA')
ax.plot(CG_data['bin_centers']+3*case, CG_data['bin_vals'], linestyle = 'dashed',color = clrs['CG'], label = 'CG')
else:
ax.plot(AA_data['bin_centers']+3*case, AA_data['bin_vals'], linestyle = 'solid', color = clrs['AA'], label = '')
ax.plot(CG_data['bin_centers']+3*case, CG_data['bin_vals'], linestyle = 'dashed',color = clrs['CG'], label = cgtype)
ax.xaxis.labelpad = 0.5
ax.yaxis.labelpad = 0.5
if not case == 0:
ax.set_yticklabels([])
ax.set_ylabel('')
elif case == 0:
ax.yaxis.set_major_locator(MaxNLocator(nbins = 5, prune = 'both'))
if not i == 2:
ax.set_xticklabels([])
ax.set_xlabel('')
elif i == 2:
ax.xaxis.set_major_locator(MaxNLocator(nbins = 5, prune = 'both'))
if i == 0:
ax.set_title(case_titles[case+1])
if i == 0 and case == 0:
loc = 'best' if geom_prop == 'SASA_atom' else 2
leg = ax.legend(loc = loc, prop = {'size': 8})
leg.get_frame().set_linewidth(0.0)
leg.get_frame().set_alpha(0.3)
if case == 3:
leg = ax.legend(loc = 1, prop = {'size': 8})
leg.get_frame().set_linewidth(0.0)
leg.get_frame().set_alpha(0.3)
plt.subplots_adjust(hspace = 0.0, wspace = 0.0, bottom = 0.15)
plt.figtext(0.45, 0.030, axlbls[geom_prop], fontsize = 'large')
plt.figtext(0.035, 0.40, axlbls['dist'], fontsize = 'large', rotation = 90)
geom_prop = sys.argv[1]
fftype = sys.argv[2]
make1Dplot(geom_prop, fftype)
plt.savefig('%s_%s_transferability.%s' % (geom_prop, fftype, setup.params['savefig.format']))
#plt.show()
| gpl-2.0 | 1,855,099,025,161,924,000 | 36.402174 | 120 | 0.617262 | false |
JohnUrban/poreminion | poreminion/winner.py | 1 | 3048 | from poretools import *
import sys
## Nov 4, 2014
## TODO for pipeline: I'd want both the "details" and the "fa" files
## This would require a --saveas option -- and it will save both.
## Might also want it to save the fastas to their own files when --each
#logging
import logging
logger = logging.getLogger('poreminion')
def run(parser, args):
longest_size = 0
longest_size_2d = 0
longest_size_template = 0
longest_size_complement = 0
longest_read = None
longest_read_2d = None
longest_read_template = None
longest_read_complement = None
if args.type == 'each':
each=True
args.type='all'
else:
each=False
for fast5 in Fast5FileSet(args.files):
fas = fast5.get_fastas(args.type)
for fa in fas:
readtype = fa.name.split()[0].split("_")[-1]
readlen = len(fa.seq)
if each:
if readtype == "template" and readlen > longest_size_template:
longest_size_template = readlen
longest_read_template = fa
elif (readtype == "2D" or readtype == "twodirections") and readlen > longest_size_2d:
longest_size_2d = readlen
longest_read_2d = fa
elif readtype == "complement" and readlen > longest_size_complement:
longest_size_complement = readlen
longest_read_complement = fa
else:
if fa and len(fa.seq) > longest_size:
longest_size = len(fa.seq)
longest_read = fa
fast5.close()
## logger.info("Wow, it's a whopper: your longest read is %d bases." % (longest_size,))
if args.details:
if each:
if longest_read_2d:
print ("\t").join([longest_read_2d.name.split()[0], str(longest_size_2d)])
if longest_read_template:
print ("\t").join([longest_read_template.name.split()[0], str(longest_size_template)])
if longest_read_complement:
print ("\t").join([longest_read_complement.name.split()[0], str(longest_size_complement)])
else:
print ("\t").join([longest_read.name.split()[0], str(longest_size)])
else:
if each:
if longest_read_2d:
print longest_read_2d
if longest_read_template:
print longest_read_template
if longest_read_complement:
print longest_read_complement
else:
print longest_read
| mit | 6,711,413,643,191,066,000 | 41.929577 | 122 | 0.472441 | false |
jeremiedecock/snippets | python/urllib/get_html_setup_http_headers_basic_shutil_version.py | 1 | 3087 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Warning:
# The urllib2 (from Python 2.x) module has been split across several modules in
# Python 3 named "urllib.request" and "urllib.error".
# Urllib (and thus urllib2) is part of the Python3 standard library but this is
# not the case for urllib3 !
# "urllib and urllib2 have little to do with each other. They were designed to
# be independent and standalone, each solving a different scope of problems,
# and urllib3 follows in a similar vein."
# Online documentation:
# - https://docs.python.org/3/library/urllib.request.html
# - http://stackoverflow.com/questions/24226781/changing-user-agent-in-python-3-for-urrlib-urlopen
# - http://stackoverflow.com/questions/802134/changing-user-agent-on-urllib2-urlopen
import argparse
import shutil
import urllib.request
HTTP_HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0 Iceweasel/38.2.1',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate'
}
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description='An urllib snippet.')
parser.add_argument("url", nargs=1, metavar="URL",
help="The URL of the webpage to parse.")
args = parser.parse_args()
url = args.url[0]
print("URL:", url)
print()
# HTTP REQUEST ############################################################
# See http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
http_request = urllib.request.Request(url, data=None, headers=HTTP_HEADERS)
with urllib.request.urlopen(http_request) as http_response, open('out.html', 'wb') as out_file:
shutil.copyfileobj(http_response, out_file)
if __name__ == '__main__':
main()
| mit | 8,728,841,368,720,717,000 | 41.260274 | 106 | 0.694327 | false |
openstack/storlets | tests/functional/python/test_broken_storlet.py | 1 | 1840 | # Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swiftclient import client
from swiftclient.exceptions import ClientException
from tests.functional.python import StorletPythonFunctionalTest
import unittest
from storlets.agent.common.utils import DEFAULT_PY2
class TestBrokenStorlet(StorletPythonFunctionalTest):
def setUp(self, version=None):
self.storlet_log = 'broken.log'
self.content = 'abcdefghijklmonp'
self.additional_headers = {}
super(TestBrokenStorlet, self).setUp(
storlet_dir='broken',
storlet_name='broken.py',
storlet_main='broken.BrokenStorlet',
storlet_file='source.txt',
version=version)
def test_get(self):
resp = dict()
req_headers = {'X-Run-Storlet': self.storlet_name}
with self.assertRaises(ClientException) as cm:
client.get_object(
self.url, self.token, self.container, self.storlet_file,
response_dict=resp, headers=req_headers)
e = cm.exception
self.assertEqual(e.http_status, 503)
class TestBrokenStorletRunPy2(TestBrokenStorlet):
def setUp(self):
super(TestBrokenStorletRunPy2, self).setUp(version=DEFAULT_PY2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -372,028,531,723,554,370 | 34.384615 | 72 | 0.694565 | false |
bolkedebruin/airflow | airflow/operators/email_operator.py | 1 | 1165 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.email.operators.email`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.email.operators.email import EmailOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.email.operators.email`.",
DeprecationWarning, stacklevel=2
)
| apache-2.0 | -6,296,083,574,177,880,000 | 39.172414 | 87 | 0.764807 | false |
anaran/olympia | apps/users/helpers.py | 1 | 3160 | import random
from django.utils.encoding import smart_unicode
import jinja2
from jingo import register, env
from tower import ugettext as _
import amo
@register.function
def emaillink(email, title=None, klass=None):
if not email:
return ""
fallback = email[::-1] # reverse
# inject junk somewhere
i = random.randint(0, len(email) - 1)
fallback = u"%s%s%s" % (jinja2.escape(fallback[:i]),
u'<span class="i">null</span>',
jinja2.escape(fallback[i:]))
# replace @ and .
fallback = fallback.replace('@', '@').replace('.', '.')
if title:
title = jinja2.escape(title)
else:
title = '<span class="emaillink">%s</span>' % fallback
node = (u'<a%s href="#">%s</a><span class="emaillink js-hidden">%s</span>'
% ((' class="%s"' % klass) if klass else '', title, fallback))
return jinja2.Markup(node)
@register.filter
def user_link(user):
if not user:
return ''
return jinja2.Markup(_user_link(user))
@register.function
def users_list(users, size=None, max_text_length=None):
if not users:
return ''
tail = []
if size and size < len(users):
users = users[:size]
tail = [_('others', 'user_list_others')]
if max_text_length:
user_list = [_user_link(user, max_text_length) for user in users]
else:
user_list = map(_user_link, users)
return jinja2.Markup(', '.join(user_list + tail))
@register.inclusion_tag('users/helpers/addon_users_list.html')
@jinja2.contextfunction
def addon_users_list(context, addon):
ctx = dict(context.items())
ctx.update(addon=addon, amo=amo)
return ctx
def _user_link(user, max_text_length=None):
if isinstance(user, basestring):
return user
username = user.name
if max_text_length and len(user.name) > max_text_length:
username = user.name[:max_text_length].strip() + '...'
return u'<a href="%s" title="%s">%s</a>' % (
user.get_url_path(), jinja2.escape(user.name),
jinja2.escape(smart_unicode(username)))
@register.filter
@jinja2.contextfilter
def user_vcard(context, user, table_class='person-info', is_profile=False):
c = dict(context.items())
c.update({
'profile': user,
'table_class': table_class,
'is_profile': is_profile
})
t = env.get_template('users/vcard.html').render(c)
return jinja2.Markup(t)
@register.inclusion_tag('users/report_abuse.html')
@jinja2.contextfunction
def user_report_abuse(context, hide, profile):
new = dict(context.items())
new.update({'hide': hide, 'profile': profile,
'abuse_form': context['abuse_form']})
return new
@register.filter
def contribution_type(type):
return amo.CONTRIB_TYPES[type]
@register.function
def user_data(amo_user):
anonymous, currency, pre_auth, email = True, 'USD', False, ''
if hasattr(amo_user, 'is_anonymous'):
anonymous = amo_user.is_anonymous()
if not anonymous:
email = amo_user.email
return {'anonymous': anonymous, 'currency': currency, 'email': email}
| bsd-3-clause | -6,784,174,316,505,766,000 | 26.008547 | 78 | 0.618671 | false |
schollii/pypubsub | src/pubsub/utils/misc.py | 1 | 1149 | """
Provides useful functions and classes. Most useful are probably
printTreeDocs and printTreeSpec.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
import sys
__all__ = ('printImported', 'Callback')
def printImported():
"""Output a list of pubsub modules imported so far"""
ll = [mod for mod in sys.modules.keys() if mod.find('pubsub') >= 0] # iter keys ok
ll.sort()
print('\n'.join(ll))
class Callback:
"""
This can be used to wrap functions that are referenced by class
data if the data should be called as a function. E.g. given
>>> def func(): pass
>>> class A:
....def __init__(self): self.a = func
then doing
>>> boo=A(); boo.a()
will fail since Python will try to call a() as a method of boo,
whereas a() is a free function. But if you have instead
"self.a = Callback(func)", then "boo.a()" works as expected.
"""
def __init__(self, callable_obj):
self.__callable = callable_obj
def __call__(self, *args, **kwargs):
return self.__callable(*args, **kwargs)
| bsd-2-clause | 7,571,920,059,007,329,000 | 28.461538 | 87 | 0.637946 | false |
Acurus/PVDB | pnvdb/models/objekt_type.py | 1 | 5437 | # -*- coding: utf-8 -*-
""" Provide the ObjektType class """
import json
import logging
from .util import _fetch_data, build_name2id
class ObjektType(object):
""" Class for individual nvdb-object types. (Data catalogue) """
def __init__(self, nvdb, objekt_type, meta=None):
self.nvdb = nvdb
if isinstance(objekt_type, int):
self.objekt_type = int(objekt_type)
else:
if isinstance(self.nvdb.name2id, dict):
self.objekt_type = self.nvdb.name2id['nvdb_objekter'][objekt_type.lower()]
else:
build_name2id(self.nvdb)
try:
self.objekt_type = self.nvdb.name2id['nvdb_objekter'][objekt_type.lower()]
except KeyError:
logging.error('Objekt_type not found: {}'.format(objekt_type))
return None
self.data = None
self.metadata
logging.debug("Initialized: ObjektType({})".format(self.objekt_type))
def __repr__(self):
return "ObjektType({})".format(self.objekt_type)
def _update_data(self):
self.data = _fetch_data(
self.nvdb, 'vegobjekttyper/{}'.format(self.objekt_type))
def dump(self, file_format='json'):
"""
Function for dumping raw API-result for object.
:param file_format: Type of data to dump as. json or xml
:type file_format: string
:returns: str
"""
if file_format.lower() == 'json':
if not self.data:
self.data = _fetch_data(self.nvdb, 'vegobjekttyper/{}'
.format(self.objekt_type))
return self.data
elif file_format.lower() == 'xml':
xml_data = _fetch_data(self.nvdb, 'vegobjekttyper/{}.xml'
.format(self.objekt_type), file_format='xml')
return xml_data
@property
def relasjonstyper(self):
"""
:Attribute type: Dict
:keys: ['barn', 'foreldre']
:keys in keys: ['type', 'relasjonstype', 'id']
"""
if not self.data:
self._update_data()
return self.data['relasjonstyper']
def egenskapstype(self, egenskapstype_id=None):
"""
Function for returning egenskap based on id
:param egenskaps_id: Id of the property type you want returned
:type egenskaps_id: int
:returns: dict unless property is not found. Then None is returned.
"""
egenskapstype = list(
filter(lambda x: x['id'] == egenskapstype_id, self.egenskapstyper))
if len(egenskapstype):
return egenskapstype[0]
return None
@property
def egenskapstyper(self):
"""
:Attribute type: list of Dicts
:keys: ['liste', 'navn', 'datatype_tekst', 'veiledning', 'beskrivelse', 'sensitivitet',
'sosinvdbnavn', 'objektliste_dato', 'feltlengde', 'sorteringsnummer', 'id',
'styringsparametere', 'viktighet', 'viktighet_tekst', 'datatype']
"""
if not self.data:
self._update_data()
return self.data['egenskapstyper']
@property
def styringsparametere(self):
"""
:Attribute type: Dict
:keys: ['abstrakt_type', 'sideposisjon_relevant', 'retning_relevant', 'ajourhold_splitt',
'må_ha_mor', 'avledet', 'sektype_20k', 'er_dataserie', 'høyde_relevant',
'dekningsgrad', 'overlapp, 'filtrering', 'flyttbar', 'tidsrom_relevant',
'ajourhold_i', 'kjørefelt_relevant']
"""
if not self.data:
self._update_data()
return self.data['styringsparametere']
@property
def metadata(self):
"""
.. todo:: Possible bug. Returns None after reading other attributes
:Attribute type: Dict
:keys: ['navn', 'veiledning', 'beskrivelse', 'objektliste_dato', 'sosinvdbnavn',
'sorteringsnummer', 'stedfesting', 'id', 'kategorier']
"""
#if self.meta:
# return self.meta
if not self.data:
self._update_data()
metadata = self.data.copy()
del metadata['egenskapstyper']
del metadata['relasjonstyper']
del metadata['styringsparametere']
self.meta = metadata
return self.meta
@property
def barn(self):
"""
:Attribute type: list of :class:`.ObjektType`
"""
if not self.data:
self._update_data()
realasjoner = self.data['relasjonstyper']
return [ObjektType(self.nvdb, i['type']['id']) for i in realasjoner['barn']]
@property
def foreldre(self):
"""
:Attribute type: list of :class:`.ObjektType`
"""
if not self.data:
self._update_data()
realasjoner = self.data['relasjonstyper']
return [ObjektType(self.nvdb, i['type']['id']) for i in realasjoner['foreldre']]
def i_objekt_lista(self):
"""
Function checking of an object type is part of "Objektlista"
:returns: bool
"""
if not self.data:
self._update_data()
if 'objektliste_dato' in self.data:
return True
else:
return False
| mit | -8,835,561,781,146,418,000 | 31.73494 | 97 | 0.54251 | false |
leapp-to/prototype | leapp/libraries/stdlib/call.py | 1 | 9615 | from __future__ import print_function
import codecs
import os
from leapp.compat import string_types
from leapp.libraries.stdlib.eventloop import POLL_HUP, POLL_IN, POLL_OUT, POLL_PRI, EventLoop
STDIN = 0
STDOUT = 1
STDERR = 2
def _multiplex(ep, read_fds, callback_raw, callback_linebuffered,
encoding='utf-8', write=None, timeout=1, buffer_size=80):
# Register the file descriptors (stdout + stderr) with the epoll object
# so that we'll get notifications when data are ready to read
for fd in read_fds:
ep.register(fd, POLL_IN | POLL_PRI)
# Register a write file descriptor
if write:
ep.register(write[0], POLL_OUT)
# Offset into the `write[1]` buffer where we should continue writing to stdin
offset = 0
# We need to keep track of which file descriptors have already been drained
# because when running under `pytest` it seems that all `epoll` events are
# received twice so using solely `ep.unregister(fd)` will not work
hupped = set()
# Total number of 'hupped' file descriptors we expect
num_expected = len(read_fds) + (1 if write else 0)
# Set up file-descriptor specific buffers where we'll buffer the output
buf = {fd: bytes() for fd in read_fds}
if encoding:
linebufs = {fd: '' for fd in read_fds}
decoders = {fd: codecs.getincrementaldecoder(encoding)() for fd in read_fds}
def _get_fd_type(fd):
"""
File descriptors passed via `read_fds` are always representing [stdout, stderr],
since arrays start at index 0, we need to add 1 to get the real symbolic value
`STDOUT` or `STDERR`.
"""
return read_fds.index(fd) + 1
while not ep.closed and len(hupped) != num_expected:
events = ep.poll(timeout)
for fd, event in events:
if event == POLL_HUP:
hupped.add(fd)
ep.unregister(fd)
if event & (POLL_IN | POLL_PRI) != 0:
fd_type = _get_fd_type(fd)
read = os.read(fd, buffer_size)
callback_raw((fd, fd_type), read)
if encoding:
linebufs[fd] += decoders[fd].decode(read)
while '\n' in linebufs[fd]:
pre, post = linebufs[fd].split('\n', 1)
linebufs[fd] = post
callback_linebuffered((fd, fd_type), pre)
buf[fd] += read
elif event == POLL_OUT:
# Write data to pipe, `os.write` returns the number of bytes written,
# thus we need to offset
wfd, data = write
if fd in hupped:
continue
offset += os.write(fd, data[offset:])
if offset == len(data):
os.close(fd)
hupped.add(fd)
ep.unregister(fd)
# Process leftovers from line buffering
if encoding:
for (fd, lb) in linebufs.items():
if lb:
# [stdout, stderr] is relayed, stdout=1 a stderr=2
# as the field starting indexed is 0, so the +1 needs to be added
callback_linebuffered((fd, _get_fd_type(fd)), lb)
return buf
def _call(command, callback_raw=lambda fd, value: None, callback_linebuffered=lambda fd, value: None,
encoding='utf-8', poll_timeout=1, read_buffer_size=80, stdin=None, env=None):
"""
:param command: The command to execute
:type command: list, tuple
:param encoding: Decode output or encode input using this encoding
:type encoding: str
:param poll_timeout: Timeout used by epoll to wait certain amount of time for activity on file descriptors
:type poll_timeout: int
:param read_buffer_size: How much data are we going to read from the file descriptors each iteration.
The default value of 80 chosen to correspond with suggested terminal line width
:type read_buffer_size: int
:param callback_raw: Callback executed on raw data (before decoding) as they are read from file descriptors
:type callback_raw: ((fd: int, fd_type: int), buffer: bytes) -> None
:param callback_linebuffered: Callback executed on decoded lines as they are read from the file descriptors
:type callback_linebuffered: ((fd: int, fd_type: int), value: str) -> None
:param stdin: String or a file descriptor that will be written to stdin of the child process
:type stdin: int, str
:param env: Environment variables to use for execution of the command
:type env: dict
:return: {'stdout' : stdout, 'stderr': stderr, 'signal': signal, 'exit_code': exit_code, 'pid': pid}
:rtype: dict
"""
if not isinstance(command, (list, tuple)):
raise TypeError('command parameter has to be a list or tuple')
if not callable(callback_raw) or\
(getattr(callback_raw, '__code__', None) and callback_raw.__code__.co_argcount != 2):
raise TypeError('callback_raw parameter has to be callable accepting 2 parameters')
if (not callable(callback_linebuffered) or (getattr(callback_linebuffered, '__code__', None) and # noqa
callback_linebuffered.__code__.co_argcount != 2)):
raise TypeError('callback_linebuffered parameter has to be callable accepting 2 parameters')
if not isinstance(poll_timeout, int) or isinstance(poll_timeout, bool) or poll_timeout <= 0:
raise ValueError('poll_timeout parameter has to be integer greater than zero')
if not isinstance(read_buffer_size, int) or isinstance(read_buffer_size, bool) or read_buffer_size <= 0:
raise ValueError('read_buffer_size parameter has to be integer greater than zero')
environ = os.environ
if env:
if not isinstance(env, dict):
raise TypeError('env parameter has to be a dictionary')
environ.update(env)
# Create a separate pipe for stdout/stderr
#
# The parent process is going to use the read-end of the pipes for reading child's
# stdout/stderr, whereas the forked children process is going to use the write-end
# of the pipes to pass data to parent
stdout, wstdout = os.pipe()
stderr, wstderr = os.pipe()
# We allow stdin to be either a file descriptor (int) or a string and we need to handle
# each of those cases differently
#
# The case where stdin is a file descriptor is simple -- we just need to dup2() the file
# descriptor into the child process' stdin. If stdin is a string, though, the situation is
# more complicated and we need to create another pipe and write the string to the pipe
# in the _multiplex function
fstdin, wstdin = None, None
stdin_fd, stdin_str = False, False
if isinstance(stdin, int):
stdin_fd = True
elif isinstance(stdin, string_types):
stdin_str = True
fstdin, wstdin = os.pipe()
elif stdin is not None:
raise TypeError('stdin has to be either a file descriptor or string, not "{!s}"'.format(type(stdin)))
ep = EventLoop()
pid = os.fork()
if pid > 0:
# Since pid > 0, we are in the parent process, so we have to close the write-end
# file descriptors
os.close(wstdout)
os.close(wstderr)
# Extra optional arguments for the `_multiplex` function
extra = {}
if stdin_str:
# NOTE: We use the same encoding for encoding the stdin string as well which might
# be suboptimal in certain cases -- there are two possible solutions:
# 1) Rather than string require the `stdin` parameter to already be bytes()
# 2) Add another parameter for stdin_encoding
extra['write'] = (wstdin, stdin.encode(encoding))
os.close(fstdin)
read = _multiplex(
ep,
[stdout, stderr],
callback_raw,
callback_linebuffered,
timeout=poll_timeout,
buffer_size=read_buffer_size,
encoding=encoding,
**extra
)
# Wait for the child to finish
pid, status = os.wait()
ep.close()
# The status variable is a 16 bit value, where the lower octet describes
# the signal which killed the process, and the upper octet is the exit code
signal, exit_code = status & 0xff, status >> 8 & 0xff
ret = {'signal': signal, 'exit_code': exit_code, 'pid': pid}
if not encoding:
ret.update({
'stdout': read[stdout],
'stderr': read[stderr]
})
else:
ret.update({
'stdout': read[stdout].decode(encoding),
'stderr': read[stderr].decode(encoding)
})
return ret
if pid == 0:
# We are in the child process, so we need to close the read-end of the pipes
# and assign our pipe's file descriptors to stdout/stderr
#
# If `stdin` is specified as a file descriptor, we simply pass it as the stdin of the
# child. In case `stdin` is specified as a string, we pass in the read end of our
# stdin pipe
if stdin_fd:
os.dup2(stdin, STDIN)
if stdin_str:
os.close(wstdin)
os.dup2(fstdin, STDIN)
os.close(stdout)
os.close(stderr)
os.dup2(wstdout, STDOUT)
os.dup2(wstderr, STDERR)
os.execvpe(command[0], command, env=environ)
| lgpl-2.1 | -6,441,964,478,796,059,000 | 43.308756 | 115 | 0.604784 | false |
our-city-app/oca-backend | src/solutions/common/integrations/timeblockr/models.py | 1 | 1400 | # -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from google.appengine.ext import ndb
from google.appengine.ext.ndb.model import TextProperty, BooleanProperty, JsonProperty, StringProperty
from rogerthat.dal import parent_ndb_key
from rogerthat.models import NdbModel
from rogerthat.rpc import users
class TimeblockrSettings(NdbModel):
url = TextProperty(default=None)
api_key = TextProperty(default=None)
enabled = BooleanProperty(default=False)
@property
def service_user(self):
return users.User(self.key.id())
@classmethod
def create_key(cls, service_user):
return ndb.Key(cls, service_user.email(), parent=parent_ndb_key(service_user))
class TimeblockrAppointment(NdbModel):
data = JsonProperty(required=True)
user_email = StringProperty(required=True)
| apache-2.0 | 6,770,343,659,073,634,000 | 32.333333 | 102 | 0.747143 | false |
mganeva/mantid | scripts/AbinsModules/IOmodule.py | 1 | 19826 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import h5py
import numpy as np
import six
import subprocess
import shutil
import hashlib
import io
import AbinsModules
import os
from mantid.kernel import logger, ConfigService
# noinspection PyMethodMayBeStatic
class IOmodule(object):
"""
Class for Abins I/O HDF file operations.
"""
def __init__(self, input_filename=None, group_name=None):
if isinstance(input_filename, str):
self._input_filename = input_filename
try:
self._hash_input_filename = self.calculate_ab_initio_file_hash()
except IOError as err:
logger.error(str(err))
except ValueError as err:
logger.error(str(err))
# extract name of file from the full path in the platform independent way
filename = os.path.basename(self._input_filename)
if filename.strip() == "":
raise ValueError("Name of the file cannot be an empty string.")
else:
raise ValueError("Invalid name of input file. String was expected.")
if isinstance(group_name, str):
self._group_name = group_name
else:
raise ValueError("Invalid name of the group. String was expected.")
core_name = filename[0:filename.rfind(".")]
save_dir_path = ConfigService.getString("defaultsave.directory")
self._hdf_filename = os.path.join(save_dir_path, core_name + ".hdf5") # name of hdf file
try:
self._advanced_parameters = self._get_advanced_parameters()
except IOError as err:
logger.error(str(err))
except ValueError as err:
logger.error(str(err))
self._attributes = {} # attributes for group
# data for group; they are expected to be numpy arrays or
# complex data sets which have the form of Python dictionaries or list of Python
# dictionaries
self._data = {}
# Fields which have a form of empty dictionaries have to be set by an inheriting class.
def _valid_hash(self):
"""
Checks if input ab initio file and content of HDF file are consistent.
:returns: True if consistent, otherwise False.
"""
saved_hash = self.load(list_of_attributes=["hash"])
return self._hash_input_filename == saved_hash["attributes"]["hash"]
def _valid_advanced_parameters(self):
"""
In case of rerun checks if advanced parameters haven't changed.
Returns: True if they are the same, otherwise False
"""
previous_advanced_parameters = self.load(list_of_attributes=["advanced_parameters"])
return self._advanced_parameters == previous_advanced_parameters["attributes"]["advanced_parameters"]
def get_previous_ab_initio_program(self):
"""
:returns: name of ab initio program which was used in the previous calculation.
"""
return self.load(list_of_attributes=["ab_initio_program"])["attributes"]["ab_initio_program"]
def check_previous_data(self):
"""
Checks if currently used ab initio file is the same as in the previous calculations. Also checks if currently
used parameters from AbinsParameters are the same as in the previous calculations.
"""
if not self._valid_hash():
raise ValueError("Different ab initio file was used in the previous calculations.")
if not self._valid_advanced_parameters():
raise ValueError("Different advanced parameters were used in the previous calculations.")
def erase_hdf_file(self):
"""
Erases content of hdf file.
"""
with h5py.File(self._hdf_filename, 'w') as hdf_file:
hdf_file.close()
def add_attribute(self, name=None, value=None):
"""
Adds attribute to the dictionary with other attributes.
:param name: name of the attribute
:param value: value of the attribute. More about attributes at: http://docs.h5py.org/en/latest/high/attr.html
"""
self._attributes[name] = value
def add_file_attributes(self):
"""
Adds file attributes: filename and hash of file to the collection of all attributes.
"""
self.add_attribute("hash", self._hash_input_filename)
self.add_attribute("filename", self._input_filename)
self.add_attribute("advanced_parameters", self._advanced_parameters)
def add_data(self, name=None, value=None):
"""
Adds data to the dictionary with the collection of other datasets.
:param name: name of dataset
:param value: value of dataset. Numpy array is expected or complex data sets which have the form of Python
dictionaries or list of Python dictionaries. More about dataset at:
http://docs.h5py.org/en/latest/high/dataset.html
"""
self._data[name] = value
def _save_attributes(self, group=None):
"""
Saves attributes to an hdf file.
:param group: group to which attributes should be saved.
"""
for name in self._attributes:
if isinstance(self._attributes[name], (np.int64, int, np.float64, float, str, bytes)):
group.attrs[name] = self._attributes[name]
else:
raise ValueError("Invalid value of attribute. String, "
"int or bytes was expected! " + name +
"= (invalid type : %s) " % type(self._attributes[name]))
def _recursively_save_structured_data_to_group(self, hdf_file=None, path=None, dic=None):
"""
Helper function for saving structured data into an hdf file.
:param hdf_file: hdf file object
:param path: absolute name of the group
:param dic: dictionary to be added
"""
for key, item in dic.items():
folder = path + key
if isinstance(item, (np.int64, int, np.float64, float, str, bytes)):
if folder in hdf_file:
del hdf_file[folder]
hdf_file[folder] = item
elif isinstance(item, np.ndarray):
if folder in hdf_file:
del hdf_file[folder]
hdf_file.create_dataset(name=folder, data=item, compression="gzip", compression_opts=9)
elif isinstance(item, dict):
self._recursively_save_structured_data_to_group(hdf_file=hdf_file, path=folder + '/', dic=item)
else:
raise ValueError('Cannot save %s type' % type(item))
def _save_data(self, hdf_file=None, group=None):
"""
Saves data in the form of numpy array, dictionary or list of dictionaries. In case data in group already exist
it will be overridden.
:param hdf_file: hdf file object to which data should be saved
:param group: group to which data should be saved.
"""
for item in self._data:
# case data to save is a simple numpy array
if isinstance(self._data[item], np.ndarray):
if item in group:
del group[item]
group.create_dataset(name=item, data=self._data[item], compression="gzip", compression_opts=9)
# case data to save has form of list
elif isinstance(self._data[item], list):
num_el = len(self._data[item])
for el in range(num_el):
self._recursively_save_structured_data_to_group(hdf_file=hdf_file,
path=group.name + "/" + item + "/%s/" % el,
dic=self._data[item][el])
# case data has a form of dictionary
elif isinstance(self._data[item], dict):
self._recursively_save_structured_data_to_group(hdf_file=hdf_file,
path=group.name + "/" + item + "/",
dic=self._data[item])
else:
raise ValueError('Invalid structured dataset. Cannot save %s type' % type(item))
def save(self):
"""
Saves datasets and attributes to an hdf file.
"""
with h5py.File(self._hdf_filename, 'a') as hdf_file:
if self._group_name not in hdf_file:
hdf_file.create_group(self._group_name)
group = hdf_file[self._group_name]
if len(self._attributes.keys()) > 0:
self._save_attributes(group=group)
if len(self._data.keys()) > 0:
self._save_data(hdf_file=hdf_file, group=group)
# Repack if possible to reclaim disk space
try:
path = os.getcwd()
temp_file = self._hdf_filename[self._hdf_filename.find(".")] + "temphgfrt.hdf5"
subprocess.check_call(["h5repack" + " -i " + os.path.join(path, self._hdf_filename) +
" -o " + os.path.join(path, temp_file)])
shutil.move(os.path.join(path, temp_file), os.path.join(path, self._hdf_filename))
except OSError:
pass # repacking failed: no h5repack installed in the system... but we proceed
except IOError:
pass
except RuntimeError:
pass
# noinspection PyMethodMayBeStatic
def _list_of_str(self, list_str=None):
"""
Checks if all elements of the list are strings.
:param list_str: list to check
:returns: True if each entry in the list is a string, otherwise False
"""
if list_str is None:
return False
if not (isinstance(list_str, list) and
all([isinstance(list_str[item], str) for item in range(len(list_str))])):
raise ValueError("Invalid list of items to load!")
return True
def _load_attributes(self, list_of_attributes=None, group=None):
"""
Loads collection of attributes from the given group.
:param list_of_attributes:
:param group: name of group
:returns: dictionary with attributes
"""
results = {}
for item in list_of_attributes:
results[item] = self._load_attribute(name=item, group=group)
return results
def _load_attribute(self, name=None, group=None):
"""
Loads attribute.
:param group: group in hdf file
:param name: name of attribute
:returns: value of attribute
"""
if name not in group.attrs:
raise ValueError("Attribute %s in not present in %s file." % (name, self._hdf_filename))
else:
return group.attrs[name]
def _load_datasets(self, hdf_file=None, list_of_datasets=None, group=None):
"""
Loads structured dataset which has a form of Python dictionary directly from an hdf file.
:param hdf_file: hdf file object from which data should be loaded
:param list_of_datasets: list with names of datasets to be loaded
:param group: name of group
:returns: dictionary with datasets
"""
results = {}
for item in list_of_datasets:
results[item] = self._load_dataset(hdf_file=hdf_file, name=item, group=group)
return results
# noinspection PyMethodMayBeStatic
def _get_subgrp_name(self, path=None):
"""
Extracts name of the particular subgroup from the absolute name.
:param path: absolute name of subgroup
:returns: name of subgroup
"""
reversed_path = path[::-1]
end = reversed_path.find("/")
return reversed_path[:end]
# noinspection PyMethodMayBeStatic
def _convert_unicode_to_string_core(self, item=None):
"""
Convert atom element from unicode to str
but only in Python 2 where unicode handling is a mess
:param item: converts unicode to item
:returns: converted element
"""
assert isinstance(item, six.text_type)
return item.encode('utf-8')
def _convert_unicode_to_str(self, object_to_check=None):
"""
Converts unicode to Python str, works for nested dicts and lists (recursive algorithm). Only required
for Python 2 where a mismatch with unicode/str objects is a problem for dictionary lookup
:param object_to_check: dictionary, or list with names which should be converted from unicode to string.
"""
if six.PY2:
if isinstance(object_to_check, list):
for i in range(len(object_to_check)):
object_to_check[i] = self._convert_unicode_to_str(object_to_check[i])
elif isinstance(object_to_check, dict):
for item in object_to_check:
if isinstance(item, six.text_type):
decoded_item = self._convert_unicode_to_string_core(item)
item_dict = object_to_check[item]
del object_to_check[item]
object_to_check[decoded_item] = item_dict
item = decoded_item
object_to_check[item] = self._convert_unicode_to_str(object_to_check[item])
# unicode element
elif isinstance(object_to_check, six.text_type):
object_to_check = self._convert_unicode_to_string_core(object_to_check)
return object_to_check
def _load_dataset(self, hdf_file=None, name=None, group=None):
"""
Loads one structured dataset.
:param hdf_file: hdf file object from which structured dataset should be loaded.
:param name: name of dataset
:param group: name of the main group
:returns: loaded dataset
"""
if not isinstance(name, str):
raise ValueError("Invalid name of the dataset.")
if name in group:
hdf_group = group[name]
else:
raise ValueError("Invalid name of the dataset.")
# noinspection PyUnresolvedReferences,PyProtectedMember
if isinstance(hdf_group, h5py._hl.dataset.Dataset):
return hdf_group.value
elif all([self._get_subgrp_name(path=hdf_group[el].name).isdigit() for el in hdf_group.keys()]):
structured_dataset_list = []
# here we make an assumption about keys which have a numeric values; we assume that always : 1, 2, 3... Max
num_keys = len(hdf_group.keys())
for item in range(num_keys):
structured_dataset_list.append(
self._recursively_load_dict_contents_from_group(hdf_file=hdf_file,
path=hdf_group.name + "/%s" % item))
return self._convert_unicode_to_str(object_to_check=structured_dataset_list)
else:
return self._convert_unicode_to_str(
object_to_check=self._recursively_load_dict_contents_from_group(hdf_file=hdf_file,
path=hdf_group.name + "/"))
def _recursively_load_dict_contents_from_group(self, hdf_file=None, path=None):
"""
Loads structure dataset which has form of Python dictionary.
:param hdf_file: hdf file object from which dataset is loaded
:param path: path to dataset in hdf file
:returns: dictionary which was loaded from hdf file
"""
ans = {}
for key, item in hdf_file[path].items():
# noinspection PyUnresolvedReferences,PyProtectedMember,PyProtectedMember
if isinstance(item, h5py._hl.dataset.Dataset):
ans[key] = item.value
elif isinstance(item, h5py._hl.group.Group):
ans[key] = self._recursively_load_dict_contents_from_group(hdf_file, path + key + '/')
return ans
def load(self, list_of_attributes=None, list_of_datasets=None):
"""
Loads all necessary data.
:param list_of_attributes: list of attributes to load (list of strings with names of attributes)
:param list_of_datasets: list of datasets to load. It is a list of strings with names of datasets.
Datasets have a form of numpy arrays. Datasets can also have a form of Python
dictionary or list of Python dictionaries.
:returns: dictionary with both datasets and attributes
"""
results = {}
with h5py.File(self._hdf_filename, 'r') as hdf_file:
if self._group_name not in hdf_file:
raise ValueError("No group %s in hdf file." % self._group_name)
group = hdf_file[self._group_name]
if self._list_of_str(list_str=list_of_attributes):
results["attributes"] = self._load_attributes(list_of_attributes=list_of_attributes, group=group)
if self._list_of_str(list_str=list_of_datasets):
results["datasets"] = self._load_datasets(hdf_file=hdf_file,
list_of_datasets=list_of_datasets,
group=group)
return results
# noinspection PyMethodMayBeStatic
def _calculate_hash(self, filename=None):
"""
Calculates hash of a file defined by filename according to sha512 algorithm.
:param filename: name of a file to calculate hash (full path to the file)
:returns: string representation of hash
"""
return self._calculate_hash_core(filename=filename, coding='utf-8')
def _calculate_hash_core(self, filename=None, coding=None):
"""
Helper function for calculating hash.
:param filename: name of a file to calculate hash
:returns: string representation of hash
"""
hash_calculator = hashlib.sha512()
# chop content of a file into chunks to minimize memory consumption for hash creation
buf = AbinsModules.AbinsConstants.BUF
with io.open(file=filename, mode="rt", encoding=coding, buffering=buf, newline=None) as f:
while True:
data = f.read(buf)
if not data:
break
hash_calculator.update(data.encode(coding))
return hash_calculator.hexdigest()
def _get_advanced_parameters(self):
"""
Calculates hash of file with advanced parameters.
Returns: string representation of hash for file with advanced parameters
which contains only hexadecimal digits
"""
h = self._calculate_hash(filename=AbinsModules.AbinsParameters.__file__.replace(".pyc", ".py"))
return h
def get_input_filename(self):
return self._input_filename
def calculate_ab_initio_file_hash(self):
"""
This method calculates hash of the file with vibrational or phonon data according to SHA-2 algorithm from
hashlib library: sha512.
:returns: string representation of hash for file with vibrational data which contains only hexadecimal digits
"""
return self._calculate_hash(filename=self._input_filename)
| gpl-3.0 | 4,607,640,801,790,355,000 | 41.004237 | 119 | 0.58968 | false |
kickstandproject/ripcord | ripcord/tests/db/domain/test_create.py | 1 | 2322 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from ripcord.common import exception
from ripcord.openstack.common import uuidutils
from ripcord.tests.db import base
class TestCase(base.FunctionalTest):
def test_all_fields(self):
row = {
'disabled': True,
'id': 1,
'name': 'example.org',
'project_id': '793491dd5fa8477eb2d6a820193a183b',
'updated_at': None,
'user_id': '02d99a62af974b26b510c3564ba84644',
}
res = self.db_api.create_domain(
name=row['name'], disabled=row['disabled'],
project_id=row['project_id'], user_id=row['user_id'])
for k, v in row.iteritems():
self.assertEqual(res[k], v)
self.assertEqual(type(res['created_at']), datetime.datetime)
self.assertTrue(uuidutils.is_uuid_like(res['uuid']))
# NOTE(pabelanger): We add 3 because of created_at, uuid, and hidden
# sqlalchemy object.
self.assertEqual(len(res.__dict__), len(row) + 3)
def test_domain_already_exists(self):
row = {
'disabled': False,
'name': 'example.org',
'project_id': '793491dd5fa8477eb2d6a820193a183b',
'updated_at': None,
'user_id': '02d99a62af974b26b510c3564ba84644',
}
res = self.db_api.create_domain(
name=row['name'], disabled=row['disabled'],
project_id=row['project_id'], user_id=row['user_id'])
self.assertTrue(res)
self.assertRaises(
exception.DomainAlreadyExists,
self.db_api.create_domain,
name=row['name'], project_id=row['project_id'],
user_id=row['user_id'])
| apache-2.0 | 3,682,769,967,210,309,000 | 34.723077 | 76 | 0.625754 | false |
wbthomason/cs3240-onedir | server.py | 1 | 10957 | import os
import json
import time
from subprocess import call
from twisted.web.server import Site, NOT_DONE_YET
from twisted.internet import ssl, reactor
from twisted.web.resource import Resource
import db_access
class FileServerResource(Resource):
def __init__(self):
Resource.__init__(self)
self.db = db_access.connect()
self.putChild("user", UserResource(self.db))
self.putChild("check", CheckResource(self.db))
self.putChild("files", FileResource(self.db))
class UserResource(Resource):
def __init__(self, db):
Resource.__init__(self)
self.db = db
def getChild(self, path, request):
return self
def render_POST(self, request):
urlparts = request.path.split("/")
if urlparts[-1] == 'auth':
# Need to escape the args for security
email = request.args['email'][0]
passw = request.args['passw'][0]
print "Doing auth stuff! Got data: %s, %s" % (email, passw)
if not db_access.login(email, passw, self.db):
logstr = "%f: Failed login for %s from %s\n" % (time.time(), email, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
return json.dumps({'auth_key': 0})
elif urlparts[-1] == 'create':
# Same as above
email = request.args['email'][0]
passw = request.args['passw'][0]
print "Creating a user! Got data: %s, %s" % (email, passw)
db_access.create_account(email, passw, self.db)
logstr = "%f: Created %s as requested by %s\n" % (time.time(), email, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
# Assume both email and password are being changed. No change is
# accomplished by passing the same arg for new.
elif urlparts[-1] == 'update':
old_email = request.args['old_email'][0]
old_password = request.args['old_password'][0]
new_email = request.args['new_email'][0]
new_password = request.args['new_password'][0]
if db_access.login(old_email, old_password, self.db):
db_access.update_account(old_email, old_password, new_email, new_password, self.db)
call("mv " + "./files/%s" % old_email + " ./files/%s" % new_email, shell=True)
logstr = "%f: Updated from %s and %s to %s and %s from IP %s\n" % (
time.time(), old_email, old_password, new_email, new_password, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
elif urlparts[-1] == 'delete':
email = request.args['email'][0]
password = request.args['password'][0]
if db_access.login(email, password, self.db):
db_access.delete_account(email, self.db)
call("rm -rf " + "./files/%s" % email, shell=True)
logstr = "%f: Deleted %s from %s\n" % (time.time(), email, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
return json.dumps({'auth_key': 0})
elif urlparts[-1] == 'admin':
password = request.args['password'][0]
if db_access.login('admin', password, self.db):
command = request.args['command'][0]
if command == "users":
logstr = "%f: Admin listed users from %s\n" % (time.time(), str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
return json.dumps({'users': db_access.list_users(self.db)})
elif command == "files":
email = request.args['email'][0]
logstr = "%f: Admin listed users from %s\n" % (time.time(), str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
return json.dumps({'files': db_access.get_files(email, self.db)})
elif command == "change":
old_email = request.args['old_email'][0]
new_email = request.args['new_email'][0]
new_password = request.args['new_password'][0]
db_access.update_account(old_email, '', new_email, new_password, self.db)
logstr = "%f: Admin updated from %s to %s and %s from IP %s\n" % (
time.time(), old_email, new_email, new_password, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
elif command == "remove":
email = request.args['email'][0]
db_access.delete_account(email, self.db)
logstr = "%f: Admin deleted %s from %s\n" % (time.time(), email, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
return json.dumps({'auth_key': 1})
class CheckResource(Resource):
def __init__(self, db):
Resource.__init__(self)
self.db = db
def render_GET(self, request):
id = db_access.get_id(request.args['email'][0], self.db)
user = request.args['email'][0]
last_check = request.args['last_check'][0]
cur = self.db.cursor()
checker = "SELECT file FROM user_files WHERE user_id='%d' AND last_update > '%f'" % (int(id), float(last_check))
cur.execute(checker)
res = cur.fetchall()
data = [item[0] for item in res]
print data
logstr = "%f: Check request for %s from %s\n" % (time.time(), user, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
return json.dumps(data)
# May need to fix things to stream properly.
class FileResource(Resource):
def __init__(self, db):
Resource.__init__(self)
self.db = db
# Gets file specified in query string. I *think* this streams it, though I need to verify that.
def render_GET(self, request):
directory = "./files/%s/" % request.args['username'][0]
# Behind the scenes work to get versioning data
username = request.args['username'][0]
file_name_raw = request.args['filename'][0]
version = int(db_access.get_version(username, file_name_raw, self.db))
file_parts = file_name_raw.split(".")
file_parts.append(str(version))
# Python is a beautiful, terrifying language
file_name = "."
file_name = file_name.join(file_parts)
request.setHeader('Content-Length', os.stat(directory + file_name).st_size)
with open("./files/%s/%s" % (username, file_name), 'rb') as readFile:
request.write(readFile.read())
logstr = "%f: Request for %s for %s from %s\n" % (
time.time(), file_name_raw, username, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
request.finish()
return NOT_DONE_YET
# Again, I *think* this streams the file (though, now that I think about it, content.read() definitely doesn't...)
def render_PUT(self, request):
file_name_raw = request.args['filename'][0]
username = request.args['username'][0]
# Get the version number, increment it by 1, and secretly make that the file name
version = int(db_access.get_version(username, file_name_raw, self.db))
file_parts = file_name_raw.split(".")
file_parts.append(str(version + 1))
# Python is a beautful, terrifying language
file_name = "."
file_name = file_name.join(file_parts)
# Update the DB with current version
db_access.inc_version(username, file_name_raw, version, self.db)
# Because nested one-liners are great coding practice
morepath = '/'.join(file_name.split('/')[:-1])
directory = "./files/%s/" % request.args['username'][0]
full_dir = directory + morepath + '/'
if not os.path.exists(full_dir):
os.makedirs(full_dir)
with open(directory + file_name, 'wb') as writeFile:
writeFile.write(request.content.read())
cur = self.db.cursor()
user_id = int(db_access.get_id(username, self.db))
file_size = int(request.args['filesize'][0])
updated = "INSERT INTO user_files (user_id, file, size, last_update) VALUES ('%(uid)d', '%(file)s', '%(size)d', '%(time)f') " \
"ON DUPLICATE KEY UPDATE last_update='%(time)f', size='%(size)d'" \
% {'uid': user_id, 'file': file_name_raw, 'size': file_size, 'time': time.time()}
# "UPDATE user_files SET last_update='%f' WHERE file='%s' AND user_id='%d'" % (time.time(), file_name, user_id)
cur.execute(updated)
self.db.commit()
request.write('received')
logstr = "%f: %s pushed %s from %s\n" % (time.time(), username, file_name_raw, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
request.finish()
return NOT_DONE_YET
def render_DELETE(self, request):
file_name_raw = request.args['filename'][0]
username = request.args['username'][0]
cur = self.db.cursor()
user_id = int(db_access.get_id(username, self.db))
killswitch = "DELETE FROM user_files WHERE user_id='%(uid)d' AND file='%(filename)s'" % {'uid': user_id,
'filename': file_name_raw}
cur.execute(killswitch)
self.db.commit()
directory = "./files/%s/" % request.args['username'][0]
print directory + file_name_raw + '*'
call("rm -rf " + directory + file_name_raw + '*', shell=True)
logstr = "%f: %s deleted %s from %s\n" % (time.time(), username, file_name_raw, str(request.getClientIP()))
print logstr
with open('./log.txt', 'a') as log:
log.write(logstr)
request.finish()
return NOT_DONE_YET
if __name__ == "__main__":
resource = FileServerResource()
factory = Site(resource)
with open("onedirkey.crt") as keycert:
cert = ssl.PrivateCertificate.loadPEM(keycert.read())
reactor.listenTCP(3240, factory)
print "Listening on 3240."
reactor.run()
| mit | -6,860,069,166,581,717,000 | 42.137795 | 135 | 0.544401 | false |
jbrambleDC/simulacrum | simulacrum/dataset.py | 1 | 6131 | import pandas as pd
from faker import Faker
from uuid import uuid4
import logging
import numpy as np
from datetime import datetime
class DataSet:
def __init__(self, length, **kwargs):
self.data = self.create(length, **kwargs)
def get_data(self):
return self.data
def num_data(self, ty, length):
a = ty['min']
b = ty['max']
return pd.Series(np.random.uniform(a, b, length))
def num_int(self, ty, length):
a = ty['min']
b = ty['max']
return pd.Series(np.random.random_integers(a, b, length))
def norm_data(self, ty, length):
if len(ty) == 1:
return pd.Series(np.random.standard_normal(size=length))
mean = ty['mean']
sd = ty['sd']
return pd.Series(np.random.normal(mean, sd, length))
def exp_data(self, ty, length):
B = float(1) / float(ty['lam'])
return pd.Series(np.random.exponential(B, length))
def binom_data(self, ty, length):
n = ty['n']
p = ty['p']
return pd.Series(np.random.binomial(n, p, length))
def poisson_data(self, ty, length):
lam = ty['lam']
return pd.Series(np.random.poisson(lam, length))
def text_data(self, ty, length):
res = []
f = Faker()
for _ in range(0, length - 1):
res.append(f.text())
return pd.Series(res)
def name_data(self, ty, length):
res = []
f = Faker()
for _ in range(0, length - 1):
res.append(f.name())
return pd.Series(res)
def cats_data(self, ty, length):
res = []
f = Faker()
for _ in range(0, length - 1):
res.append(f.name())
return pd.Series(res)
def date_data(self, ty, length):
# TODO add error handling and validation for date strings passed
res = []
f = Faker()
begin = datetime.strptime(ty['begin'], '%Y-%m-%d')
end = datetime.strptime(ty['end'], '%Y-%m-%d')
for _ in range(0, length - 1):
res.append(f.date_time_between_dates(datetime_start=begin,
datetime_end=end))
return pd.Series(res)
def coords_data(self, ty, length):
lat_min = ty['lat_min']
lat_max = ty['lat_max']
lon_min = ty['lon_min']
lon_max = ty['lon_max']
if lat_min not in range(-90, 90) or lat_min > lat_max:
logging.error('lat ranges unacceptable; not in [-90, 90] or lat_min > lat_max')
if lon_min not in range(-180, 180) or lon_min > lon_max:
logging.error('lon ranges unacceptable; not in [-180, 180] or lon_min > lon_max')
return pd.Series(zip(np.random.uniform(lat_min, lat_max, length),
np.random.uniform(lat_min, lat_max, length)))
def address_data(self, ty, length):
res = []
f = Faker()
for _ in range(0, length - 1):
res.append(f.address())
return pd.Series(res)
def zip_data(self, ty, length):
res = []
f = Faker()
for _ in range(0, length - 1):
res.append(f.name())
return pd.Series(res)
@staticmethod
def uuid_data(ty, length):
"""
Generate a column of random uuids.
:param length: The number of uuids.
:type length: int.
:return: The column of uuids.
:rtype: pd.Series
"""
return pd.Series(list(map(lambda _: uuid4(), range(length))))
@staticmethod
def faker_data(ty, length):
"""
Generate a column based on any faker data type.
:param ty: A configuration for the faker data. Must contain faker provider and related args as dict.
:param length: The number of rows wanted.
:param ty: dict.
:param length: The number of rows wanted.
:type length: int.
:return: The column of Faker data.
:rtype: pd.Series
"""
try:
provider = ty["provider"]
del ty["provider"]
return pd.Series(list(map(lambda _: getattr(Faker(), provider)(**ty), range(length))))
except KeyError:
raise KeyError("You have to define the Faker provider.")
except AttributeError:
raise AttributeError("Faker().{}() is not a valid Faker provider.".format(provider))
def create(self, length, cols=None, types=None, coltypes=None):
series_res = {}
ops = {'num': self.num_data,
'int': self.num_int,
'norm': self.norm_data,
'exp': self.exp_data,
'bin': self.binom_data,
'pois': self.poisson_data,
'txt': self.text_data,
'name': self.name_data,
'addr': self.address_data,
'zip': self.zip_data,
'date': self.date_data,
'uuid': self.uuid_data,
'faker': self.faker_data}
if cols and types and coltypes:
logging.error('coltypes should not be defined when cols and types are defined')
if (cols and not types) or (types and not cols):
logging.error('cols and types must both be defined together, as lists')
if (cols and types):
validate_types(types)
if len(cols) != len(types):
logging.error('cols and types must be lists of equal length')
for i in len(cols):
series_res[col[i]] = ops[types[i]['type']](types[i], length)
else:
if not coltypes:
logging.error('please define either cols and types or coltypes')
# Assure iteritems compatibility throught 2.7 and 3+
try:
coltypes_items = coltypes.iteritems()
except AttributeError:
coltypes_items = coltypes.items()
for col, typ in coltypes_items:
data_builder = ops[typ['type']]
del typ['type']
series_res[col] = data_builder(typ, length)
return pd.DataFrame(series_res)
| mit | -2,031,743,940,998,872,600 | 32.140541 | 108 | 0.536617 | false |
monovertex/ygorganizer | ygo_cards/tasks/sets.py | 1 | 8666 | from __future__ import absolute_import
from celery import shared_task
from ygo_cards.models import Card, CardVersion, CardSet, UserCardVersion
from ygo_core.utils import process_string, slugify
from ygo_variables.models import Variable
import unirest
import urllib
from ygo_cards.utils import sn_has_language_code, sn_normalize
from ygo_cards.tasks.utils import output_print
import dateutil.parser
from django.db import transaction
API_SETS_LIST = 'http://yugiohprices.com/api/card_sets'
API_SET = 'http://yugiohprices.com/api/set_data/{}'
def combine_prices(a, b):
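    """Merge two price responses from the API for the same card version.

    Both arguments are raw API payloads of the form
    {'status': ..., 'data': {'prices': {...}}}. If only one of them
    succeeded it is returned unchanged; if both failed, None is returned.
    Otherwise the merged result keeps the lowest 'low', the highest 'high'
    and takes the remaining fields from whichever source was updated most
    recently.
    """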
    # If both sources failed there is nothing to merge; if only one
    # succeeded, use it as-is.
    if a['status'] != 'success' and b['status'] != 'success':
        return None
    elif a['status'] != 'success':
        return b
    elif b['status'] != 'success':
        return a
a = a['data']['prices']
b = b['data']['prices']
preferred_source = None
try:
a['updated_at'] = dateutil.parser.parse(a['updated_at'])
except:
preferred_source = b
try:
b['updated_at'] = dateutil.parser.parse(b['updated_at'])
except:
preferred_source = a
if preferred_source is None:
preferred_source = (a if a['updated_at'] > b['updated_at'] else b)
result = {
'status': 'success',
'data': {
'prices': {
'updated_at': preferred_source['updated_at']
}
}
}
    for key in a:
        # 'updated_at' has already been merged above; skip it so the numeric
        # merge below does not overwrite it with None.
        if key == 'updated_at':
            continue
result_value = None
try:
value_a = float(a[key])
except:
value_a = None
try:
value_b = float(b[key])
except:
value_b = None
if value_a is None and value_b is not None:
result_value = value_b
elif value_a is not None and value_b is None:
result_value = value_a
elif value_a is not None and value_b is not None:
if key == 'low':
result_value = min(value_a, value_b)
elif key == 'high':
result_value = max(value_a, value_b)
else:
result_value = float(preferred_source[key])
result['data']['prices'][key] = result_value
return result
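# For reference, both arguments are expected to follow the price payload shape
# consumed above (keys other than 'updated_at', 'low' and 'high' depend on the
# API; the values here are made up):
#   {"status": "success",
#    "data": {"prices": {"updated_at": "2016-01-01T00:00:00Z",
#                        "low": 0.25, "high": 4.00}}}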
@shared_task
def fetch_sets(output=output_print):
step = Variable.objects.get(identifier='fetch-sets-step')
# Fetch a list of sets and mark all sets for updating.
if step.get() == 0:
output(u' ### Fetching list of sets ### ')
created = 0
response = unirest.get(API_SETS_LIST)
if response.code == 200:
for name in response.body:
name = process_string(name)
try:
CardSet.objects.create(name=name)
created += 1
except:
pass
CardSet.objects.all().update(requires_update=True)
step.set(1)
output(u'{:d} card sets created.'.format(created))
else:
output(u'API call failed')
# Fetch individual sets.
elif step.get() == 1:
output(u' --- Fetching individual sets --- ')
limit = Variable.objects.get(identifier='fetch-sets-max').get()
sets = CardSet.objects.filter(requires_update=True)[:limit]
if len(sets):
for card_set in sets:
output(u'Fetching set {}...'.format(card_set.name))
response = unirest.get(
API_SET.format(urllib.quote(card_set.name, '')))
if (response.code != 200
or response.body['status'] != 'success'):
output(u'=!= Failed set {}.'.format(card_set.name))
card_set.with_language_code = True
for card_source in response.body['data']['cards']:
for card_version_source in card_source['numbers']:
if not sn_has_language_code(
card_version_source['print_tag']):
card_set.with_language_code = False
break
if not card_set.with_language_code:
break
new_card_versions = {}
for card_source in response.body['data']['cards']:
card = Card.find_or_create(
name=card_source['name']
)
for card_version_source in card_source['numbers']:
set_number = sn_normalize(
card_version_source['print_tag'],
card_set.with_language_code
)
rarity = slugify(card_version_source['rarity'])
if (set_number in new_card_versions and
rarity in new_card_versions[
set_number]):
new_card_versions[set_number][rarity][
'price_data'] = (combine_prices(
new_card_versions[
set_number][rarity]['price_data'],
card_version_source['price_data']))
else:
if set_number not in new_card_versions:
new_card_versions[set_number] = {}
new_card_versions[set_number][rarity] = {
'card': card
}
new_card_versions[set_number][
rarity]['price_data'] = (
card_version_source['price_data'])
new_card_versions_pks = []
for set_number, rarities in new_card_versions.iteritems():
for rarity, data in rarities.iteritems():
card_version = CardVersion.find_or_create(
set_number=set_number,
card=data['card'],
card_set=card_set,
rarity=rarity
)
new_card_versions_pks.append(card_version.pk)
data['card_version'] = card_version
if (data['price_data'] and
data['price_data']['status'] == 'success'):
card_version.set_prices(data['price_data'])
else:
card_version.clear_prices()
junk_card_versions = (
CardVersion.objects
.filter(card_set=card_set)
.exclude(pk__in=new_card_versions_pks)
.prefetch_related('user_card_versions',
'user_card_versions__user')
.select_related('rarity')
.distinct())
for card_version in junk_card_versions:
set_number = sn_normalize(
card_version.set_number,
card_set.with_language_code
)
rarity = unicode(card_version.rarity.identifier)
try:
actual_card_version = new_card_versions[set_number][
rarity]['card_version']
except:
try:
actual_card_version = new_card_versions[
set_number].itervalues().next()['card_version']
except:
card_version.dirty = True
card_version.save()
continue
with transaction.atomic():
for item in card_version.user_card_versions.all():
try:
user_card_version = (
UserCardVersion.objects
.get(card_version=card_version,
user=item.user))
user_card_version.have_count += item.have_count
user_card_version.save()
except:
item.card_version = actual_card_version
item.save()
card_version.delete()
card_set.requires_update = False
card_set.save()
output(u'Fetched.')
else:
step.set(0)
| mit | 2,607,918,986,943,571,500 | 34.371429 | 79 | 0.452804 | false |
MichaelMGonzalez/MagneticFieldLocalization | SerialCommunication/PIDLerner.py | 1 | 4531 | from Communicator import *
from PID_Q_Lerner import *
import json
import os
import time
import atexit
t_fmt = "{00:g}"
inf = float("inf")
stable_factor = 40
timeout = 13
timeout_penalty = 100
count_until = 2
class PIDLerner:
def __init__(self, learning_factor = .1):
self.arduino = SerialComm()
self.log_file = "data_log_r10.json"
self.state = {}
self.ittr = 0
self.reset()
self.global_start = time.time()
self.times = []
self.learning_space = SearchSpace(res = 10 )
if os.path.exists( self.log_file ):
self.learning_space.load_from_file( self.log_file )
self.curr_node = self.learning_space.get_random_node()
self.get_next_node()
self.l_f = learning_factor
self.best_time = inf
self.best_t_coord = None
def reset(self):
self.stable_t = inf
self.sim_start = time.time()
self.r_o_f = inf
self.l_o_f = inf
self.p_f = inf
self.r_t = self.sim_start
self.l_t = self.sim_start
# Reset Arduino
self.arduino.write("STOP", None)
time.sleep(.2)
self.arduino.write("MOVE_FORWARD", None)
self.state["REPORTING_STATE"] = 0
#for k in self.state: self.state[k] = inf
def print_state(self):
if os.name != "nt":
os.system("clear")
global_t = int(time.time() - self.global_start)
s = str(global_t % 60).zfill(2)
m = str((global_t / 60)%60).zfill(2)
h = str(global_t / 3600)
print "\nUptime:", str( h + ":" + m + ":" + s )
print "\nIteration:", str(self.ittr)
t = time.time() - self.sim_start
s_t = t - self.stable_t
if s_t == t: s_t = 0
print "\nRobot Readings\n"
print "BAUD RATE: ", self.arduino.baud_rate, "\n"
for s in sorted(self.state): print s, self.state[s]
print "\nOscillation Factor:\n"
print "Right Wheel Oscillation Factor:", self.r_o_f
print "Left Wheel Oscillation Factor:", self.l_o_f
print "Product Factor:", self.p_f
print "Stable for", s_t, "seconds"
print "Time:", t
#print "Times Collected:", self.times
#print self.learning_space
print "\nBest Time", self.best_time
print "Best Time Observed at: ", str( self.best_t_coord )
def set_new_pd_vals( self, msg_delay=.3 ):
self.reset()
n = self.curr_node
self.arduino.write("SET_P", float(n.p))
time.sleep(msg_delay)
self.arduino.write("SET_D", float(n.d))
time.sleep(msg_delay)
def q_update( self, reward ):
self.ittr += 1
self.curr_node.times.append(reward)
e = self.prev_edge
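        # Exponentially weighted moving average of the reward (elapsed time or
        # timeout penalty); lower edge weights are preferred by get_min_edge().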
e.weight = ( 1.0 - self.l_f ) * e.weight
e.weight += self.l_f * reward
self.get_next_node()
self.set_new_pd_vals()
self.print_state()
self.learning_space.dump_to_file( self.log_file )
def get_next_node( self ):
self.prev_edge = self.curr_node.get_min_edge()
self.curr_node = self.prev_edge.other
def check_threshold( self, t ):
# Has the threshold been reached?
if abs(self.p_f) < stable_factor:
if not self.stable_t: self.stable_t = t
if t-self.stable_t > count_until:
self.times.append( t )
if t < self.best_time:
self.best_time = t
self.best_t_coord = self.curr_node
self.q_update(t)
else: self.stable_t = 0
def run_lerner(self):
has_run = False
try:
state = self.state
while True:
self.learning_space.active = self.curr_node
t = time.time() - self.sim_start
if t > timeout: self.q_update( timeout_penalty )
self.p_f = (self.l_o_f * self.r_o_f)
self.check_threshold(t)
v = None
if self.arduino.communicator.inWaiting():
v = self.arduino.read()
if v:
if not has_run:
self.set_new_pd_vals()
self.arduino.write("MOVE_FORWARD", None)
has_run = True
msg,val = v
# Has state changed?
if msg in state and val == state[msg]: continue
if str(msg) == "REPORTING_R_CONTROL" and msg in state:
dt = t - self.r_t
self.r_t = t
self.r_o_f = (float(val) - float(state[msg]))/dt
if str(msg) == "REPORTING_L_CONTROL" and msg in state:
dt = t - self.l_t
self.l_t = t
self.l_o_f = (float(val) - float(state[msg]))/dt
state[msg] = val
self.print_state()
except KeyboardInterrupt:
print "Exiting..."
finally:
self.arduino.write("STOP", None)
self.arduino.close()
self.learning_space.dump_to_file( self.log_file )
if __name__ == "__main__":
lerner = PIDLerner()
lerner.run_lerner()
| gpl-3.0 | -9,066,643,630,799,897,000 | 30.685315 | 63 | 0.590598 | false |
quarkslab/arybo | benchs/cmp.py | 1 | 1673 | #!/usr/bin/env python3
#
import sys
if len(sys.argv) <= 2:
print("Usage: %s ref new" % sys.argv[0])
sys.exit(1)
reff = sys.argv[1]
newf = sys.argv[2]
class BenchRes:
def __init__(self, name, time_ms, mem_mb):
self.name = name
self.time_ms = time_ms
self.mem_mb = mem_mb
def __repr__(self):
return "%s\t%0.2f\t%0.2f" % (self.name, self.time_ms, self.mem_mb)
def str_res(self):
return "%0.2f\t%0.2f" % (self.time_ms, self.mem_mb)
def read_benchs(f):
fd = open(f, "r")
ret = list()
for l in fd:
l = l.strip().split('\t')
br = BenchRes(l[0], float(l[1]), float(l[2]))
ret.append(br)
ret = sorted(ret, key=lambda r: r.name)
return ret
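# Each input file is expected to hold one tab-separated record per line,
# e.g. (made-up numbers):
#   some_bench<TAB>12.34<TAB>56.78
# i.e. benchmark name, time in ms, memory in MB.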
def gain(old, new):
return old/new
def gain_time(old, new):
return gain(old.time_ms, new.time_ms)
def gain_mem(old, new):
return gain(old.mem_mb, new.mem_mb)
ref = read_benchs(reff)
new = read_benchs(newf)
#print(ref)
#print(new)
print("name\ttime_old\ttime_new\ttime_gain\tmem_old\tmem_new\tmem_reduction")
iref = 0
inew = 0
while iref < len(ref) and inew < len(new):
br_o = ref[iref]
br_n = new[inew]
if br_o.name == br_n.name:
print("%s\t%0.2f\t%0.2f\t%0.2f\t%0.2f\t%0.2f\t%0.2f" % (br_o.name, br_o.time_ms, br_n.time_ms, gain_time(br_o, br_n), br_o.mem_mb, br_n.mem_mb, gain_mem(br_o, br_n)))
iref += 1
inew += 1
elif br_o.name < br_n.name:
print("%s\t%0.2f\tNA\tNA\t%0.2f\tNA\tNA" % (br_o.name, br_o.time_ms, br_o.mem_mb))
iref += 1
else:
print("%s\tNA\t%0.2f\tNA\tNA\t%0.2f\tNA" % (br_n.name, br_n.time_ms, br_n.mem_mb))
inew += 1
| bsd-3-clause | 1,771,513,749,114,655,000 | 24.348485 | 174 | 0.558279 | false |
lab11/M-ulator | platforms/HT_m3/programming/mbus_message.py | 1 | 2878 | #!/usr/bin/python
import sys
import logging
from m3_common import m3_common
m3_common.configure_root_logger()
logger = logging.getLogger(__name__)
class mbus_message_generator(m3_common):
TITLE = "MBus Message Generator"
def parse_args(self):
if len(sys.argv) not in (2,):
logger.info("USAGE: %s SERAIL_DEVICE\n" % (sys.argv[0]))
logger.info("")
sys.exit(2)
self.serial_path = sys.argv[1]
def install_handler(self):
self.ice.msg_handler['B++'] = self.Bpp_callback
self.ice.msg_handler['b++'] = self.Bpp_callback
def Bpp_callback(self, address, data, broadcast, success):
logger.info("")
logger.info("Received MBus message:")
logger.info(" address: " + address.encode('hex'))
logger.info(" data: " + data.encode('hex'))
logger.info("broadcast: " + str(broadcast))
logger.info(" success: " + str(success))
logger.info("")
def read_binfile(self):
pass
def set_master(self):
self.ice.mbus_set_master_onoff(True)
def set_slave(self):
self.ice.mbus_set_master_onoff(False)
m = mbus_message_generator()
m3_common.do_default("Run power-on sequence", m.power_on)
m3_common.do_default("Reset M3", m.reset_m3)
m3_common.do_default("Act as MBus master", m.set_master, m.set_slave)
def build_mbus_message():
logging.info("Build your MBus message. All values hex. Leading 0x optional. Ctrl-C to Quit.")
addr = m3_common.default_value("Address ", "0xA5").replace('0x','').decode('hex')
data = m3_common.default_value(" Data", "0x12345678").replace('0x','').decode('hex')
    return addr, data
def get_mbus_message_to_send():
logging.info("Which message would you like to send?")
logging.info("\t0) Custom")
logging.info("\t1) Enumerate (0xF0000000, 0x24000000)")
logging.info("\t2) SNS Config Bits (0x40, 0x0423dfef)")
logging.info("\t2) SNS Sample Setup (0x40, 0x030bf0f0)")
logging.info("\t3) SNS Sample Start (0x40, 0x030af0f0)")
selection = m3_common.default_value("Choose a message type", "-1")
if selection == '0':
return build_mbus_message()
elif selection == '1':
return ("F0000000".decode('hex'), "24000000".decode('hex'))
elif selection == '2':
return ("40".decode('hex'), "0423dfef".decode('hex'))
elif selection == '3':
return ("40".decode('hex'), "030bf0f0".decode('hex'))
elif selection == '4':
return ('40'.decode('hex'), '030af0f0'.decode('hex'))
else:
logging.info("Please choose one of the numbered options")
return get_mbus_message_to_send()
while True:
try:
addr, data = get_mbus_message_to_send()
m.ice.mbus_send(addr, data)
except KeyboardInterrupt:
break
logging.info('')
logging.info("Exiting.")
| gpl-3.0 | -2,066,246,356,386,461,400 | 32.465116 | 97 | 0.615705 | false |
seankelly/buildbot | master/buildbot/test/unit/test_reporters_pushover.py | 1 | 3930 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
from unittest import SkipTest
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.process.properties import Interpolate
from buildbot.process.results import SUCCESS
from buildbot.reporters.pushover import PushoverNotifier
from buildbot.test.fake import fakemaster
from buildbot.test.fake import httpclientservice as fakehttpclientservice
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.util import httpclientservice
class TestPushoverNotifier(ConfigErrorsMixin, unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master(testcase=self,
wantData=True, wantDb=True, wantMq=True)
def setupFakeHttp(self):
return self.successResultOf(fakehttpclientservice.HTTPClientService.getFakeService(
self.master, self, 'https://api.pushover.net'))
@defer.inlineCallbacks
def setupPushoverNotifier(self, user_key="1234", api_token=Interpolate("abcd"), **kwargs):
pn = PushoverNotifier(user_key, api_token, **kwargs)
yield pn.setServiceParent(self.master)
yield pn.startService()
defer.returnValue(pn)
@defer.inlineCallbacks
def test_sendMessage(self):
_http = self.setupFakeHttp()
pn = yield self.setupPushoverNotifier(priorities={'passing': 2})
_http.expect("post", "/1/messages.json",
params={'user': "1234", 'token': "abcd",
'message': "Test", 'title': "Tee", 'priority': 2},
content_json={'status': 1, 'request': '98765'})
n = yield pn.sendMessage(body="Test", subject="Tee", results=SUCCESS)
j = yield n.json()
self.assertEqual(j['status'], 1)
self.assertEqual(j['request'], '98765')
@defer.inlineCallbacks
def test_sendNotification(self):
_http = self.setupFakeHttp()
pn = yield self.setupPushoverNotifier(otherParams={'sound': "silent"})
_http.expect("post", "/1/messages.json",
params={'user': "1234", 'token': "abcd",
'sound': "silent", 'message': "Test"},
content_json={'status': 1, 'request': '98765'})
n = yield pn.sendNotification({'message': "Test"})
j = yield n.json()
self.assertEqual(j['status'], 1)
self.assertEqual(j['request'], '98765')
@defer.inlineCallbacks
def test_sendRealNotification(self):
creds = os.environ.get('TEST_PUSHOVER_CREDENTIALS')
if creds is None:
raise SkipTest("real pushover test runs only if the variable "
"TEST_PUSHOVER_CREDENTIALS is defined")
user, token = creds.split(':')
_http = yield httpclientservice.HTTPClientService.getService(
self.master, 'https://api.pushover.net')
yield _http.startService()
pn = yield self.setupPushoverNotifier(user_key=user, api_token=token)
n = yield pn.sendNotification({'message': "Buildbot Pushover test passed!"})
j = yield n.json()
self.assertEqual(j['status'], 1)
| gpl-2.0 | 4,242,439,390,941,125,600 | 42.666667 | 94 | 0.66285 | false |
eunchong/build | scripts/slave/ios/host_info.py | 1 | 4450 | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print information about the tools present on this machine.
Usage:
./host_info.py -j /tmp/out.json
Writes a json dictionary containing tools information.
"""
import argparse
import json
import multiprocessing
import os
import platform
import sys
from slave.ios import utils
def check_for_tools():
"""Checks for the presence of some required tools.
Returns:
A list of tools present, a list of tools missing.
"""
available = []
missing = []
# A list of tools that should be present in PATH.
tools = [
utils.PLIST_BUDDY,
]
def try_call(binary):
try:
utils.call(binary)
available.append(binary)
except OSError:
missing.append(binary)
for tool in tools:
try_call(tool)
return available, missing
def extract_xcode_version(out):
"""Extracts Xcode version information from the given xcodebuild output.
Args:
out: List of lines emitted by an xcodebuild -version call.
Returns:
A 2-tuple of (Xcode Version, Xcode Build Version).
"""
# Sample output:
# Xcode 5.0
# Build version 5A1413
ver = None
build_ver = None
if len(out) > 0:
if ' ' in out[0]:
ver = out[0].split()[-1]
if len(out) > 1:
if ' ' in out[1]:
build_ver = out[1].split()[-1]
return ver, build_ver
def extract_sdks(out):
"""Extracts Xcode SDK information from the given xcodebuild output.
Args:
out: List of lines emitted by an xcodebuild -showsdks call.
Returns:
A list of valid parameters to xcodebuild -sdk.
"""
# Sample output:
# OS X SDKs:
# Mac OS X 10.6 -sdk macosx10.6
# OS X 10.8 -sdk macosx10.8
#
# iOS SDKs:
# iOS 7.0 -sdk iphoneos7.0
#
# iOS Simulator SDKs:
# Simulator - iOS 6.1 -sdk iphonesimulator6.1
# Simulator - iOS 7.0 -sdk iphonesimulator7.0
return [line.split('-sdk')[-1].strip() for line in out if '-sdk' in line]
def get_free_disk_space():
"""Returns the amount of free space on the current disk, in GiB.
Returns:
The amount of free space on the current disk, measured in GiB.
"""
# Stat the current path for info on the current disk.
stat = os.statvfs('.')
# Multiply block size by number of free blocks, express in GiB.
return stat.f_frsize * stat.f_bavail / 1024.0 / 1024.0 / 1024.0
def get_num_cpus():
"""Returns the number of logical CPUs on this machine.
Returns:
The number of logical CPUs on this machine, or 'unknown' if indeterminate.
"""
try:
return multiprocessing.cpu_count()
except NotImplementedError:
return 'unknown'
def get_python_version():
"""Returns the version of Python running this script.
Returns:
A Python version string.
"""
return platform.python_version()
def get_python_location():
"""Returns the location of the Python interpreter running this script.
Returns:
The full path to the current Python interpreter.
"""
return sys.executable
def get_osx_version():
"""Returns the version of Mac OS X installed on this host.
Returns:
The Mac version string, or the empty string if this host is not a Mac.
"""
return platform.mac_ver()[0]
def main(json_file):
"""Extracts information about the tools present on this host.
Args:
json_file: File to write JSON containing the tools information.
"""
info = {
}
info['Xcode Version'], info['Xcode Build Version'] = extract_xcode_version(
utils.call('xcodebuild', '-version').stdout)
info['Xcode SDKs'] = extract_sdks(
utils.call('xcodebuild', '-showsdks').stdout)
info['Free Space'] = get_free_disk_space()
info['Logical CPUs'] = get_num_cpus()
info['Python Version'] = get_python_version()
info['Python Location'] = get_python_location()
info['Mac OS X Version'] = get_osx_version()
info['Available Tools'], info['Missing Tools'] = check_for_tools()
if json_file:
with open(json_file, 'w') as json_file:
json.dump(info, json_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-j',
'--json-file',
help='Location to write a JSON summary.',
metavar='file',
type=str,
)
sys.exit(main(parser.parse_args().json_file))
| bsd-3-clause | -480,386,654,096,618,500 | 22.670213 | 78 | 0.650562 | false |
Secure-Trading/PythonAPI | securetrading/__init__.py | 1 | 1328 | # Secure Trading Python API
# Authors: Secure Trading Ltd
# Configuration variables
from __future__ import unicode_literals
from .requestobject import Request
from .requestobject import Requests
from .responseobject import Response
from .exceptions import SecureTradingError
from .exceptions import ApiError
from .exceptions import HttpError
from .exceptions import ConnectionError
from .exceptions import SendReceiveError
from .converter import Converter
from .config import Config
from .api import Api
from .phrasebook import PhraseBook
import securetrading.util
import pkgutil
import platform
dataFile = 'data/errormessages.json'
data = pkgutil.get_data('securetrading', dataFile).decode("utf-8")
error_messages = securetrading.util.json.loads(data)
dataFile = 'data/phrasebook.json'
data = pkgutil.get_data('securetrading', dataFile).decode("utf-8")
phrase_book = securetrading.util.json.loads(data)
__title__ = 'Secure Trading Python API'
__version__ = "1.0.16"
__author__ = 'Secure Trading Ltd'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Secure Trading Ltd'
version_information = ["Python",
platform.python_version(),
securetrading.__version__,
platform.platform(),
]
version_info = "::".join(version_information)
| mit | -7,403,021,655,991,908,000 | 29.883721 | 66 | 0.723645 | false |
trol73/avr-ic-tester | scripts/compiler.py | 1 | 7790 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from analyser import Analyser
from generator import DataGenerator, convert_pin
from ic_parser import load_line
from classes import Chip
OPTIMIZE_CMD_ALL = True  # use CMD_SET_ALL instead of CMD_SET
OPTIMIZE_CMD_TEST = True  # use CMD_TEST_ALL instead of CMD_TEST wherever possible
OPTIMIZE_LAST_PULSE = True  # use the CMD_LAST_PULSE command wherever possible
OPTIMIZE_SET_AND_TEST = True  # use CMD_SET_AND_TEST instead of the CMD_SET_ALL + CMD_TEST combination
OPTIMIZE_LAST_PULSE_AND_TEST = True  # use CMD_LAST_PULSE_AND_TEST instead of the CMD_LAST_PULSE + CMD_TEST combination
__author__ = 'trol'
SRC_TTL = 'data_ttl.ic'
OUT_TTL = '../ic-tester/data_ttl.h'
SRC_CMOS = 'data_cmos.ic'
OUT_CMOS = '../ic-tester/data_cmos.h'
#if len(sys.argv) == 2:
# src = sys.argv[1]
def compile_chip(chip, g):
"""
    Compile the test data for a single chip.
    :param chip: chip definition to compile.
    :param g: data generator that accumulates the generated commands.
"""
analyser = Analyser(chip.pins, chip.name)
g.add_chip(chip.name)
first_command_index = len(g.commands) - 1
#g.add_command('CMD_RESET_FULL')
inputs = chip.inputs
for power in chip.powerPlus:
inputs.append(power)
for power in chip.powerMinus:
inputs.append(power)
g.add_command_mask_1('CMD_INIT', inputs, chip.pins)
analyser.set_ddr(inputs)
    # commands
for cmd in chip.commands:
if cmd.name == 'set':
pins0 = cmd.lst0
for power in chip.powerMinus:
pins0.append(power)
pins1 = cmd.lst1
for power in chip.powerPlus:
pins1.append(power)
for pullUp in chip.pullUpOutputs:
pins1.append(pullUp)
analyser.set_pins_to_0(pins0)
analyser.set_pins_to_1(pins1)
if OPTIMIZE_CMD_ALL:
g.add_command_mask_1('CMD_SET_ALL', analyser.get_levels_mask(), chip.pins, 1)
else:
g.add_command_mask_2('CMD_SET', pins0, pins1, chip.pins)
elif cmd.name == 'test':
if OPTIMIZE_CMD_TEST:
optimized_mask = analyser.get_test_all_mask(cmd.lst0, cmd.lst1)
else:
optimized_mask = None
if optimized_mask is None:
g.add_command_mask_2('CMD_TEST', cmd.lst0, cmd.lst1, chip.pins)
else:
g.add_command_mask_1('CMD_TEST_ALL', optimized_mask, chip.pins, 1)
elif cmd.name == 'set+test':
pins0 = cmd.lst0
for power in chip.powerMinus:
pins0.append(power)
pins1 = cmd.lst1
for power in chip.powerPlus:
pins1.append(power)
for pullUp in chip.pullUpOutputs:
pins1.append(pullUp)
analyser.set_pins_to_0(pins0)
analyser.set_pins_to_1(pins1)
if OPTIMIZE_CMD_ALL:
g.add_command_mask_1('CMD_SET_ALL', analyser.get_levels_mask(), chip.pins, 1)
else:
g.add_command_mask_2('CMD_SET', pins0, pins1, chip.pins)
if OPTIMIZE_CMD_TEST:
optimized_mask = analyser.get_test_all_mask(cmd.lst0_2, cmd.lst1_2)
else:
optimized_mask = None
if optimized_mask is None:
g.add_command_mask_2('CMD_TEST', cmd.lst0_2, cmd.lst1_2, chip.pins)
else:
g.add_command_mask_1('CMD_TEST_ALL', optimized_mask, chip.pins, 1)
elif cmd.name == 'pulse+':
if OPTIMIZE_LAST_PULSE and analyser.pulse(cmd.pin, '+'):
g.add_command('CMD_LAST_PULSE')
else:
g.add_command('CMD_PULSE_PLUS', convert_pin(cmd.pin, chip.pins, 28))
elif cmd.name == 'pulse-':
if OPTIMIZE_LAST_PULSE and analyser.pulse(cmd.pin, '-'):
g.add_command('CMD_LAST_PULSE')
else:
g.add_command('CMD_PULSE_MINUS', convert_pin(cmd.pin, chip.pins, 28))
elif cmd.name == 'config':
inputs = cmd.lst0
for power in chip.powerPlus:
inputs.append(power)
for power in chip.powerMinus:
inputs.append(power)
chip.inputs = cmd.lst0
chip.outputs = cmd.lst1
g.add_command_mask_1('CMD_INIT', inputs, chip.pins)
analyser.set_ddr(inputs)
elif cmd.name == 'test-z':
pins = cmd.lst1
g.add_command_mask_1('CMD_TEST_Z', pins, chip.pins)
elif cmd.name == 'test-oc':
pins = cmd.lst1
g.add_command_mask_1('CMD_TEST_OC', pins, chip.pins)
elif cmd.name == 'repeat-pulse':
g.add_command('CMD_REPEAT_PULSE', cmd.value & 0xff, (cmd.value >> 8) & 0xff)
g.add_command('CMD_END')
    # iterate over all of this chip's commands and apply the optimizations
while True:
optimized = False
for i in range(first_command_index, len(g.commands)):
cmd = g.commands[i]
if isinstance(cmd, (list, tuple)):
cmd_name = cmd[0]
else:
continue
if i+1 < len(g.commands):
cmd_next = g.commands[i+1]
cmd_next_name = cmd_next[0]
else:
break
#print cmd_name, cmd_next_name
if OPTIMIZE_SET_AND_TEST and cmd_name.startswith('CMD_SET_ALL_') and cmd_next_name.startswith('CMD_TEST_ALL_'):
optimized = True
#print g.commands[i]
g.commands[i][0] = 'CMD_SET_ALL_AND_TEST_' + cmd_next_name[len('CMD_TEST_ALL_'):]
for j in range(1, len(cmd_next)):
g.commands[i].append(cmd_next[j])
#print g.commands[i]
del g.commands[i+1]
break
if OPTIMIZE_LAST_PULSE_AND_TEST and cmd_name == 'CMD_LAST_PULSE' and cmd_next_name.startswith('CMD_TEST_ALL_'):
g.commands[i+1][0] = 'CMD_LAST_PULSE_AND_TEST_' + cmd_next_name[len('CMD_TEST_ALL_'):]
#print g.commands[i+1]
del g.commands[i]
optimized = True
break
# CMD_SET_ALL_16, CMD_TEST_ALL_16 -> CMD_SET_AND_TEST_ALL
# CMD_LAST_PULSE, CMD_TEST_ALL_16 -> CMD_LAST_PULSE_AND_TEST_ALL
if not optimized:
break
#first_command_index
def process(src, out, suffix):
print 'compile', src, 'to', out
chips = []
    # load the input file
f = open(src, 'r')
for s in f:
s = s.strip()
l = len(s)
if l == 0:
continue
if l == 1 and (s[0] == '\n' or s[0] == '\r'):
continue
if s[0] == '#':
continue
if s[l - 1] == '\n':
s = s[:l - 1]
if s.startswith('CHIP['):
chip = Chip()
load_line(chip, s)
chips.append(chip)
else:
load_line(chips[len(chips) - 1], s)
f.close()
g = DataGenerator(suffix)
for chip in chips:
#chip.show()
compile_chip(chip, g)
g.generate(out)
print '-------------[Chips]--------------------'
for chip in chips:
print chip.name.decode('cp1251').encode('utf8')
print '----------------------------------------'
print 'Total chips: ', len(chips)
print 'Data size: ', g.size
process(SRC_TTL, OUT_TTL, 'TTL')
process(SRC_CMOS, OUT_CMOS, 'CMOS')
| gpl-3.0 | -7,507,544,284,092,070,000 | 31.877729 | 125 | 0.530881 | false |
john-mcnamara-intel/dpdk | usertools/dpdk-devbind.py | 2 | 28205 | #! /usr/bin/env python
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
#
from __future__ import print_function
import sys
import os
import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
# The PCI base class for all devices
network_class = {'Class': '02', 'Vendor': None, 'Device': None,
'SVendor': None, 'SDevice': None}
acceleration_class = {'Class': '12', 'Vendor': None, 'Device': None,
'SVendor': None, 'SDevice': None}
ifpga_class = {'Class': '12', 'Vendor': '8086', 'Device': '0b30',
'SVendor': None, 'SDevice': None}
encryption_class = {'Class': '10', 'Vendor': None, 'Device': None,
'SVendor': None, 'SDevice': None}
intel_processor_class = {'Class': '0b', 'Vendor': '8086', 'Device': None,
'SVendor': None, 'SDevice': None}
cavium_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a04b,a04d',
'SVendor': None, 'SDevice': None}
cavium_fpa = {'Class': '08', 'Vendor': '177d', 'Device': 'a053',
'SVendor': None, 'SDevice': None}
cavium_pkx = {'Class': '08', 'Vendor': '177d', 'Device': 'a0dd,a049',
'SVendor': None, 'SDevice': None}
cavium_tim = {'Class': '08', 'Vendor': '177d', 'Device': 'a051',
'SVendor': None, 'SDevice': None}
cavium_zip = {'Class': '12', 'Vendor': '177d', 'Device': 'a037',
'SVendor': None, 'SDevice': None}
avp_vnic = {'Class': '05', 'Vendor': '1af4', 'Device': '1110',
'SVendor': None, 'SDevice': None}
octeontx2_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa',
'SVendor': None, 'SDevice': None}
octeontx2_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc',
'SVendor': None, 'SDevice': None}
octeontx2_dma = {'Class': '08', 'Vendor': '177d', 'Device': 'a081',
'SVendor': None, 'SDevice': None}
intel_ioat_bdw = {'Class': '08', 'Vendor': '8086', 'Device': '6f20,6f21,6f22,6f23,6f24,6f25,6f26,6f27,6f2e,6f2f',
'SVendor': None, 'SDevice': None}
intel_ioat_skx = {'Class': '08', 'Vendor': '8086', 'Device': '2021',
'SVendor': None, 'SDevice': None}
intel_ntb_skx = {'Class': '06', 'Vendor': '8086', 'Device': '201c',
'SVendor': None, 'SDevice': None}
network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
baseband_devices = [acceleration_class]
crypto_devices = [encryption_class, intel_processor_class]
eventdev_devices = [cavium_sso, cavium_tim, octeontx2_sso]
mempool_devices = [cavium_fpa, octeontx2_npa]
compress_devices = [cavium_zip]
misc_devices = [intel_ioat_bdw, intel_ioat_skx, intel_ntb_skx, octeontx2_dma]
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
devices = {}
# list of supported DPDK drivers
dpdk_drivers = ["igb_uio", "vfio-pci", "uio_pci_generic"]
# list of currently loaded kernel modules
loaded_modules = None
# command-line arg flags
b_flag = None
status_flag = False
force_flag = False
args = []
def usage():
'''Print usage information for the program'''
argv0 = basename(sys.argv[0])
print("""
Usage:
------
%(argv0)s [options] DEVICE1 DEVICE2 ....
where DEVICE1, DEVICE2 etc, are specified via PCI "domain:bus:slot.func" syntax
or "bus:slot.func" syntax. For devices bound to Linux kernel drivers, they may
also be referred to by Linux interface name e.g. eth0, eth1, em0, em1, etc.
Options:
--help, --usage:
Display usage information and quit
-s, --status:
Print the current status of all known network, crypto, event
and mempool devices.
For each device, it displays the PCI domain, bus, slot and function,
along with a text description of the device. Depending upon whether the
device is being used by a kernel driver, the igb_uio driver, or no
driver, other relevant information will be displayed:
* the Linux interface name e.g. if=eth0
* the driver being used e.g. drv=igb_uio
* any suitable drivers not currently using that device
e.g. unused=igb_uio
NOTE: if this flag is passed along with a bind/unbind option, the
status display will always occur after the other operations have taken
place.
--status-dev:
Print the status of given device group. Supported device groups are:
"net", "baseband", "crypto", "event", "mempool" and "compress"
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
-u, --unbind:
Unbind a device (Equivalent to \"-b none\")
--force:
By default, network devices which are used by Linux - as indicated by
having routes in the routing table - cannot be modified. Using the
--force flag overrides this behavior, allowing active links to be
forcibly unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
Examples:
---------
To display current device status:
%(argv0)s --status
To display current network device status:
%(argv0)s --status-dev net
To bind eth1 from the current driver and move to use igb_uio
%(argv0)s --bind=igb_uio eth1
To unbind 0000:01:00.0 from using any driver
%(argv0)s -u 0000:01:00.0
To bind 0000:02:00.0 and 0000:02:00.1 to the ixgbe kernel driver
%(argv0)s -b ixgbe 02:00.0 02:00.1
""" % locals()) # replace items from local variables
# This is roughly compatible with check_output function in subprocess module
# which is only available in python 2.7.
def check_output(args, stderr=None):
'''Run a command and capture its output'''
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=stderr).communicate()[0]
# check if a specific kernel module is loaded
def module_is_loaded(module):
global loaded_modules
if loaded_modules:
return module in loaded_modules
# Get list of sysfs modules (both built-in and dynamically loaded)
sysfs_path = '/sys/module/'
# Get the list of directories in sysfs_path
sysfs_mods = [m for m in os.listdir(sysfs_path)
if os.path.isdir(os.path.join(sysfs_path, m))]
# special case for vfio_pci (module is named vfio-pci,
# but its .ko is named vfio_pci)
sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci' for a in sysfs_mods]
loaded_modules = sysfs_mods
return module in sysfs_mods
def check_modules():
'''Checks that igb_uio is loaded'''
global dpdk_drivers
# list of supported modules
mods = [{"Name": driver, "Found": False} for driver in dpdk_drivers]
# first check if module is loaded
for mod in mods:
if module_is_loaded(mod["Name"]):
mod["Found"] = True
# check if we have at least one loaded module
if True not in [mod["Found"] for mod in mods] and b_flag is not None:
print("Warning: no supported DPDK kernel modules are loaded", file=sys.stderr)
# change DPDK driver list to only contain drivers that are loaded
dpdk_drivers = [mod["Name"] for mod in mods if mod["Found"]]
def has_driver(dev_id):
'''return true if a device is assigned to a driver. False otherwise'''
return "Driver_str" in devices[dev_id]
def get_pci_device_details(dev_id, probe_lspci):
'''This function gets additional details for a PCI device'''
device = {}
if probe_lspci:
extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
# parse lspci details
for line in extra_info:
if len(line) == 0:
continue
name, value = line.decode().split("\t", 1)
name = name.strip(":") + "_str"
device[name] = value
# check for a unix interface name
device["Interface"] = ""
for base, dirs, _ in os.walk("/sys/bus/pci/devices/%s/" % dev_id):
if "net" in dirs:
device["Interface"] = \
",".join(os.listdir(os.path.join(base, "net")))
break
# check if a port is used for ssh connection
device["Ssh_if"] = False
device["Active"] = ""
return device
def clear_data():
'''This function clears any old data'''
global devices
devices = {}
def get_device_details(devices_type):
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
dictionaries - one for each NIC.'''
global devices
global dpdk_drivers
# first loop through and read details for all devices
# request machine readable format, with numeric IDs and String
dev = {}
dev_lines = check_output(["lspci", "-Dvmmnnk"]).splitlines()
for dev_line in dev_lines:
if len(dev_line) == 0:
if device_type_match(dev, devices_type):
# Replace "Driver" with "Driver_str" to have consistency of
# of dictionary key names
if "Driver" in dev.keys():
dev["Driver_str"] = dev.pop("Driver")
if "Module" in dev.keys():
dev["Module_str"] = dev.pop("Module")
# use dict to make copy of dev
devices[dev["Slot"]] = dict(dev)
# Clear previous device's data
dev = {}
else:
name, value = dev_line.decode().split("\t", 1)
value_list = value.rsplit(' ', 1)
if len(value_list) > 1:
# String stored in <name>_str
dev[name.rstrip(":") + '_str'] = value_list[0]
# Numeric IDs
dev[name.rstrip(":")] = value_list[len(value_list) - 1] \
.rstrip("]").lstrip("[")
if devices_type == network_devices:
# check what is the interface if any for an ssh connection if
# any to this host, so we can mark it later.
ssh_if = []
route = check_output(["ip", "-o", "route"])
# filter out all lines for 169.254 routes
route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
route.decode().splitlines()))
rt_info = route.split()
for i in range(len(rt_info) - 1):
if rt_info[i] == "dev":
ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
if not device_type_match(devices[d], devices_type):
continue
# get additional info and add it to existing data
devices[d] = devices[d].copy()
# No need to probe lspci
devices[d].update(get_pci_device_details(d, False).items())
if devices_type == network_devices:
for _if in ssh_if:
if _if in devices[d]["Interface"].split(","):
devices[d]["Ssh_if"] = True
devices[d]["Active"] = "*Active*"
break
# add igb_uio to list of supporting modules if needed
if "Module_str" in devices[d]:
for driver in dpdk_drivers:
if driver not in devices[d]["Module_str"]:
devices[d]["Module_str"] = \
devices[d]["Module_str"] + ",%s" % driver
else:
devices[d]["Module_str"] = ",".join(dpdk_drivers)
# make sure the driver and module strings do not have any duplicates
if has_driver(d):
modules = devices[d]["Module_str"].split(",")
if devices[d]["Driver_str"] in modules:
modules.remove(devices[d]["Driver_str"])
devices[d]["Module_str"] = ",".join(modules)
def device_type_match(dev, devices_type):
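    # A device matches a template when its PCI class prefix equals the template's
    # "Class" and every non-None template field (Vendor, Device, SVendor, SDevice)
    # matches; a template field may list several acceptable values separated by commas.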
for i in range(len(devices_type)):
param_count = len(
[x for x in devices_type[i].values() if x is not None])
match_count = 0
if dev["Class"][0:2] == devices_type[i]["Class"]:
match_count = match_count + 1
for key in devices_type[i].keys():
if key != 'Class' and devices_type[i][key]:
value_list = devices_type[i][key].split(',')
for value in value_list:
if value.strip(' ') == dev[key]:
match_count = match_count + 1
# count must be the number of non None parameters to match
if match_count == param_count:
return True
return False
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
it, which can then be used to index into the devices array'''
# check if it's already a suitable index
if dev_name in devices:
return dev_name
# check if it's an index just missing the domain part
elif "0000:" + dev_name in devices:
return "0000:" + dev_name
else:
# check if it's an interface name, e.g. eth1
for d in devices.keys():
if dev_name in devices[d]["Interface"].split(","):
return devices[d]["Slot"]
# if nothing else matches - error
raise ValueError("Unknown device: %s. "
"Please specify device in \"bus:slot.func\" format" % dev_name)
def unbind_one(dev_id, force):
'''Unbind the device identified by "dev_id" from its current driver'''
dev = devices[dev_id]
if not has_driver(dev_id):
print("Notice: %s %s %s is not currently managed by any driver" %
(dev["Slot"], dev["Device_str"], dev["Interface"]), file=sys.stderr)
return
# prevent us disconnecting ourselves
if dev["Ssh_if"] and not force:
print("Warning: routing table indicates that interface %s is active. "
"Skipping unbind" % dev_id, file=sys.stderr)
return
# write to /sys to unbind
filename = "/sys/bus/pci/drivers/%s/unbind" % dev["Driver_str"]
try:
f = open(filename, "a")
except:
sys.exit("Error: unbind failed for %s - Cannot open %s" %
(dev_id, filename))
f.write(dev_id)
f.close()
def bind_one(dev_id, driver, force):
'''Bind the device given by "dev_id" to the driver "driver". If the device
is already bound to a different driver, it will be unbound first'''
dev = devices[dev_id]
saved_driver = None # used to rollback any unbind in case of failure
# prevent disconnection of our ssh session
if dev["Ssh_if"] and not force:
print("Warning: routing table indicates that interface %s is active. "
"Not modifying" % dev_id, file=sys.stderr)
return
# unbind any existing drivers we don't want
if has_driver(dev_id):
if dev["Driver_str"] == driver:
print("Notice: %s already bound to driver %s, skipping" %
(dev_id, driver), file=sys.stderr)
return
else:
saved_driver = dev["Driver_str"]
unbind_one(dev_id, force)
dev["Driver_str"] = "" # clear driver string
# For kernels >= 3.15 driver_override can be used to specify the driver
# for a device rather than relying on the driver to provide a positive
# match of the device. The existing process of looking up
# the vendor and device ID, adding them to the driver new_id,
# will erroneously bind other devices too which has the additional burden
# of unbinding those devices
if driver in dpdk_drivers:
filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
if os.path.exists(filename):
try:
f = open(filename, "w")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename), file=sys.stderr)
return
try:
f.write("%s" % driver)
f.close()
except:
print("Error: bind failed for %s - Cannot write driver %s to "
"PCI ID " % (dev_id, driver), file=sys.stderr)
return
# For kernels < 3.15 use new_id to add PCI id's to the driver
else:
filename = "/sys/bus/pci/drivers/%s/new_id" % driver
try:
f = open(filename, "w")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename), file=sys.stderr)
return
try:
# Convert Device and Vendor Id to int to write to new_id
f.write("%04x %04x" % (int(dev["Vendor"],16),
int(dev["Device"], 16)))
f.close()
except:
print("Error: bind failed for %s - Cannot write new PCI ID to "
"driver %s" % (dev_id, driver), file=sys.stderr)
return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
try:
f = open(filename, "a")
except:
print("Error: bind failed for %s - Cannot open %s"
% (dev_id, filename), file=sys.stderr)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
try:
f.write(dev_id)
f.close()
except:
# for some reason, closing dev_id after adding a new PCI ID to new_id
# results in IOError. however, if the device was successfully bound,
# we don't care for any errors and can safely ignore IOError
tmp = get_pci_device_details(dev_id, True)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
print("Error: bind failed for %s - Cannot bind to driver %s"
% (dev_id, driver), file=sys.stderr)
if saved_driver is not None: # restore any previous driver
bind_one(dev_id, saved_driver, force)
return
# For kernels > 3.15 driver_override is used to bind a device to a driver.
# Before unbinding it, overwrite driver_override with empty string so that
# the device can be bound to any other driver
filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
if os.path.exists(filename):
try:
f = open(filename, "w")
except:
sys.exit("Error: unbind failed for %s - Cannot open %s"
% (dev_id, filename))
try:
f.write("\00")
f.close()
except:
sys.exit("Error: unbind failed for %s - Cannot open %s"
% (dev_id, filename))
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
if dev_list[0] == "dpdk":
for d in devices.keys():
if "Driver_str" in devices[d]:
if devices[d]["Driver_str"] in dpdk_drivers:
unbind_one(devices[d]["Slot"], force)
return
try:
dev_list = map(dev_id_from_dev_name, dev_list)
except ValueError as ex:
print(ex)
sys.exit(1)
for d in dev_list:
unbind_one(d, force)
def bind_all(dev_list, driver, force=False):
"""Bind method, takes a list of device locations"""
global devices
# a common user error is to forget to specify the driver the devices need to
# be bound to. check if the driver is a valid device, and if it is, show
# a meaningful error.
try:
dev_id_from_dev_name(driver)
# if we've made it this far, this means that the "driver" was a valid
# device string, so it's probably not a valid driver name.
sys.exit("Error: Driver '%s' does not look like a valid driver. " \
"Did you forget to specify the driver to bind devices to?" % driver)
except ValueError:
# driver generated error - it's not a valid device ID, so all is well
pass
# check if we're attempting to bind to a driver that isn't loaded
if not module_is_loaded(driver):
sys.exit("Error: Driver '%s' is not loaded." % driver)
try:
dev_list = map(dev_id_from_dev_name, dev_list)
except ValueError as ex:
sys.exit(ex)
for d in dev_list:
bind_one(d, driver, force)
# For kernels < 3.15 when binding devices to a generic driver
# (i.e. one that doesn't have a PCI ID table) using new_id, some devices
# that are not bound to any other driver could be bound even if no one has
# asked them to. hence, we check the list of drivers again, and see if
# some of the previously-unbound devices were erroneously bound.
if not os.path.exists("/sys/bus/pci/devices/%s/driver_override" % d):
for d in devices.keys():
# skip devices that were already bound or that we know should be bound
if "Driver_str" in devices[d] or d in dev_list:
continue
# update information about this device
            devices[d] = dict(list(devices[d].items()) +
                              list(get_pci_device_details(d, True).items()))
# check if updated information indicates that the device was bound
if "Driver_str" in devices[d]:
unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
'''Displays to the user the details of a list of devices given in
"dev_list". The "extra_params" parameter, if given, should contain a string
with %()s fields in it for replacement by the named fields in each
device's dictionary.'''
strings = [] # this holds the strings to print. We sort before printing
print("\n%s" % title)
print("="*len(title))
if len(dev_list) == 0:
strings.append("<none>")
else:
for dev in dev_list:
if extra_params is not None:
strings.append("%s '%s %s' %s" % (dev["Slot"],
dev["Device_str"],
dev["Device"],
extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
strings.sort()
print("\n".join(strings)) # print one per line
def show_device_status(devices_type, device_name):
global dpdk_drivers
kernel_drv = []
dpdk_drv = []
no_drv = []
# split our list of network devices into the three categories above
for d in devices.keys():
if device_type_match(devices[d], devices_type):
if not has_driver(d):
no_drv.append(devices[d])
continue
if devices[d]["Driver_str"] in dpdk_drivers:
dpdk_drv.append(devices[d])
else:
kernel_drv.append(devices[d])
n_devs = len(dpdk_drv) + len(kernel_drv) + len(no_drv)
# don't bother displaying anything if there are no devices
if n_devs == 0:
msg = "No '%s' devices detected" % device_name
print("")
print(msg)
print("".join('=' * len(msg)))
return
# print each category separately, so we can clearly see what's used by DPDK
if len(dpdk_drv) != 0:
display_devices("%s devices using DPDK-compatible driver" % device_name,
dpdk_drv, "drv=%(Driver_str)s unused=%(Module_str)s")
if len(kernel_drv) != 0:
display_devices("%s devices using kernel driver" % device_name, kernel_drv,
"if=%(Interface)s drv=%(Driver_str)s "
"unused=%(Module_str)s %(Active)s")
if len(no_drv) != 0:
display_devices("Other %s devices" % device_name, no_drv,
"unused=%(Module_str)s")
def show_status():
'''Function called when the script is passed the "--status" option.
Displays to the user what devices are bound to the igb_uio driver, the
kernel driver or to no driver'''
if status_dev == "net" or status_dev == "all":
show_device_status(network_devices, "Network")
if status_dev == "baseband" or status_dev == "all":
show_device_status(baseband_devices, "Baseband")
if status_dev == "crypto" or status_dev == "all":
show_device_status(crypto_devices, "Crypto")
if status_dev == "event" or status_dev == "all":
show_device_status(eventdev_devices, "Eventdev")
if status_dev == "mempool" or status_dev == "all":
show_device_status(mempool_devices, "Mempool")
if status_dev == "compress" or status_dev == "all":
show_device_status(compress_devices , "Compress")
if status_dev == "misc" or status_dev == "all":
show_device_status(misc_devices, "Misc (rawdev)")
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
global b_flag
global status_flag
global status_dev
global force_flag
global args
if len(sys.argv) <= 1:
usage()
sys.exit(0)
try:
opts, args = getopt.getopt(sys.argv[1:], "b:us",
["help", "usage", "status", "status-dev=",
"force", "bind=", "unbind", ])
except getopt.GetoptError as error:
print(str(error))
print("Run '%s --usage' for further information" % sys.argv[0])
sys.exit(1)
for opt, arg in opts:
if opt == "--help" or opt == "--usage":
usage()
sys.exit(0)
if opt == "--status-dev":
status_flag = True
status_dev = arg
if opt == "--status" or opt == "-s":
status_flag = True
status_dev = "all"
if opt == "--force":
force_flag = True
if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
if b_flag is not None:
sys.exit("Error: binding and unbinding are mutually exclusive")
if opt == "-u" or opt == "--unbind":
b_flag = "none"
else:
b_flag = arg
def do_arg_actions():
'''do the actual action requested by the user'''
global b_flag
global status_flag
global force_flag
global args
if b_flag is None and not status_flag:
print("Error: No action specified for devices. "
"Please give a -b or -u option", file=sys.stderr)
usage()
sys.exit(1)
if b_flag is not None and len(args) == 0:
print("Error: No devices specified.", file=sys.stderr)
usage()
sys.exit(1)
if b_flag == "none" or b_flag == "None":
unbind_all(args, force_flag)
elif b_flag is not None:
bind_all(args, b_flag, force_flag)
if status_flag:
if b_flag is not None:
clear_data()
# refresh if we have changed anything
get_device_details(network_devices)
get_device_details(baseband_devices)
get_device_details(crypto_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
get_device_details(compress_devices)
get_device_details(misc_devices)
show_status()
def main():
'''program main function'''
# check if lspci is installed, suppress any output
with open(os.devnull, 'w') as devnull:
ret = subprocess.call(['which', 'lspci'],
stdout=devnull, stderr=devnull)
if ret != 0:
sys.exit("'lspci' not found - please install 'pciutils'")
parse_args()
check_modules()
clear_data()
get_device_details(network_devices)
get_device_details(baseband_devices)
get_device_details(crypto_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
get_device_details(compress_devices)
get_device_details(misc_devices)
do_arg_actions()
if __name__ == "__main__":
main()
| mit | -2,125,751,032,921,621,200 | 37.063428 | 113 | 0.577664 | false |
gerrit-review/gerrit | tools/download_file.py | 1 | 5139 | #!/usr/bin/env python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from hashlib import sha1
from optparse import OptionParser
from os import link, makedirs, path, remove
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from util import hash_file, resolve_url
from zipfile import ZipFile, BadZipfile, LargeZipFile
GERRIT_HOME = path.expanduser('~/.gerritcodereview')
# TODO(davido): Rename in bazel-cache
CACHE_DIR = path.join(GERRIT_HOME, 'buck-cache', 'downloaded-artifacts')
LOCAL_PROPERTIES = 'local.properties'
def safe_mkdirs(d):
if path.isdir(d):
return
try:
makedirs(d)
except OSError as err:
if not path.isdir(d):
raise err
def download_properties(root_dir):
""" Get the download properties.
First tries to find the properties file in the given root directory,
and if not found there, tries in the Gerrit settings folder in the
user's home directory.
Returns a set of download properties, which may be empty.
"""
p = {}
local_prop = path.join(root_dir, LOCAL_PROPERTIES)
if not path.isfile(local_prop):
local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
if path.isfile(local_prop):
try:
with open(local_prop) as fd:
for line in fd:
if line.startswith('download.'):
d = [e.strip() for e in line.split('=', 1)]
name, url = d[0], d[1]
p[name[len('download.'):]] = url
except OSError:
pass
return p
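# A local.properties file holds simple "key = value" lines; keys starting with
# "download." remap artifact URLs, for example (hypothetical mirror address):
#   download.GERRIT = http://mirror.example.com/gerrit-artifacts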
def cache_entry(args):
if args.v:
h = args.v
else:
h = sha1(args.u.encode('utf-8')).hexdigest()
name = '%s-%s' % (path.basename(args.o), h)
return path.join(CACHE_DIR, name)
opts = OptionParser()
opts.add_option('-o', help='local output file')
opts.add_option('-u', help='URL to download')
opts.add_option('-v', help='expected content SHA-1')
opts.add_option('-x', action='append', help='file to delete from ZIP')
opts.add_option('--exclude_java_sources', action='store_true')
opts.add_option('--unsign', action='store_true')
args, _ = opts.parse_args()
root_dir = args.o
while root_dir and path.dirname(root_dir) != root_dir:
root_dir, n = path.split(root_dir)
if n == 'WORKSPACE':
break
redirects = download_properties(root_dir)
cache_ent = cache_entry(args)
src_url = resolve_url(args.u, redirects)
if not path.exists(cache_ent):
try:
safe_mkdirs(path.dirname(cache_ent))
except OSError as err:
print('error creating directory %s: %s' %
(path.dirname(cache_ent), err), file=stderr)
exit(1)
print('Download %s' % src_url, file=stderr)
try:
check_call(['curl', '--proxy-anyauth', '-ksSfLo', cache_ent, src_url])
except OSError as err:
print('could not invoke curl: %s\nis curl installed?' % err, file=stderr)
exit(1)
except CalledProcessError as err:
print('error using curl: %s' % err, file=stderr)
exit(1)
if args.v:
have = hash_file(sha1(), cache_ent).hexdigest()
if args.v != have:
print((
'%s:\n' +
'expected %s\n' +
'received %s\n') % (src_url, args.v, have), file=stderr)
try:
remove(cache_ent)
except OSError as err:
if path.exists(cache_ent):
print('error removing %s: %s' % (cache_ent, err), file=stderr)
exit(1)
exclude = []
if args.x:
exclude += args.x
if args.exclude_java_sources:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if n.endswith('.java'):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
if args.unsign:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if (n.endswith('.RSA')
or n.endswith('.SF')
or n.endswith('.LIST')):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
try:
check_call(['zip', '-d', args.o] + exclude)
except CalledProcessError as err:
print('error removing files from zip: %s' % err, file=stderr)
exit(1)
else:
try:
link(cache_ent, args.o)
except OSError as err:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
| apache-2.0 | -685,302,445,222,360,700 | 28.534483 | 77 | 0.654602 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.5/Lib/re.py | 1 | 12232 | #
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB ([email protected]).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module take flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines as well as the string.
"$" matches the end of lines as well as the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
"U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE", "error" ]
__version__ = "2.2.1"
# flags
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a callable, it's passed the match object and must return
a replacement string to be used."""
return _compile(pattern, 0).sub(repl, string, count)
def subn(pattern, repl, string, count=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, 0).subn(repl, string, count)
def split(pattern, string, maxsplit=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings."""
return _compile(pattern, 0).split(string, maxsplit)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more groups are present in the pattern, return a
list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
return _compile(pattern, flags)
def purge():
"Clear the regular expression cache"
_cache.clear()
_cache_repl.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _compile(pattern, flags|T)
_alphanum = {}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890':
_alphanum[c] = 1
del c
def escape(pattern):
"Escape all non-alphanumeric characters in pattern."
s = list(pattern)
alphanum = _alphanum
for i in range(len(pattern)):
c = pattern[i]
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return pattern[:0].join(s)
# --------------------------------------------------------------------
# internals
_cache = {}
_cache_repl = {}
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 100
def _compile(*key):
# internal: compile pattern
cachekey = (type(key[0]),) + key
p = _cache.get(cachekey)
if p is not None:
return p
pattern, flags = key
if isinstance(pattern, _pattern_type):
return pattern
if not sre_compile.isstring(pattern):
raise TypeError, "first argument must be string or compiled pattern"
try:
p = sre_compile.compile(pattern, flags)
except error, v:
raise error, v # invalid expression
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[cachekey] = p
return p
def _compile_repl(*key):
# internal: compile replacement pattern
p = _cache_repl.get(key)
if p is not None:
return p
repl, pattern = key
try:
p = sre_parse.parse_template(repl, pattern)
except error, v:
raise error, v # invalid expression
if len(_cache_repl) >= _MAXCACHE:
_cache_repl.clear()
_cache_repl[key] = p
return p
def _expand(pattern, match, template):
# internal: match.expand implementation hook
template = sre_parse.parse_template(template, pattern)
return sre_parse.expand_template(template, match)
def _subx(pattern, template):
# internal: pattern.sub/subn implementation helper
template = _compile_repl(template, pattern)
if not template[0] and len(template[1]) == 1:
# literal replacement
return template[1][0]
def filter(match, template=template):
return sre_parse.expand_template(template, match)
return filter
# register myself for pickling
import copy_reg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
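    # Undocumented, experimental helper: each (phrase, action) pair in the
    # lexicon becomes one branch of a single combined pattern, and scan()
    # repeatedly matches the input, invoking the action of whichever branch
    # matched (callable actions receive the scanner and the matched text).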
def __init__(self, lexicon, flags=0):
from sre_constants import BRANCH, SUBPATTERN
self.lexicon = lexicon
# combine phrases into a compound pattern
p = []
s = sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
]))
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
s.groups = len(p)
self.scanner = sre_compile.compile(p)
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while 1:
m = match()
if not m:
break
j = m.end()
if i == j:
break
action = self.lexicon[m.lastindex-1][1]
if callable(action):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i:]
| mit | 3,808,175,959,281,925,000 | 37.831746 | 78 | 0.649035 | false |
sgtnasty/python | list/list.py | 1 | 1958 | #!/usr/bin/env python
# https://pymotw.com/3/os.path/
import os
import os.path
import time
import argparse
APPNAME='lister'
__version__ = '0.0.1'
def config_args():
"""
Configure command line arguments
"""
parser = argparse.ArgumentParser(description=APPNAME,
epilog=("Version {}".format(__version__)))
#parser.add_argument('-c', metavar='CONFIGFILE', required=False, help='path to config file',
# default=DESTINY_CONFIG_FILE)
#parser.add_argument('--log', metavar='LOGFILE', required=False, help='path to log file',
# default=DESTINY_LOGFILE)
parser.add_argument('files', metavar='F', nargs='+',
help='file or directory to evaluate')
parser.add_argument('--version', action='version', version=('%(prog)s ' + __version__))
parser.add_argument('--debug', required=False, help='Enable debugging of this script', action="store_true")
args = parser.parse_args()
return args
def ftime(filepath):
print('File : {}'.format(filepath))
print('Access time :', time.ctime(os.path.getatime(filepath)))
print('Modified time:', time.ctime(os.path.getmtime(filepath)))
print('Change time :', time.ctime(os.path.getctime(filepath)))
print('Size :', os.path.getsize(filepath))
def finfo(filepath):
print('File : {!r}'.format(filepath))
print('Absolute :', os.path.isabs(filepath))
print('Is file? :', os.path.isfile(filepath))
print('Is Dir? :', os.path.isdir(filepath))
print('Is Link? :', os.path.islink(filepath))
print('Mountpoint? :', os.path.ismount(filepath))
print('Exists? :', os.path.exists(filepath))
print('Link Exists?:', os.path.lexists(filepath))
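# Illustrative invocation (not part of the original script):
#     python list.py /etc/hosts /tmp
# Each argument is resolved to an absolute path and its timestamps, size,
# and the os.path predicates above are printed.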
if __name__ == '__main__':
args = config_args()
for filepath in args.files:
#print(type(filepath))
#print(repr(filepath))
fp = os.path.abspath(filepath)
ftime(fp)
finfo(fp)
| gpl-3.0 | 822,235,792,198,670,000 | 32.186441 | 111 | 0.621042 | false |
sahutd/youtube-dl | youtube_dl/extractor/__init__.py | 1 | 20425 | from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import AdobeTVIE
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import ImgurIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import WebOfStoriesIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
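# Illustrative use of the registry built above (added for clarity, not part
# of the original file): pick the first extractor whose suitable() classmethod
# accepts a URL, mirroring how the downloader chooses a handler.
#
#     for ie in gen_extractors():
#         if ie.suitable('https://www.youtube.com/watch?v=BaW_jenozKc'):
#             print(ie.IE_NAME)
#             break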
def list_extractors(age_limit):
"""
Return a list of extractors that are suitable for the given age,
sorted by extractor ID.
"""
return sorted(
filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
key=lambda ie: ie.IE_NAME.lower())
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name + 'IE']
| unlicense | -7,653,186,010,566,997,000 | 25.491569 | 83 | 0.800245 | false |
jck/myhdl | myhdl/test/core/test_traceSignals.py | 1 | 5535 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Run the unit tests for traceSignals """
import os
import random
import pytest
from myhdl import block, Signal, Simulation, _simulator, delay, instance, intbv
from myhdl._traceSignals import TraceSignalsError, _error, traceSignals
from helpers import raises_kind
random.seed(1) # random, but deterministic
path = os.path
QUIET=1
@block
def gen(clk):
@instance
def logic():
while 1:
yield delay(10)
clk.next = not clk
return logic
@block
def fun():
clk = Signal(bool(0))
inst = gen(clk)
return inst
@block
def dummy():
clk = Signal(bool(0))
inst = gen(clk)
return 1
@block
def top():
inst = traceSignals(fun())
return inst
@block
def top2():
inst = [{} for i in range(4)]
j = 3
inst[j-2]['key'] = traceSignals(fun())
return inst
@block
def top3():
inst_1 = traceSignals(fun())
inst_2 = traceSignals(fun())
return inst_1, inst_2
@block
def genTristate(clk, x, y, z):
xd = x.driver()
yd = y.driver()
zd = z.driver()
@instance
def ckgen():
while 1:
yield delay(10)
clk.next = not clk
@instance
def logic():
for v in [True, False, None, 0, True, None, None, 1]:
yield clk.posedge
xd.next = v
if v is None:
yd.next = zd.next = None
elif v:
yd.next = zd.next = 11
else:
yd.next = zd.next = 0
return ckgen,logic
@block
def tristate():
from myhdl import TristateSignal
clk = Signal(bool(0))
x = TristateSignal(True) # single bit
y = TristateSignal(intbv(0)) # intbv with undefined width
z = TristateSignal(intbv(0)[8:]) # intbv with fixed width
inst = genTristate(clk, x, y, z)
return inst
@block
def topTristate():
inst = traceSignals(tristate())
return inst
@pytest.yield_fixture
def vcd_dir(tmpdir):
with tmpdir.as_cwd():
yield tmpdir
if _simulator._tracing:
_simulator._tf.close()
_simulator._tracing = 0
class TestTraceSigs:
# TODO: multiple trace handling is different now has the
# calls go bottom-up. To be revisited.
# def testMultipleTraces(self, vcd_dir):
# with raises_kind(TraceSignalsError, _error.MultipleTraces):
# dut = top3()
def testArgType1(self, vcd_dir):
with raises_kind(TraceSignalsError, _error.ArgType):
dut = traceSignals([1, 2])
# this test is no longer relevant
# def testReturnVal(self, vcd_dir):
# from myhdl import ExtractHierarchyError
# from myhdl._extractHierarchy import _error
# kind = _error.InconsistentToplevel % (2, "dummy")
# with raises_kind(ExtractHierarchyError, kind):
# dut = traceSignals(dummy())
def testHierarchicalTrace1(self, vcd_dir):
p = "%s.vcd" % fun.__name__
top()
assert path.exists(p)
def testHierarchicalTrace2(self, vcd_dir):
pdut = "%s.vcd" % top.__name__
psub = "%s.vcd" % fun.__name__
dut = traceSignals(top())
assert path.exists(pdut)
assert not path.exists(psub)
def testTristateTrace(self, vcd_dir):
sim = Simulation(topTristate())
sim.run(100, quiet=QUIET)
sim.quit()
def testBackupOutputFile(self, vcd_dir):
p = "%s.vcd" % fun.__name__
dut = traceSignals(fun())
sim = Simulation(dut)
sim.run(1000, quiet=QUIET)
sim.quit()
_simulator._tf.close()
_simulator._tracing = 0
size = path.getsize(p)
pbak = p[:-4] + '.' + str(path.getmtime(p)) + '.vcd'
assert not path.exists(pbak)
dut = traceSignals(fun())
_simulator._tf.close()
_simulator._tracing = 0
assert path.exists(p)
assert path.exists(pbak)
assert path.getsize(pbak) == size
assert path.getsize(p) < size
def testSetDirectory(self, vcd_dir):
traceSignals.directory = 'some_vcd_dir'
os.mkdir(path.join(str(vcd_dir), traceSignals.directory))
pdut = "%s.vcd" % top.__name__
psub = "%s.vcd" % fun.__name__
pdutd = path.join(traceSignals.directory, "%s.vcd" % top.__name__)
psubd = path.join(traceSignals.directory, "%s.vcd" % fun.__name__)
dut = traceSignals(top())
_simulator._tf.close()
_simulator._tracing = 0
traceSignals.directory = None
assert not path.exists(pdut)
assert not path.exists(psub)
assert path.exists(pdutd)
assert not path.exists(psubd)
| lgpl-2.1 | -461,648,004,307,917,760 | 26.954545 | 79 | 0.609214 | false |
schenkd/webdev-project | app/main/views.py | 1 | 7169 | # ~*~ encoding: utf-8 ~*~
from app.main import main
from flask import render_template, request, flash, redirect, url_for
from app.main.forms import EngpassForm, ContactForm, ClassifyForm, classified
from app.models import Engpass, User, Drug, Producer, Contact, Log
from flask_login import login_required, current_user
from app.decorators import admin_required
from datetime import datetime
@main.route('/', methods=['GET', 'POST'])
def index():
engpaesse = Engpass.objects()
# update last seen
if current_user.is_authenticated:
current_user.update_last_seen()
return render_template('main/index.html', engpaesse=engpaesse)
@main.route('/search/<query>', methods=['GET', 'POST'])
def search_query(query):
pass
@main.route('/klassifizierung', methods=['GET', 'POST'])
def classify():
form = ClassifyForm()
# update last seen
if current_user.is_authenticated:
current_user.update_last_seen()
if request.method == 'POST':
enr = int(request.form['enr'])
classify = int(request.form['classify'])
try:
            # update the drug's classification
drug = Drug.get_by_enr(enr)
drug.update_class(classify)
            # map the integer classification to its label text
            # for use in the flash message and the log entry
classify_name = [pair[1] for pair in classified if classify in pair]
flash('{} wurde als {} klassifiziert'.format(drug['drug_title'], classify_name[0]))
# save in log
user = User.objects.get(email=current_user.email)
Log(user=user, category='classify', text='{} wurde als {} klassifiziert'.format(enr, classify)).save()
except:
flash('ENR {} konnte keinem Arzneimittel zugewiesen werden'.format(enr))
    # query drugs according to their classification
relevants = Drug.objects(classify=1)
dangers = Drug.objects(classify=2)
return render_template('intern/classify/form.html', form=form, relevants=relevants, dangers=dangers)
@main.route('/_getFilter', methods=['POST'])
def getFilter():
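    # AJAX endpoint: the posted JSON string selects which classification of
    # shortages (Engpaesse) is rendered into the table partial.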
msg = request.get_json(force=True)
if msg == 'RELEVANT':
        # query all shortages of drugs classified as supply-relevant
drugs = [doc.id for doc in Drug.objects(classify=1)]
engpaesse = Engpass.objects(__raw__={'drug': {'$in': drugs}})
elif msg == 'DANGER':
        # query all shortages of drugs classified as supply-endangering
drugs = [doc.id for doc in Drug.objects(classify=2)]
engpaesse = Engpass.objects(__raw__={'drug': {'$in': drugs}})
else:
        # query all shortages
engpaesse = Engpass.objects()
return render_template('main/table.html', engpaesse=engpaesse)
@main.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm()
# update last seen
if current_user.is_authenticated:
current_user.update_last_seen()
if request.method == 'POST' and form.validate_on_submit():
        # create a Contact document
Contact(firstname=request.form['firstname'],
lastname=request.form['lastname'],
telephone=request.form['telephone'],
message=request.form['message'],
email=request.form['email']
).save()
# save in log
user = User.objects.get(email=current_user.email)
Log(user=user, category='contact', text='Hat eine Kontaktanfrage gesendet.').save()
flash('Ihre Nachricht wurde erfolgreich übermittelt.')
return render_template('main/contact.html', form=form)
@main.route('/engpass', methods=['GET', 'POST'])
@login_required
def engpass():
form = EngpassForm()
if request.method == 'POST':
        # create an Engpass (shortage) document
Engpass(
producer=Producer.get_by_employee(current_user.email),
drug=Drug.get_by_enr(int(request.form['enr'])),
alternative=request.form['alternative'],
inform_expert_group=request.form['inform_expert_group'],
telephone=request.form['telephone'],
            # fall back to the current user's address when no email was entered
            email=request.form['email'] if request.form['email'] else current_user.email,
end=datetime(int(request.form['year']), int(request.form['month']), int(request.form['day'])),
reason=request.form['reason'],
other_reasons=request.form['other_reasons']
).save()
# save in log
user = User.objects.get(email=current_user.email)
Log(user=user, category='engpass', text='Hat einen Erstmeldung für einen Engpass gemeldet.').save()
flash('Engpass wurde gemeldet.')
return redirect(url_for('main.index'))
return render_template('hersteller/engpass_form.html', form=form)
@main.route('/verwaltung', methods=['GET', 'POST'])
@login_required
@admin_required
def verwaltung():
# update last seen
if current_user.is_authenticated:
current_user.update_last_seen()
    # query all users that have not been authorized yet
unauthorized_users = User.objects(authorized=False)
    # query the last ten Log documents
logs = Log.objects[:10]
return render_template('intern/admin/verwaltung.html', unauthorized_users=unauthorized_users, logs=logs)
@main.route('/edit_engpass/<int:enr>', methods=['GET', 'POST'])
@login_required
def edit_engpass(enr):
form = EngpassForm()
    # load the selected Engpass document
engpass = Engpass.get_by_enr(enr)
if request.method == 'POST':
        # update the Engpass document with the submitted values
engpass['drug'] = Drug.objects.get(enr=int(request.form['enr']))
print(request.form['alternative'])
engpass['alternative'] = True if request.form['alternative'] == 'Ja' else False
engpass['inform_expert_group'] = True if request.form['inform_expert_group'] == 'Ja' else False
engpass['end'] = datetime(int(request.form['year']), int(request.form['month']), int(request.form['day']))
engpass['reason'] = request.form['reason']
engpass['other_reasons'] = request.form['other_reasons']
engpass['telephone'] = request.form['telephone']
engpass['email'] = request.form['email']
engpass.update_last_report()
# save in log
user = User.objects.get(email=current_user.email)
Log(user=user, category='engpass',
text='Hat eine Zwischenmeldung für den Engpass von Arzneimittel ENR {} abgegeben.'.format(request.form['enr'])).save()
return redirect(url_for('main.index'))
    # pre-fill the form fields with values from the Engpass document
form.enr.data = engpass.drug['enr']
form.pzn.data = engpass.drug['pzn']
form.alternative.default = engpass['alternative']
form.inform_expert_group.default = engpass['inform_expert_group']
form.day.default = engpass['end'].day
form.month.default = engpass['end'].month
form.year.default = engpass['end'].year
form.reason.default = engpass['reason']
form.other_reasons.data = engpass['other_reasons']
form.telephone.data = engpass['telephone']
form.email.data = engpass['email']
return render_template('hersteller/engpass_form.html', form=form)
| mit | -664,967,691,219,463,400 | 36.3125 | 130 | 0.650056 | false |
sniperganso/python-manilaclient | manilaclient/tests/functional/test_shares_listing.py | 1 | 8081 | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions
import testtools
from manilaclient import config
from manilaclient.tests.functional import base
CONF = config.CONF
@ddt.ddt
class SharesListReadOnlyTest(base.BaseTestCase):
@ddt.data('admin', 'user')
def test_shares_list(self, role):
self.clients[role].manila('list')
@ddt.data('admin', 'user')
def test_list_with_debug_flag(self, role):
self.clients[role].manila('list', flags='--debug')
@ddt.data('admin', 'user')
def test_shares_list_all_tenants(self, role):
self.clients[role].manila('list', params='--all-tenants')
@ddt.data('admin', 'user')
def test_shares_list_filter_by_name(self, role):
self.clients[role].manila('list', params='--name name')
@ddt.data('admin', 'user')
def test_shares_list_filter_by_status(self, role):
self.clients[role].manila('list', params='--status status')
def test_shares_list_filter_by_share_server_as_admin(self):
self.clients['admin'].manila('list', params='--share-server fake')
def test_shares_list_filter_by_share_server_as_user(self):
self.assertRaises(
exceptions.CommandFailed,
self.clients['user'].manila,
'list',
params='--share-server fake')
@ddt.data('admin', 'user')
def test_shares_list_filter_by_project_id(self, role):
self.clients[role].manila('list', params='--project-id fake')
@ddt.data('admin', 'user')
def test_shares_list_filter_by_host(self, role):
self.clients[role].manila('list', params='--host fake')
@ddt.data('admin', 'user')
def test_shares_list_with_limit_and_offset(self, role):
self.clients[role].manila('list', params='--limit 1 --offset 1')
@ddt.data(
{'role': 'admin', 'direction': 'asc'},
{'role': 'admin', 'direction': 'desc'},
{'role': 'user', 'direction': 'asc'},
{'role': 'user', 'direction': 'desc'})
@ddt.unpack
def test_shares_list_with_sorting(self, role, direction):
self.clients[role].manila(
'list', params='--sort-key host --sort-dir ' + direction)
@ddt.data('admin', 'user')
def test_snapshot_list(self, role):
self.clients[role].manila('snapshot-list')
@ddt.data('admin', 'user')
def test_snapshot_list_all_tenants(self, role):
self.clients[role].manila('snapshot-list', params='--all-tenants')
@ddt.data('admin', 'user')
def test_snapshot_list_filter_by_name(self, role):
self.clients[role].manila('snapshot-list', params='--name name')
@ddt.data('admin', 'user')
def test_snapshot_list_filter_by_status(self, role):
self.clients[role].manila('snapshot-list', params='--status status')
@ddt.ddt
class SharesListReadWriteTest(base.BaseTestCase):
@classmethod
def setUpClass(cls):
super(SharesListReadWriteTest, cls).setUpClass()
cls.private_name = data_utils.rand_name('autotest_share_name')
cls.private_description = data_utils.rand_name(
'autotest_share_description')
cls.public_name = data_utils.rand_name('autotest_public_share_name')
cls.public_description = data_utils.rand_name(
'autotest_public_share_description')
cls.private_share = cls.create_share(
name=cls.private_name,
description=cls.private_description,
public=False,
cleanup_in_class=True,
client=cls.get_user_client(),
wait_for_creation=False)
cls.public_share = cls.create_share(
name=cls.public_name,
description=cls.public_description,
public=True,
client=cls.get_user_client(),
cleanup_in_class=True)
for share_id in (cls.private_share['id'], cls.public_share['id']):
cls.get_admin_client().wait_for_share_status(share_id, 'available')
def _list_shares(self, filters=None):
filters = filters or dict()
shares = self.user_client.list_shares(filters=filters)
self.assertTrue(len(shares) > 1)
for s_id in (self.private_share['id'], self.public_share['id']):
self.assertTrue(any(s_id == s['ID'] for s in shares))
if filters:
for share in shares:
try:
get = self.user_client.get_share(share['ID'])
except exceptions.NotFound:
# NOTE(vponomaryov): Case when some share was deleted
# between our 'list' and 'get' requests. Skip such case.
# It occurs with concurrently running tests.
continue
for k, v in filters.items():
if k in ('share_network', 'share-network'):
k = 'share_network_id'
if v != 'deleting' and get[k] == 'deleting':
continue
self.assertEqual(v, get[k])
def test_list_shares(self):
self._list_shares()
def test_list_shares_for_all_tenants(self):
shares = self.user_client.list_shares(True)
self.assertTrue(len(shares) > 1)
for s_id in (self.private_share['id'], self.public_share['id']):
self.assertTrue(any(s_id == s['ID'] for s in shares))
def test_list_shares_by_name(self):
shares = self.user_client.list_shares(
filters={'name': self.private_name})
self.assertEqual(1, len(shares))
self.assertTrue(
any(self.private_share['id'] == s['ID'] for s in shares))
for share in shares:
get = self.user_client.get_share(share['ID'])
self.assertEqual(self.private_name, get['name'])
def test_list_shares_by_share_type(self):
share_type_id = self.user_client.get_share_type(
self.private_share['share_type'])['ID']
# NOTE(vponomaryov): this is API 2.6+ specific
self._list_shares({'share_type': share_type_id})
def test_list_shares_by_status(self):
self._list_shares({'status': 'available'})
def test_list_shares_by_project_id(self):
project_id = self.admin_client.get_project_id(
self.admin_client.tenant_name)
self._list_shares({'project_id': project_id})
@testtools.skipUnless(
CONF.share_network, "Usage of Share networks is disabled")
def test_list_shares_by_share_network(self):
share_network_id = self.user_client.get_share_network(
CONF.share_network)['id']
self._list_shares({'share_network': share_network_id})
def test_list_shares_by_host(self):
get = self.user_client.get_share(self.private_share['id'])
self._list_shares({'host': get['host']})
@ddt.data(
{'limit': 1},
{'limit': 2},
{'limit': 1, 'offset': 1},
{'limit': 2, 'offset': 0},
)
def test_list_shares_with_limit(self, filters):
shares = self.user_client.list_shares(filters=filters)
self.assertEqual(filters['limit'], len(shares))
def test_list_share_select_column(self):
shares = self.user_client.list_shares(columns="Name,Size")
self.assertTrue(any(s['Name'] is not None for s in shares))
self.assertTrue(any(s['Size'] is not None for s in shares))
self.assertTrue(all('Description' not in s for s in shares))
| apache-2.0 | -5,230,246,703,347,627,000 | 37.117925 | 79 | 0.610073 | false |
minorg/yomeka | test/yomeka_test/classic/omeka_classic_rest_api_client_test.py | 1 | 1984 | import unittest
from .test_credentials import TEST_API_KEY, TEST_COLLECTION_ID, TEST_ENDPOINT_URL, TEST_ITEM_ID
from yomeka.classic.no_such_omeka_classic_collection_exception import NoSuchOmekaClassicCollectionException
from yomeka.classic.no_such_omeka_classic_item_exception import NoSuchOmekaClassicItemException
from yomeka.classic.omeka_classic_collection import OmekaClassicCollection
from yomeka.classic.omeka_classic_file import OmekaClassicFile
from yomeka.classic.omeka_classic_item import OmekaClassicItem
from yomeka.classic.omeka_classic_rest_api_client import OmekaClassicRestApiClient
class OmekaClassicRestApiClientTest(unittest.TestCase):
def setUp(self):
self.__client = OmekaClassicRestApiClient(api_key=TEST_API_KEY, endpoint_url=TEST_ENDPOINT_URL)
def test_get_collection(self):
self.__client.get_collection(id=TEST_COLLECTION_ID)
try:
self.__client.get_collection(id=42)
self.fail()
except NoSuchOmekaClassicCollectionException:
pass
def test_get_collections(self):
collections = self.__client.get_collections(page=1, per_page=2)
self.assertEquals(2, len(collections))
for collection in collections:
self.assertTrue(isinstance(collection, OmekaClassicCollection))
def test_get_files(self):
files = self.__client.get_files(page=1, per_page=10)
self.assertEquals(10, len(files))
for file_ in files:
self.assertTrue(isinstance(file_, OmekaClassicFile))
def test_get_item(self):
self.__client.get_item(id=TEST_ITEM_ID)
try:
self.__client.get_item(id=4242424)
self.fail()
except NoSuchOmekaClassicItemException:
pass
def test_get_items(self):
items = self.__client.get_items(page=1, per_page=2)
self.assertEquals(2, len(items))
for item in items:
self.assertTrue(isinstance(item, OmekaClassicItem))
| bsd-2-clause | -7,684,260,334,593,606,000 | 40.333333 | 107 | 0.705645 | false |
johnowhitaker/bobibabber | sklearn/hmm.py | 1 | 48255 | # Hidden Markov Models
#
# Author: Ron Weiss <[email protected]>
# and Shiqiao Du <[email protected]>
# API changes: Jaques Grobler <[email protected]>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. This module will be removed in version 0.17.
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
@deprecated("WARNING: The HMM module and its functions will be removed in 0.17"
"as it no longer falls within the project's scope and API.")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies the input array in place
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
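# Illustrative behaviour of normalize() (added for clarity, not part of the
# original module). Values are approximate because EPS is added first, and
# the input array is modified in place, as the docstring warns:
#
#     >>> A = np.array([[1.0, 3.0], [2.0, 2.0]])
#     >>> normalize(A, axis=1)
#     array([[ 0.25,  0.75],
#            [ 0.5 ,  0.5 ]])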
@deprecated("WARNING: The HMM module and its function will be removed in 0.17"
"as it no longer falls within the project's scope and API.")
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
        subclass-specific emission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats["trans"] += np.exp(logsumexp(lneta, 0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
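# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the original module. It shows,
# under stated assumptions, the subclass contract described in the _BaseHMM
# class comment above: a concrete emission model only needs
# _compute_log_likelihood() and _generate_sample_from_state() to get score(),
# decode() and sample() for free; fitting would additionally require the
# sufficient-statistics hooks (_init, _initialize_sufficient_statistics,
# _accumulate_sufficient_statistics, _do_mstep). The class name and attribute
# below are hypothetical.
#
# class _FixedDiscreteHMM(_BaseHMM):
#     """Toy HMM whose per-state emission probabilities are fixed up front."""
#     def __init__(self, emissionprob, **kwargs):
#         super(_FixedDiscreteHMM, self).__init__(
#             n_components=len(emissionprob), **kwargs)
#         self._emissionprob = np.asarray(emissionprob)
#     def _compute_log_likelihood(self, obs):
#         # log P(obs_t | state) for every frame and state
#         return np.log(self._emissionprob[:, np.asarray(obs).ravel()]).T
#     def _generate_sample_from_state(self, state, random_state=None):
#         random_state = check_random_state(random_state)
#         return (np.cumsum(self._emissionprob[state])
#                 > random_state.rand()).argmax()
# ---------------------------------------------------------------------------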
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
self._covars_[self._covars_==0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in range(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small).  You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs)
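# ---------------------------------------------------------------------------
# NOTE: illustrative usage only, not part of the original module. A minimal
# end-to-end sketch for GaussianHMM -- sample a sequence from a hand-built
# model, then fit a fresh model on it and decode the state path. fit()
# expects a *list* of observation sequences.
#
# true_model = GaussianHMM(n_components=2, covariance_type='diag')
# true_model.means_ = np.array([[0.0], [5.0]])
# true_model.covars_ = np.array([[1.0], [1.0]])
# obs, states = true_model.sample(100, random_state=0)
# learned = GaussianHMM(n_components=2, covariance_type='diag', n_iter=20)
# learned.fit([obs])
# logprob, decoded = learned.decode(obs)
# ---------------------------------------------------------------------------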
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
emissionprob : array, shape ('n_components`, 'n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input is discontinuous
return False
return True
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
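# ---------------------------------------------------------------------------
# NOTE: illustrative usage only, not part of the original module.
# MultinomialHMM expects non-negative integer symbols whose values form a
# contiguous range (see _check_input_symbols above).
#
# model = MultinomialHMM(n_components=2, n_iter=20)
# obs = [np.array([0, 1, 2, 1, 0, 2, 2, 1])]
# model.fit(obs)
# logprob, states = model.decode(obs[0])
# ---------------------------------------------------------------------------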
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat, 'm' for
means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix that is incompatible with scikit-learn's
# BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.score_samples(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
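# ---------------------------------------------------------------------------
# NOTE: illustrative usage only, not part of the original module. GMMHMM
# attaches one GMM emission model per hidden state; ``n_mix`` controls how
# many mixture components each state's GMM uses. ``X`` below stands for any
# (n_samples, n_features) observation array and is an assumption.
#
# model = GMMHMM(n_components=2, n_mix=3, covariance_type='diag', n_iter=10)
# model.fit([X])
# logprob, states = model.decode(X)
# ---------------------------------------------------------------------------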
| mit | 8,186,882,143,462,571,000 | 36.435997 | 82 | 0.575505 | false |
Vivaq/g2p | g2p_project/g2p_project/settings.py | 1 | 1969 | import os
SETTINGS_DIR = os.path.dirname(__file__)
PROJECT_PATH = os.path.join(SETTINGS_DIR, os.pardir)
PROJECT_ROOT = os.path.abspath(PROJECT_PATH)
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
SECRET_KEY = 'u)vhj6nj*)(i(8zg2f0!j=xwg+309om2v@o$-sn0l9a5u0=%+7'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'g2p',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'g2p.middleware.RequireLoginMiddleware',
)
LOGIN_REQUIRED_URLS = (
    r'/(.*)$',  # matches every URL path, so login is required site-wide unless excepted below
r'/downloadData(.*)$'
)
LOGIN_REQUIRED_URLS_EXCEPTIONS = (
r'/login(.*)$',
r'/logout(.*)$',
r'/staff(.*)$',
)
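# NOTE: illustrative sketch only, not part of the original settings. The two
# tuples above are presumably consumed by g2p.middleware.RequireLoginMiddleware
# roughly as below; the real implementation may differ.
#
# import re
# from django.contrib.auth.views import redirect_to_login
#
# class RequireLoginMiddleware(object):
#     def __init__(self):
#         self.required = tuple(re.compile(p) for p in LOGIN_REQUIRED_URLS)
#         self.exceptions = tuple(re.compile(p)
#                                 for p in LOGIN_REQUIRED_URLS_EXCEPTIONS)
#     def process_view(self, request, view_func, view_args, view_kwargs):
#         if request.user.is_authenticated():
#             return None
#         if any(p.match(request.path) for p in self.exceptions):
#             return None
#         if any(p.match(request.path) for p in self.required):
#             return redirect_to_login(request.path)
#         return None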
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
)
ROOT_URLCONF = 'g2p_project.urls'
WSGI_APPLICATION = 'g2p_project.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'database.sqlite3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| bsd-2-clause | 4,745,019,941,371,833,000 | 20.637363 | 65 | 0.653123 | false |
lopiola/integracja_wypadki | scripts/db_api/person.py | 1 | 3215 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Manipulates the person table
"""
from psycopg2 import connect
import common
constraints = {
'sex': ['MALE', 'FEMALE', 'UNKNOWN'],
'injury_level': ['FATAL', 'SERIOUS', 'SLIGHT', 'NONE', 'UNKNOWN'],
'type': ['DRIVER', 'PASSENGER', 'PEDESTRIAN', 'UNKNOWN'],
'seatbelt': ['NOT_APPLICABLE', 'WORN_CONFIRMED', 'WORN_NOT_CONFIRMED', 'NOT_WORN', 'UNKNOWN'],
'seated_pos': ['DRIVER', 'PASSENGER', 'BACK', 'NONE', 'UNKNOWN']
}
def new(id,
acc_id,
veh_id,
sex,
age,
injury_level,
type='UNKNOWN',
seatbelt='UNKNOWN',
seated_pos='UNKNOWN'):
person = {
'id': id,
'acc_id': acc_id,
'veh_id': veh_id,
'sex': sex,
'age': age,
'injury_level': injury_level,
'type': type,
'seatbelt': seatbelt,
'seated_pos': seated_pos,
}
common.check_key_constraints(person, constraints)
return person
def new_from_dict(person_data):
person = {
'type': 'UNKNOWN',
'seatbelt': 'UNKNOWN',
'seated_pos': 'UNKNOWN',
}
person.update(person_data)
# TODO: Check obligatory fields
common.check_key_constraints(person, constraints)
return person
def insert(person_list):
if not isinstance(person_list, list):
person_list = [person_list]
user = common.get_user()
database = common.get_db_name()
con = connect(user=user, database=database)
cur = con.cursor()
for person in person_list:
cur.execute(insert_command(person))
cur.close()
con.commit()
con.close()
def delete(id_list):
if not isinstance(id_list, list):
id_list = [id_list]
user = common.get_user()
database = common.get_db_name()
con = connect(user=user, database=database)
cur = con.cursor()
for person_id in id_list:
cur.execute(delete_command(person_id))
cur.close()
con.commit()
con.close()
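# NOTE: illustrative usage only, not part of the original module. Assumes the
# target PostgreSQL database and the person table (see create_table_command()
# below) already exist; the id values are arbitrary.
#
# def _example_usage():
#     person = new(id=1, acc_id=10, veh_id=100, sex='MALE', age=42,
#                  injury_level='SLIGHT', type='DRIVER',
#                  seatbelt='WORN_CONFIRMED', seated_pos='DRIVER')
#     insert([person])
#     delete([1])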
def create_table_command():
return '''
CREATE TABLE person(
id BIGINT PRIMARY KEY NOT NULL,
acc_id BIGINT NOT NULL,
veh_id BIGINT NULL,
sex TEXT NOT NULL,
age INT NOT NULL,
type TEXT NOT NULL,
injury_level TEXT NOT NULL,
seatbelt TEXT NOT NULL,
seated_pos TEXT NOT NULL
);
'''
def insert_command(person):
command = '''
INSERT INTO person VALUES (
{id},
{acc_id},
{veh_id},
'{sex}',
{age},
'{type}',
'{injury_level}',
'{seatbelt}',
'{seated_pos}'
);
'''
command = command.format(
id=person['id'],
acc_id=person['acc_id'],
veh_id=person['veh_id'],
sex=person['sex'],
age=person['age'],
type=person['type'],
injury_level=person['injury_level'],
seatbelt=person['seatbelt'],
seated_pos=person['seated_pos'],
)
return command
def delete_command(person_id):
command = '''DELETE FROM person WHERE id = {id}'''
return command.format(id=person_id) | mit | 430,964,396,947,975,200 | 22.136691 | 98 | 0.536236 | false |
betterlife/flask-psi | psi/app/views/inventory_transaction.py | 2 | 6267 | from datetime import datetime
from flask_admin.contrib.sqla.ajax import QueryAjaxModelLoader
from flask_admin.model.fields import AjaxSelectField
from psi.app.models import Product
from psi.app import service
from psi.app.models import InventoryTransactionLine, InventoryTransaction
from psi.app.utils import security_util
from flask_admin.contrib.sqla.filters import FloatGreaterFilter, FloatSmallerFilter
from flask_admin.model import InlineFormAdmin
from flask_babelex import lazy_gettext
from .formatter import receivings_formatter, shipping_formatter, \
default_date_formatter, type_field, date_field, product_field, price_field, \
quantity_field, total_amount_field, remark_field, saleable_quantity_field, \
line_formatter
from psi.app.views.base import ModelViewWithAccess, ModelWithLineFormatter
class InventoryTransactionLineInlineAdmin(InlineFormAdmin):
form_args = dict(
id=dict(label=lazy_gettext('id')),
product=dict(label=lazy_gettext('Product')),
price=dict(label=lazy_gettext('Inventory Transaction Price'),
description=lazy_gettext('For sales, it should be sell price, '
                                            'for item lost or broken, it should be purchase price plus logistics cost')),
in_transit_quantity=dict(label=lazy_gettext('In Transit Quantity'),
description=lazy_gettext('Quantity of product ordered but still on the way')),
quantity=dict(label=lazy_gettext('Actual Quantity Change'),
description=lazy_gettext('This quantity should be a negative number '
'for sales, item lost or item broken')),
remark=dict(label=lazy_gettext('Remark')),
)
def postprocess_form(self, form):
from psi.app.views.components import DisabledStringField
form.total_amount = DisabledStringField(label=lazy_gettext('Total Amount'))
        form.saleable_quantity = DisabledStringField(label=lazy_gettext('Saleable Quantity'))
ajaxLoader = QueryAjaxModelLoader(name='product',
session=service.Info.get_db().session,
model=Product,
fields=['name'])
        form.product = AjaxSelectField(ajaxLoader, label=lazy_gettext('Product (can be searched by first letter)'))
form.itl_receiving_line = None
form.remark = None
form.itl_shipping_line = None
form.in_transit_quantity = None
return form
class InventoryTransactionAdmin(ModelViewWithAccess, ModelWithLineFormatter):
can_delete = False
column_list = ('id', 'type', 'date', 'total_amount', 'it_receiving', 'it_shipping', 'remark')
column_sortable_list = ('id', ('type', 'type.display'), 'total_amount', 'date',)
form_columns = ('type', 'date', 'total_amount', 'remark', 'lines')
form_create_rules = ('type', 'date', 'remark', 'lines',)
form_edit_rules = ('type', 'date', 'remark', 'lines',)
column_editable_list = ('remark',)
column_filters = ('date',
FloatGreaterFilter(InventoryTransaction.total_amount, lazy_gettext('Total Amount')),
FloatSmallerFilter(InventoryTransaction.total_amount, lazy_gettext('Total Amount')),)
column_searchable_list = ('type.display', 'remark')
column_details_list = ('id', 'type', 'date', 'total_amount', 'remark', 'lines', 'it_receiving', 'it_shipping',)
column_labels = {
'id': lazy_gettext('id'),
'type': lazy_gettext('Inventory Transaction Type'),
'date': lazy_gettext('Date'),
'total_amount': lazy_gettext('Total Amount'),
'remark': lazy_gettext('Remark'),
'lines': lazy_gettext('Lines'),
'it_receiving': lazy_gettext('Related Receiving'),
'it_shipping': lazy_gettext('Related Shipping'),
}
form_excluded_columns = ('it_shipping', 'it_receiving')
form_args = dict(
type=dict(query_factory=InventoryTransaction.manual_type_filter),
date=dict(default=datetime.now()),
)
from psi.app.views.components import DisabledStringField
form_extra_fields = {
'total_amount': DisabledStringField(label=lazy_gettext('Total Amount')),
}
form_ajax_refs = {
'product': QueryAjaxModelLoader(name='product',
session=service.Info.get_db().session,
model=Product,
# --> Still need to filter the products by organization.
# --> Line 209 is commented out, need to bring it back.
fields=['name', 'mnemonic'])
}
column_formatters = {
'it_receiving': receivings_formatter,
'it_shipping': shipping_formatter,
'date': default_date_formatter,
'lines': line_formatter,
}
inline_models = (InventoryTransactionLineInlineAdmin(InventoryTransactionLine),)
def get_list_columns(self):
"""
This method is called instantly in list.html
List of columns is decided runtime during render of the table
Not decided during flask-admin blueprint startup.
"""
columns = super(InventoryTransactionAdmin, self).get_list_columns()
cols = ['total_amount']
columns = security_util.filter_columns_by_role(
columns, cols, 'purchase_price_view'
)
return columns
def get_details_columns(self):
cols = ['total_amount']
columns = super(InventoryTransactionAdmin, self).get_details_columns()
columns = security_util.filter_columns_by_role(
columns, cols, 'purchase_price_view'
)
return columns
@property
def line_fields(self):
if not security_util.user_has_role('purchase_price_view'):
return [type_field, date_field, product_field, quantity_field,
saleable_quantity_field, remark_field]
return [type_field, date_field, product_field, price_field,
quantity_field, total_amount_field, saleable_quantity_field,
remark_field]
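# ---------------------------------------------------------------------------
# NOTE: illustrative registration only, not part of the original module. The
# names ``admin`` and ``db`` are assumptions; the real wiring lives elsewhere
# in the application.
#
# admin.add_view(InventoryTransactionAdmin(
#     InventoryTransaction, db.session,
#     name=lazy_gettext('Inventory Transaction')))
# ---------------------------------------------------------------------------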
| mit | 1,161,510,000,624,890,400 | 43.133803 | 119 | 0.62502 | false |
unicefuganda/edtrac | edtrac_project/rapidsms_contact/contact/migrations/0002_auto__add_field_flag_words__add_field_flag_rule__add_field_flag_rule_r.py | 1 | 13099 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Flag.words'
db.add_column('contact_flag', 'words', self.gf('django.db.models.fields.CharField')(max_length=200, null=True), keep_default=False)
# Adding field 'Flag.rule'
db.add_column('contact_flag', 'rule', self.gf('django.db.models.fields.IntegerField')(max_length=10, null=True), keep_default=False)
# Adding field 'Flag.rule_regex'
db.add_column('contact_flag', 'rule_regex', self.gf('django.db.models.fields.CharField')(max_length=200, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Flag.words'
db.delete_column('contact_flag', 'words')
# Deleting field 'Flag.rule'
db.delete_column('contact_flag', 'rule')
# Deleting field 'Flag.rule_regex'
db.delete_column('contact_flag', 'rule_regex')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contact.flag': {
'Meta': {'object_name': 'Flag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'rule': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True'}),
'rule_regex': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'words': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
'contact.masstext': {
'Meta': {'object_name': 'MassText'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'masstexts'", 'symmetrical': 'False', 'to': "orm['rapidsms.Contact']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'text': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contact.messageflag': {
'Meta': {'object_name': 'MessageFlag'},
'flag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'null': 'True', 'to': "orm['contact.Flag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['rapidsms_httprouter.Message']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'locations.location': {
'Meta': {'object_name': 'Location'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['locations.LocationType']"})
},
'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True', 'db_index': 'True'})
},
'locations.point': {
'Meta': {'object_name': 'Point'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'rapidsms.backend': {
'Meta': {'object_name': 'Backend'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'rapidsms.connection': {
'Meta': {'unique_together': "(('backend', 'identity'),)", 'object_name': 'Connection'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Backend']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'birthdate': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'health_facility': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reporting_location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Location']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'village': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'villagers'", 'null': 'True', 'to': "orm['locations.Location']"}),
'village_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'rapidsms_httprouter.message': {
'Meta': {'object_name': 'Message'},
'application': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'null': 'True', 'to': "orm['rapidsms_httprouter.MessageBatch']"}),
'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['rapidsms.Connection']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_response_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'null': 'True', 'to': "orm['rapidsms_httprouter.Message']"}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '10', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'db_index': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
'rapidsms_httprouter.messagebatch': {
'Meta': {'object_name': 'MessageBatch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contact']
| bsd-3-clause | 8,817,603,443,012,260,000 | 73.851429 | 182 | 0.551187 | false |
codelv/enaml-native | src/enamlnative/android/android_bottom_sheet_dialog.py | 1 | 1316 | """
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Jan 29, 2018
@author: jrm
"""
from atom.api import Typed, set_default
from enamlnative.widgets.bottom_sheet_dialog import ProxyBottomSheetDialog
from .android_dialog import Dialog, AndroidDialog
class BottomSheetDialog(Dialog):
package = 'com.google.android.material.bottomsheet'
#: Simply uses a different class
__nativeclass__ = set_default('%s.BottomSheetDialog' % package)
class AndroidBottomSheetDialog(AndroidDialog, ProxyBottomSheetDialog):
""" An Android implementation of an Enaml ProxyBottomSheetDialog.
"""
#: A reference to the widget created by the proxy.
dialog = Typed(BottomSheetDialog)
# -------------------------------------------------------------------------
# Initialization API
# -------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
self.dialog = BottomSheetDialog(self.get_context(), d.style)
| mit | 1,975,408,620,800,349,200 | 29.604651 | 79 | 0.631459 | false |
jcarbaugh/django-blogdor | blogdor/templatetags/blog.py | 1 | 3930 | from blogdor import utils
from blogdor.models import Post
from django import template
from django.conf import settings
from django.db.models import Count
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
from tagging.models import Tag
register = template.Library()
class PostsNode(template.Node):
def __init__(self, queryset, count, offset, varname):
self.posts = queryset[offset:count+offset]
self.varname = varname
def render(self, context):
context[self.varname] = self.posts
return ''
class UserPostsNode(template.Node):
def __init__(self, user, count, offset, varname):
self.user = template.Variable(user)
self.count = count
self.offset = offset
self.varname = varname
def render(self, context):
user = self.user.resolve(context)
posts = Post.objects.published().filter(author=user).select_related()
context[self.varname] = posts[self.offset:self.count+self.offset]
return ''
class TagListNode(template.Node):
def __init__(self, tags, varname):
self.tags = tags
self.varname = varname
def render(self, context):
context[self.varname] = self.tags
return ''
def _simple_get_posts(token, queryset):
pieces = token.contents.split()
as_index = pieces.index('as')
if as_index == -1 or as_index > 3 or len(pieces) != as_index+2:
        raise template.TemplateSyntaxError('%r tag must be in format {%% %r [count [offset]] as varname %%}' %
                                           (pieces[0], pieces[0]))
# count & offset
count = 5
offset = 0
if as_index > 1:
count = int(pieces[1])
if as_index > 2:
        offset = int(pieces[2])
varname = pieces[as_index+1]
return PostsNode(queryset, count, offset, varname)
@register.tag
def get_recent_posts(parser, token):
return _simple_get_posts(token, Post.objects.published().select_related())
@register.tag
def get_favorite_posts(parser, token):
return _simple_get_posts(token, Post.objects.published().filter(is_favorite=True).select_related())
@register.tag
def get_user_posts(parser, token):
pieces = token.contents.split()
as_index = pieces.index('as')
if as_index < 2 or as_index > 4 or len(pieces) != as_index+2:
        raise template.TemplateSyntaxError('%r tag must be in format {%% %r user [count [offset]] as varname %%}' %
                                           (pieces[0], pieces[0]))
# count & offset
count = 5
offset = 0
if as_index > 2:
count = int(pieces[2])
if as_index > 3:
        offset = int(pieces[3])
user = pieces[1]
varname = pieces[as_index+1]
return UserPostsNode(user, count, offset, varname)
@register.tag
def get_tag_counts(parser, token):
pieces = token.contents.split()
if len(pieces) != 4:
        raise template.TemplateSyntaxError('%r tag must be in format {%% %r comma,separated,tags as varname %%}' % (pieces[0], pieces[0]))
tags = pieces[1].split(',')
post_ct = ContentType.objects.get_for_model(Post).id
tags = Tag.objects.filter(items__content_type=post_ct, name__in=tags).annotate(count=Count('id'))
varname = pieces[-1]
return TagListNode(tags, varname)
@register.tag
def get_popular_tags(parser, token):
pieces = token.contents.split()
if len(pieces) != 4:
        raise template.TemplateSyntaxError('%r tag must be in format {%% %r num as varname %%}' % (pieces[0], pieces[0]))
num_tags = int(pieces[1])
post_ct = ContentType.objects.get_for_model(Post).id
tags = Tag.objects.filter(items__content_type=post_ct).annotate(count=Count('id')).order_by('-count').filter(count__gt=5)[:num_tags]
varname = pieces[-1]
return TagListNode(tags, varname)
@register.simple_tag
def gravatar(email):
return render_to_string("blogdor/gravatar_img.html", {"url": utils.gravatar(email)})
| bsd-3-clause | 1,578,761,664,994,745,000 | 32.02521 | 136 | 0.643511 | false |
markmc/oslo.messaging | tests/test_notifier.py | 1 | 7858 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
import uuid
import fixtures
import testscenarios
from oslo import messaging
from oslo.messaging.notify import _impl_messaging
from oslo.messaging.notify import _impl_test
from oslo.messaging.notify import notifier as msg_notifier
from oslo.messaging.openstack.common import jsonutils
from oslo.messaging.openstack.common import timeutils
from oslo.messaging import serializer as msg_serializer
from tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class _FakeTransport(object):
def __init__(self, conf):
self.conf = conf
def _send(self, target, ctxt, message,
wait_for_reply=None, timeout=None, envelope=False):
pass
class _ReRaiseLoggedExceptionsFixture(fixtures.Fixture):
"""Record logged exceptions and re-raise in cleanup.
The notifier just logs notification send errors so, for the sake of
debugging test failures, we record any exceptions logged and re-raise them
during cleanup.
"""
class FakeLogger(object):
def __init__(self):
self.exceptions = []
def exception(self, msg, *args, **kwargs):
self.exceptions.append(sys.exc_info()[1])
def setUp(self):
super(_ReRaiseLoggedExceptionsFixture, self).setUp()
self.logger = self.FakeLogger()
def reraise_exceptions():
for ex in self.logger.exceptions:
raise ex
self.addCleanup(reraise_exceptions)
class TestMessagingNotifier(test_utils.BaseTestCase):
_v1 = [
('v1', dict(v1=True)),
('not_v1', dict(v1=False)),
]
_v2 = [
('v2', dict(v2=True)),
('not_v2', dict(v2=False)),
]
_topics = [
('no_topics', dict(topics=[])),
('single_topic', dict(topics=['notifications'])),
('multiple_topic2', dict(topics=['foo', 'bar'])),
]
_priority = [
('debug', dict(priority='debug')),
('info', dict(priority='info')),
('warn', dict(priority='warn')),
('error', dict(priority='error')),
('critical', dict(priority='critical')),
]
_payload = [
('payload', dict(payload={'foo': 'bar'})),
]
_context = [
('ctxt', dict(ctxt={'user': 'bob'})),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._v1,
cls._v2,
cls._topics,
cls._priority,
cls._payload,
cls._context)
def setUp(self):
super(TestMessagingNotifier, self).setUp()
self.conf.register_opts(msg_notifier._notifier_opts)
self.addCleanup(timeutils.clear_time_override)
self.logger = self.useFixture(_ReRaiseLoggedExceptionsFixture()).logger
self.stubs.Set(_impl_messaging, 'LOG', self.logger)
self.stubs.Set(msg_notifier, '_LOG', self.logger)
def test_notifier(self):
drivers = []
if self.v1:
drivers.append('messaging')
if self.v2:
drivers.append('messagingv2')
self.config(notification_driver=drivers)
self.config(notification_topics=self.topics)
transport = _FakeTransport(self.conf)
notifier = messaging.Notifier(transport, 'test.localhost')
self.mox.StubOutWithMock(transport, '_send')
message_id = uuid.uuid4()
self.mox.StubOutWithMock(uuid, 'uuid4')
uuid.uuid4().AndReturn(message_id)
timeutils.set_time_override()
message = {
'message_id': str(message_id),
'publisher_id': 'test.localhost',
'event_type': 'test.notify',
'priority': self.priority.upper(),
'payload': self.payload,
'timestamp': str(timeutils.utcnow.override_time),
}
sends = []
if self.v1:
sends.append(dict(envelope=False))
if self.v2:
sends.append(dict(envelope=True))
for send_kwargs in sends:
for topic in self.topics:
target = messaging.Target(topic='%s.%s' % (topic,
self.priority))
transport._send(target, self.ctxt, message, **send_kwargs)
self.mox.ReplayAll()
method = getattr(notifier, self.priority)
method(self.ctxt, 'test.notify', self.payload)
TestMessagingNotifier.generate_scenarios()
class TestSerializer(test_utils.BaseTestCase):
def setUp(self):
super(TestSerializer, self).setUp()
self.addCleanup(_impl_test.reset)
self.addCleanup(timeutils.clear_time_override)
def test_serializer(self):
transport = _FakeTransport(self.conf)
serializer = msg_serializer.NoOpSerializer()
notifier = messaging.Notifier(transport,
'test.localhost',
driver='test',
topic='test',
serializer=serializer)
message_id = uuid.uuid4()
self.mox.StubOutWithMock(uuid, 'uuid4')
uuid.uuid4().AndReturn(message_id)
timeutils.set_time_override()
self.mox.StubOutWithMock(serializer, 'serialize_entity')
serializer.serialize_entity({}, 'bar').AndReturn('sbar')
self.mox.ReplayAll()
notifier.info({}, 'test.notify', 'bar')
message = {
'message_id': str(message_id),
'publisher_id': 'test.localhost',
'event_type': 'test.notify',
'priority': 'INFO',
'payload': 'sbar',
'timestamp': str(timeutils.utcnow.override_time),
}
self.assertEquals(_impl_test.NOTIFICATIONS, [({}, message, 'INFO')])
class TestLogNotifier(test_utils.BaseTestCase):
def setUp(self):
super(TestLogNotifier, self).setUp()
self.conf.register_opts(msg_notifier._notifier_opts)
self.addCleanup(timeutils.clear_time_override)
def test_notifier(self):
self.config(notification_driver=['log'])
transport = _FakeTransport(self.conf)
notifier = messaging.Notifier(transport, 'test.localhost')
message_id = uuid.uuid4()
self.mox.StubOutWithMock(uuid, 'uuid4')
uuid.uuid4().AndReturn(message_id)
timeutils.set_time_override()
message = {
'message_id': str(message_id),
'publisher_id': 'test.localhost',
'event_type': 'test.notify',
'priority': 'INFO',
'payload': 'bar',
'timestamp': str(timeutils.utcnow.override_time),
}
logger = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(logging, 'getLogger')
logging.getLogger('oslo.messaging.notification.test.notify').\
AndReturn(logger)
logger.info(jsonutils.dumps(message))
self.mox.ReplayAll()
notifier.info({}, 'test.notify', 'bar')
| apache-2.0 | 3,101,194,088,723,180,000 | 28.992366 | 79 | 0.580937 | false |
d120/pyTUID | pyTUID/models.py | 1 | 1337 | import ast
from django.db import models
from django.utils.translation import ugettext as _
class TUIDUser(models.Model):
"""Represents a TUID user with various properties returned from CAS"""
class Meta:
verbose_name = _('TUID User')
verbose_name_plural = _('TUID Users')
uid = models.CharField(max_length=50, unique=True, verbose_name=_('TUID'))
surname = models.CharField(max_length=50, verbose_name=_('surname'))
given_name = models.CharField(max_length=50, verbose_name=_('given name'))
email = models.EmailField(blank=True, null=True, verbose_name=_('email'))
groups = models.TextField(verbose_name=_('cas groups'))
def group_list(self):
"""Returns all the groups as list of strings"""
if self.groups and len(self.groups) > 0:
return ast.literal_eval(self.groups) if self.groups[0] == '[' and self.groups[-1] == ']' else [self.groups]
else:
return []
    def in_group(self, group_string):
        """Checks whether this user is in the specified group"""
return group_string in self.group_list()
def name(self):
"""Returns the users full name"""
return self.given_name + ' ' + self.surname
name.short_description = _('name')
def __str__(self):
return self.name() + ' (' + self.uid + ')'
| mit | -224,083,952,602,156,320 | 37.2 | 119 | 0.628272 | false |
IntegratedAlarmSystem-Group/ias-webserver | alarms/tests/tests_core_consumer.py | 1 | 3914 | import datetime
import pytest
from channels.testing import WebsocketCommunicator
from alarms.collections import AlarmCollection
from ias_webserver.routing import application as ias_app
from ias_webserver.settings import PROCESS_CONNECTION_PASS
class TestCoreConsumer:
"""This class defines the test suite for the CoreConsumer"""
def setup_method(self):
"""TestCase setup, executed before each test of the TestCase"""
# Arrange:
self.iasio_alarm = {
'id': "AlarmType-ID",
'shortDesc': "Test iasio",
'iasType': "alarm",
'docUrl': 'www.dummy-url.com'
}
self.iasio_double = {
'id': "DoubleType-ID",
'shortDesc': "Test iasio",
'iasType': "double",
'docUrl': 'www.dummy-url.com'
}
self.iasios = [self.iasio_alarm, self.iasio_double]
self.ws_url = '/core/?password={}'.format(PROCESS_CONNECTION_PASS)
@pytest.mark.asyncio
@pytest.mark.django_db
async def test_receive_json(self):
""" Test if the core consumer receives the list of iasios and passes it to the AlarmCollection """
AlarmCollection.reset(self.iasios)
old_alarms_count = len(AlarmCollection.get_all_as_list())
# Connect:
communicator = WebsocketCommunicator(ias_app, self.ws_url)
connected, subprotocol = await communicator.connect()
assert connected, 'The communicator was not connected'
# Arrange:
current_time = datetime.datetime.now()
formatted_current_time = current_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
core_ids = [
'AlarmType-ID1',
'AlarmType-ID2',
'AlarmType-ID3'
]
msg = [
{
"value": "SET_MEDIUM",
"productionTStamp": formatted_current_time,
"sentToBsdbTStamp": formatted_current_time,
"mode": "OPERATIONAL", # 5: OPERATIONAL
"iasValidity": "RELIABLE",
"fullRunningId": "(Monitored-System-ID:MONITORED_SOFTWARE_SYSTEM)" + \
"@(plugin-ID:PLUGIN)@(Converter-ID:CONVERTER)@(AlarmType-ID1:IASIO)",
"valueType": "ALARM"
},
{
"value": "SET_HIGH",
"productionTStamp": formatted_current_time,
"sentToBsdbTStamp": formatted_current_time,
"mode": "OPERATIONAL", # 5: OPERATIONAL
"iasValidity": "RELIABLE",
"fullRunningId": "(Monitored-System-ID:MONITORED_SOFTWARE_SYSTEM)" + \
"@(plugin-ID:PLUGIN)@(Converter-ID:CONVERTER)@(AlarmType-ID2:IASIO)",
"valueType": "ALARM"
},
{
"value": "SET_MEDIUM",
"productionTStamp": formatted_current_time,
"sentToBsdbTStamp": formatted_current_time,
"mode": "OPERATIONAL", # 5: OPERATIONAL
"iasValidity": "RELIABLE",
"fullRunningId": "(Monitored-System-ID:MONITORED_SOFTWARE_SYSTEM)" + \
"@(plugin-ID:PLUGIN)@(Converter-ID:CONVERTER)@(AlarmType-ID3:IASIO)",
"valueType": "ALARM"
},
]
# Act:
await communicator.send_json_to(msg)
response = await communicator.receive_from()
# Assert:
all_alarms_list = [a.core_id for a in AlarmCollection.get_all_as_list()]
new_alarms_count = len(all_alarms_list)
assert response == 'Received 3 IASIOS', 'The alarms were not received'
        assert old_alarms_count + 3 == new_alarms_count, 'The Iasios should have been added to the AlarmCollection'
for core_id in core_ids:
assert core_id in all_alarms_list, 'The alarm {} is not in the collection'.format(core_id)
# Close:
await communicator.disconnect()
| lgpl-3.0 | -7,944,325,527,755,995,000 | 42.010989 | 114 | 0.572305 | false |
lsp84ch83/PyText | Appium/appium-framwork/script/test_Anewnotest1.py | 1 | 2759 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2018/4/1 16:24
# @Author : Soner
# @version : 1.0.0
# @license : Copyright(C), Your Company
from appium import webdriver
from selenium.webdriver.common.by import By
from time import sleep
import unittest
import xlutils,xlrd,xlwt
class Anewnotest1(unittest.TestCase):
    # setUp: initialization run before each test
def setUp(self):
        # Desired capabilities describing the device and app under test
        desired_caps = {
            'platformName': 'Android',  # platform
            'platformVersion': '4.4',  # platform version
            'deviceName': '192.168.103.101:5555',  # device name
            'appPackage': 'com.youdao.note',  # application package name
            'appActivity': '.activity2.SplashActivity',  # launch activity name
            'unicodeKeyboard': 'True',  # allow non-ASCII (e.g. Chinese) keyboard input
            'resetKeyboard': 'True'  # reset the keyboard when the session ends
        }
        # Start the Appium session
self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
sleep(3)
def test_newnote(self):
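        """Create a note for each data row in data.xls and verify the result."""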
driver = self.driver
        # Read the test data from the Excel workbook
wb = xlrd.open_workbook(r'f:\PyText\Appium\appium-framwork\data\data.xls')
sh = wb.sheet_by_name('note')
r_num = sh.nrows
        # Iterate over the data rows
        for i in range(1, r_num):
            id = sh.cell_value(i, 0)
title = sh.cell_value(i, 1)
content = sh.cell_value(i, 2)
result = sh.cell_value(i, 3)
sleep(3)
            # Tap the floating button to create a new note
driver.find_element(By.ID, 'com.youdao.note:id/add_note_floater_open').click()
            # Choose the "new note" option
driver.find_element(By.NAME, '新建笔记').click()
            # Enter the note title
driver.find_element(By.ID, 'com.youdao.note:id/note_title').send_keys(title)
            # Enter the note content
driver.find_element(By.XPATH,
'//android.widget.LinearLayout[@resource-id=\"com.youdao.note:id/note_content\"]/android.widget.EditText[1]').send_keys(
content)
            # Save the note
driver.find_element(By.NAME, '完成').click()
            # Verify the result
if title == '':
res1 = driver.find_element(By.ID, 'com.youdao.note:id/title').text
res2 = driver.find_element(By.ID, 'com.youdao.note:id/summary').text
if res1 == res2:
print('success')
else:
print('fail')
elif result == 'ok':
if driver.find_element(By.NAME, title) and driver.find_element(By.NAME, content):
print("success")
else:
print("fail")
def tearDown(self):
self.driver.quit() | gpl-3.0 | 7,042,348,827,297,111,000 | 32.714286 | 152 | 0.524855 | false |
tboyce021/home-assistant | tests/components/airly/test_init.py | 1 | 4179 | """Test init of Airly integration."""
from datetime import timedelta
from homeassistant.components.airly.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import STATE_UNAVAILABLE
from . import API_POINT_URL
from tests.common import MockConfigEntry, load_fixture
from tests.components.airly import init_integration
async def test_async_setup_entry(hass, aioclient_mock):
"""Test a successful setup entry."""
await init_integration(hass, aioclient_mock)
state = hass.states.get("air_quality.home")
assert state is not None
assert state.state != STATE_UNAVAILABLE
assert state.state == "14"
async def test_config_not_ready(hass, aioclient_mock):
"""Test for setup failure if connection to Airly is missing."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Home",
unique_id="123-456",
data={
"api_key": "foo",
"latitude": 123,
"longitude": 456,
"name": "Home",
},
)
aioclient_mock.get(API_POINT_URL, exc=ConnectionError())
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_config_without_unique_id(hass, aioclient_mock):
"""Test for setup entry without unique_id."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Home",
data={
"api_key": "foo",
"latitude": 123,
"longitude": 456,
"name": "Home",
},
)
aioclient_mock.get(API_POINT_URL, text=load_fixture("airly_valid_station.json"))
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_LOADED
assert entry.unique_id == "123-456"
async def test_config_with_turned_off_station(hass, aioclient_mock):
"""Test for setup entry for a turned off measuring station."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Home",
unique_id="123-456",
data={
"api_key": "foo",
"latitude": 123,
"longitude": 456,
"name": "Home",
},
)
aioclient_mock.get(API_POINT_URL, text=load_fixture("airly_no_station.json"))
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_update_interval(hass, aioclient_mock):
"""Test correct update interval when the number of configured instances changes."""
entry = await init_integration(hass, aioclient_mock)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
for instance in hass.data[DOMAIN].values():
assert instance.update_interval == timedelta(minutes=15)
entry = MockConfigEntry(
domain=DOMAIN,
title="Work",
unique_id="66.66-111.11",
data={
"api_key": "foo",
"latitude": 66.66,
"longitude": 111.11,
"name": "Work",
},
)
aioclient_mock.get(
"https://airapi.airly.eu/v2/measurements/point?lat=66.660000&lng=111.110000",
text=load_fixture("airly_valid_station.json"),
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
assert entry.state == ENTRY_STATE_LOADED
for instance in hass.data[DOMAIN].values():
assert instance.update_interval == timedelta(minutes=30)
async def test_unload_entry(hass, aioclient_mock):
"""Test successful unload of entry."""
entry = await init_integration(hass, aioclient_mock)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
| apache-2.0 | 8,017,846,302,526,744,000 | 30.186567 | 87 | 0.643934 | false |
cnamejj/PyProc | regentest/self_maps.py | 1 | 1217 | #!/usr/bin/env python
"""Handle records from /proc/self/maps data files"""
import regentest as RG
import ProcHandlers as PH
PFC = PH.ProcFieldConstants
# ---
def re_self_maps(inprecs):
"""Iterate through parsed records and re-generate data file"""
__leadtemp = "{st:08x}-{en:08x} {fl:4s} {offset:08x} \
{major:02x}:{minor:02x} {inode:d} "
__ptr_size = 8
__preflen = (__ptr_size * 6) + 25
__preftemp = "{{pref:<{plen:d}s}}".format(plen=__preflen)
__template = "{pref:s}{path:s}"
for __hilit in inprecs:
__ff = inprecs.field
#...+....1....+....2....+....3....+....4....+....5....+....6....+....7....+....8
__lead = __leadtemp.format(st=__ff[PFC.F_START], en=__ff[PFC.F_END],
fl=__ff[PFC.F_FLAGS], offset=__ff[PFC.F_PAGE_OFFSET],
major=__ff[PFC.F_MAJOR_DEV], minor=__ff[PFC.F_MINOR_DEV],
inode=__ff[PFC.F_INODE]
)
__path = __ff[PFC.F_PATH]
if __path == "":
print __lead
else:
__pref = __preftemp.format(pref=__lead)
print __template.format(pref=__pref, path=__ff[PFC.F_PATH])
RG.RECREATOR[PH.GET_HANDLER("/proc/self/maps")] = re_self_maps
| gpl-2.0 | -2,877,985,667,914,708,000 | 27.97619 | 80 | 0.518488 | false |
robwebset/script.videoextras | resources/lib/CacheCleanup.py | 1 | 1858 | # -*- coding: utf-8 -*-
import re
import traceback
import xbmc
import xbmcvfs
import xbmcaddon
# Import the common settings
from settings import Settings
from settings import log
from settings import os_path_join
ADDON = xbmcaddon.Addon(id='script.videoextras')
PROFILE_DIR = xbmc.translatePath(ADDON.getAddonInfo('profile')).decode("utf-8")
#################################
# Class to tidy up any cached files
#################################
class CacheCleanup():
# Cleans out all the cached files
@staticmethod
def removeAllCachedFiles():
CacheCleanup.removeCacheFile(Settings.MOVIES, True)
CacheCleanup.removeCacheFile(Settings.TVSHOWS, True)
CacheCleanup.removeCacheFile(Settings.MUSICVIDEOS, True)
CacheCleanup.removeCacheFile('overlay_image_used.txt')
# Removes the cache file for a given type
@staticmethod
def removeCacheFile(target, isDir=False):
try:
fullFilename = os_path_join(PROFILE_DIR, target)
log("VideoExtrasCleanup: Checking cache file %s" % fullFilename)
# If the file already exists, delete it
if xbmcvfs.exists(fullFilename):
if isDir:
# Remove the png files in the directory first
dirs, files = xbmcvfs.listdir(fullFilename)
for aFile in files:
m = re.search("[0-9]+[a-zA-Z_]*.png", aFile, re.IGNORECASE)
if m:
pngFile = os_path_join(fullFilename, aFile)
xbmcvfs.delete(pngFile)
# Now remove the actual directory
xbmcvfs.rmdir(fullFilename)
else:
xbmcvfs.delete(fullFilename)
except:
log("CacheCleanup: %s" % traceback.format_exc(), xbmc.LOGERROR)
| gpl-2.0 | 8,544,385,020,276,670,000 | 33.407407 | 83 | 0.588267 | false |
AragurDEV/yowsup | yowsup/layers/protocol_notifications/layer.py | 1 | 1890 | from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
from yowsup.layers.protocol_acks.protocolentities import OutgoingAckProtocolEntity
class YowNotificationsProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"notification": (self.recvNotification, self.sendNotification)
}
super(YowNotificationsProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "notification Ib Layer"
def sendNotification(self, entity):
if entity.getTag() == "notification":
self.toLower(entity.toProtocolTreeNode())
def recvNotification(self, node):
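        """Dispatch an incoming notification node to the upper layer and ack it."""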
if node["type"] == "picture":
if node.getChild("set"):
self.toUpper(SetPictureNotificationProtocolEntity.fromProtocolTreeNode(node))
elif node.getChild("delete"):
self.toUpper(DeletePictureNotificationProtocolEntity.fromProtocolTreeNode(node))
else:
self.raiseErrorForNode(node)
elif node["type"] == "status":
self.toUpper(StatusNotificationProtocolEntity.fromProtocolTreeNode(node))
elif node["type"] in ["contacts", "subject", "w:gp2"]:
# Implemented in respectively the protocol_contacts and protocol_groups layer
pass
elif node["type"] in ["features", "contacts", "web", "location"]:
# implement individually at some point
# but keep this pass block so system doesn't crash on these types
pass
elif node["type"] in ["business"]:
print("unhandled business notification")
pass
else:
self.raiseErrorForNode(node)
ack = OutgoingAckProtocolEntity(node["id"], "notification", node["type"], node["from"])
self.toLower(ack.toProtocolTreeNode())
| gpl-3.0 | -6,190,608,310,758,386,000 | 42.953488 | 96 | 0.642328 | false |
zaina/nova | nova/virt/vmwareapi/vmops.py | 1 | 87594 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import collections
import os
import time
import decorator
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil
from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
vmops_opts = [
cfg.StrOpt('cache_prefix',
help='The prefix for where cached images are stored. This is '
'NOT the full path - just a folder prefix. '
'This should only be used when a datastore cache should '
'be shared between compute nodes. Note: this should only '
'be used when the compute nodes have a shared file '
'system.'),
]
CONF = cfg.CONF
CONF.register_opts(vmops_opts, 'vmware')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
CONF.import_opt('enabled', 'nova.vnc', group='vnc')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
RESIZE_TOTAL_STEPS = 6
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VirtualMachineInstanceConfigInfo(object):
"""Parameters needed to create and configure a new instance."""
def __init__(self, instance, image_info, datastore, dc_info, image_cache):
# Some methods called during spawn take the instance parameter purely
# for logging purposes.
# TODO(vui) Clean them up, so we no longer need to keep this variable
self.instance = instance
self.ii = image_info
self.root_gb = instance.root_gb
self.datastore = datastore
self.dc_info = dc_info
self._image_cache = image_cache
@property
def cache_image_folder(self):
if self.ii.image_id is None:
return
return self._image_cache.get_image_cache_folder(
self.datastore, self.ii.image_id)
@property
def cache_image_path(self):
if self.ii.image_id is None:
return
cached_image_file_name = "%s.%s" % (self.ii.image_id,
self.ii.file_type)
return self.cache_image_folder.join(cached_image_file_name)
# Note(vui): See https://bugs.launchpad.net/nova/+bug/1363349
# for cases where mocking time.sleep() can have unintended effects on code
# not under test. For now, unblock the affected test cases by providing
# a wrapper function to work around needing to mock time.sleep()
def _time_sleep_wrapper(delay):
time.sleep(delay)
@decorator.decorator
def retry_if_task_in_progress(f, *args, **kwargs):
retries = max(CONF.vmware.api_retry_count, 1)
delay = 1
for attempt in range(1, retries + 1):
if attempt != 1:
_time_sleep_wrapper(delay)
delay = min(2 * delay, 60)
try:
f(*args, **kwargs)
return
except vexc.TaskInProgress:
pass
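# Methods that issue vCenter calls which may fail with TaskInProgress (for
# example _delete_vm_snapshot below) are wrapped with
# @retry_if_task_in_progress so the call is retried with an increasing delay.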
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session, virtapi, volumeops, cluster=None,
datastore_regex=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
self._cluster = cluster
self._root_resource_pool = vm_util.get_res_pool_ref(self._session,
self._cluster)
self._datastore_regex = datastore_regex
self._base_folder = self._get_base_folder()
self._tmp_folder = 'vmware_temp'
self._datastore_dc_mapping = {}
self._datastore_browser_mapping = {}
self._imagecache = imagecache.ImageCacheManager(self._session,
self._base_folder)
def _get_base_folder(self):
# Enable more than one compute node to run on the same host
if CONF.vmware.cache_prefix:
base_folder = '%s%s' % (CONF.vmware.cache_prefix,
CONF.image_cache_subdirectory_name)
# Ensure that the base folder is unique per compute node
elif CONF.remove_unused_base_images:
base_folder = '%s%s' % (CONF.my_ip,
CONF.image_cache_subdirectory_name)
else:
# Aging disable ensures backward compatibility
base_folder = CONF.image_cache_subdirectory_name
return base_folder
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session.vim.service_content
LOG.debug("Extending root virtual disk to %s", requested_size,
instance=instance)
vmdk_extend_task = self._session._call_method(
self._session.vim,
"ExtendVirtualDisk_Task",
service_content.virtualDiskManager,
name=name,
datacenter=dc_ref,
newCapacityKb=requested_size,
eagerZero=False)
try:
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Extending virtual disk failed with error: %s'),
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
ds_path = ds_obj.DatastorePath.parse(file)
self._delete_datastore_file(ds_path, dc_ref)
LOG.debug("Extended root virtual disk", instance=instance)
def _delete_datastore_file(self, datastore_path, dc_ref):
try:
ds_util.file_delete(self._session, datastore_path, dc_ref)
except (vexc.CannotDeleteFileException,
vexc.FileFaultException,
vexc.FileLockedException,
vexc.FileNotFoundException):
LOG.debug("Unable to delete %(ds)s. There may be more than "
"one process or thread trying to delete the file",
{'ds': datastore_path},
exc_info=True)
def _extend_if_required(self, dc_info, image_info, instance,
root_vmdk_path):
"""Increase the size of the root vmdk if necessary."""
if instance.root_gb * units.Gi > image_info.file_size:
size_in_kb = instance.root_gb * units.Mi
self._extend_virtual_disk(instance, size_in_kb,
root_vmdk_path, dc_info.ref)
def _configure_config_drive(self, instance, vm_ref, dc_info, datastore,
injected_files, admin_password):
session_vim = self._session.vim
cookies = session_vim.client.options.transport.cookiejar
uploaded_iso_path = self._create_config_drive(instance,
injected_files,
admin_password,
datastore.name,
dc_info.name,
instance.uuid,
cookies)
uploaded_iso_path = datastore.build_path(uploaded_iso_path)
self._attach_cdrom_to_vm(
vm_ref, instance,
datastore.ref,
str(uploaded_iso_path))
def _get_instance_metadata(self, context, instance):
flavor = instance.flavor
return ('name:%s\n'
'userid:%s\n'
'username:%s\n'
'projectid:%s\n'
'projectname:%s\n'
'flavor:name:%s\n'
'flavor:memory_mb:%s\n'
'flavor:vcpus:%s\n'
'flavor:ephemeral_gb:%s\n'
'flavor:root_gb:%s\n'
'flavor:swap:%s\n'
'imageid:%s\n'
'package:%s\n') % (instance.display_name,
context.user_id,
context.user_name,
context.project_id,
context.project_name,
flavor.name,
flavor.memory_mb,
flavor.vcpus,
flavor.ephemeral_gb,
flavor.root_gb,
flavor.swap,
instance.image_ref,
version.version_string_with_package())
def build_virtual_machine(self, instance, image_info,
dc_info, datastore, network_info, extra_specs,
metadata):
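        """Create the VM on the backend with the given configuration and
        return its reference.
        """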
vif_infos = vmwarevif.get_vif_info(self._session,
self._cluster,
utils.is_neutron(),
image_info.vif_model,
network_info)
if extra_specs.storage_policy:
profile_spec = vm_util.get_storage_profile_spec(
self._session, extra_specs.storage_policy)
else:
profile_spec = None
# Get the create vm config spec
client_factory = self._session.vim.client.factory
config_spec = vm_util.get_vm_create_spec(client_factory,
instance,
datastore.name,
vif_infos,
extra_specs,
image_info.os_type,
profile_spec=profile_spec,
metadata=metadata)
# Create the VM
vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
config_spec, self._root_resource_pool)
return vm_ref
def _get_extra_specs(self, flavor):
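        """Build an ExtraSpecs object from the flavor's quota and vmware
        extra specs (CPU limits, hardware version and storage policy).
        """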
extra_specs = vm_util.ExtraSpecs()
for (key, type) in (('cpu_limit', int),
('cpu_reservation', int),
('cpu_shares_level', str),
('cpu_shares_share', int)):
value = flavor.extra_specs.get('quota:' + key)
if value:
setattr(extra_specs.cpu_limits, key, type(value))
extra_specs.cpu_limits.validate()
hw_version = flavor.extra_specs.get('vmware:hw_version')
extra_specs.hw_version = hw_version
if CONF.vmware.pbm_enabled:
storage_policy = flavor.extra_specs.get('vmware:storage_policy',
CONF.vmware.pbm_default_policy)
extra_specs.storage_policy = storage_policy
return extra_specs
def _fetch_image_as_file(self, context, vi, image_ds_loc):
"""Download image as an individual file to host via HTTP PUT."""
session = self._session
session_vim = session.vim
cookies = session_vim.client.options.transport.cookiejar
LOG.debug("Downloading image file data %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
images.fetch_image(
context,
vi.instance,
session._host,
session._port,
vi.dc_info.name,
vi.datastore.name,
image_ds_loc.rel_path,
cookies=cookies)
def _fetch_image_as_vapp(self, context, vi, image_ds_loc):
"""Download stream optimized image to host as a vApp."""
# The directory of the imported disk is the unique name
# of the VM use to import it with.
vm_name = image_ds_loc.parent.basename
LOG.debug("Downloading stream optimized image %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s as vApp",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
images.fetch_image_stream_optimized(
context,
vi.instance,
self._session,
vm_name,
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
def _fetch_image_as_ova(self, context, vi, image_ds_loc):
"""Download root disk of an OVA image as streamOptimized."""
# The directory of the imported disk is the unique name
# of the VM use to import it with.
vm_name = image_ds_loc.parent.basename
images.fetch_image_ova(context,
vi.instance,
self._session,
vm_name,
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
def _prepare_sparse_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, "tmp-sparse.vmdk")
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_flat_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(tmp_image_ds_loc),
vi.ii.file_size_in_kb)
flat_vmdk_name = vi.cache_image_path.basename.replace('.vmdk',
'-flat.vmdk')
flat_vmdk_ds_loc = tmp_dir_loc.join(vi.ii.image_id, flat_vmdk_name)
self._delete_datastore_file(str(flat_vmdk_ds_loc), vi.dc_info.ref)
return tmp_dir_loc, flat_vmdk_ds_loc
def _prepare_stream_optimized_image(self, vi):
vm_name = "%s_%s" % (constants.IMAGE_VM_PREFIX,
uuidutils.generate_uuid())
tmp_dir_loc = vi.datastore.build_path(vm_name)
tmp_image_ds_loc = tmp_dir_loc.join("%s.vmdk" % tmp_dir_loc.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_iso_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _move_to_cache(self, dc_ref, src_folder_ds_path, dst_folder_ds_path):
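        """Move the freshly fetched image folder into the cache, tolerating a
        concurrent move of the same image.
        """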
try:
ds_util.file_move(self._session, dc_ref,
src_folder_ds_path, dst_folder_ds_path)
except vexc.FileAlreadyExistsException:
# Folder move has failed. This may be due to the fact that a
# process or thread has already completed the operation.
# Since image caching is synchronized, this can only happen
# due to action external to the process.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
"can lead to unexpected results."),
dst_folder_ds_path)
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
tmp_dir_loc = tmp_image_ds_loc.parent.parent
converted_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
# converts fetched image to preallocated disk
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(tmp_image_ds_loc),
str(converted_image_ds_loc))
self._delete_datastore_file(str(tmp_image_ds_loc), vi.dc_info.ref)
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_flat_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_stream_optimized_image(self, vi, tmp_image_ds_loc):
dst_path = vi.cache_image_folder.join("%s.vmdk" % vi.ii.image_id)
ds_util.mkdir(self._session, vi.cache_image_folder, vi.dc_info.ref)
try:
ds_util.disk_move(self._session, vi.dc_info.ref,
tmp_image_ds_loc, dst_path)
except vexc.FileAlreadyExistsException:
pass
def _cache_iso_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _get_vm_config_info(self, instance, image_info,
storage_policy=None):
"""Captures all relevant information from the spawn parameters."""
if (instance.root_gb != 0 and
image_info.file_size > instance.root_gb * units.Gi):
reason = _("Image disk size greater than requested disk size")
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
allowed_ds_types = ds_util.get_allowed_datastore_types(
image_info.disk_type)
datastore = ds_util.get_datastore(self._session,
self._cluster,
self._datastore_regex,
storage_policy,
allowed_ds_types)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
return VirtualMachineInstanceConfigInfo(instance,
image_info,
datastore,
dc_info,
self._imagecache)
def _get_image_callbacks(self, vi):
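        """Select the prepare/fetch/cache callbacks matching the image's
        container format and disk type.
        """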
disk_type = vi.ii.disk_type
if vi.ii.is_ova:
image_fetch = self._fetch_image_as_ova
elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
image_fetch = self._fetch_image_as_vapp
else:
image_fetch = self._fetch_image_as_file
if vi.ii.is_iso:
image_prepare = self._prepare_iso_image
image_cache = self._cache_iso_image
elif disk_type == constants.DISK_TYPE_SPARSE:
image_prepare = self._prepare_sparse_image
image_cache = self._cache_sparse_image
elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
image_prepare = self._prepare_stream_optimized_image
image_cache = self._cache_stream_optimized_image
elif disk_type in constants.SUPPORTED_FLAT_VARIANTS:
image_prepare = self._prepare_flat_image
image_cache = self._cache_flat_image
else:
reason = _("disk type '%s' not supported") % disk_type
raise exception.InvalidDiskInfo(reason=reason)
return image_prepare, image_fetch, image_cache
def _fetch_image_if_missing(self, context, vi):
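        """Fetch the image into the datastore cache if it is not already
        cached. The fetch is serialized on the cached image path so that
        concurrent spawns do not download the same image more than once.
        """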
image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi)
LOG.debug("Processing image %s", vi.ii.image_id, instance=vi.instance)
with lockutils.lock(str(vi.cache_image_path),
lock_file_prefix='nova-vmware-fetch_image'):
self.check_cache_folder(vi.datastore.name, vi.datastore.ref)
ds_browser = self._get_ds_browser(vi.datastore.ref)
if not ds_util.file_exists(self._session, ds_browser,
vi.cache_image_folder,
vi.cache_image_path.basename):
LOG.debug("Preparing fetch location", instance=vi.instance)
tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
LOG.debug("Fetch image to %s", tmp_image_ds_loc,
instance=vi.instance)
image_fetch(context, vi, tmp_image_ds_loc)
LOG.debug("Caching image", instance=vi.instance)
image_cache(vi, tmp_image_ds_loc)
LOG.debug("Cleaning up location %s", str(tmp_dir_loc),
instance=vi.instance)
self._delete_datastore_file(str(tmp_dir_loc), vi.dc_info.ref)
def _create_and_attach_ephemeral_disk(self, instance, vm_ref, dc_info,
size, adapter_type, path):
disk_type = constants.DISK_TYPE_THIN
vm_util.create_virtual_disk(
self._session, dc_info.ref,
adapter_type,
disk_type,
path,
size)
self._volumeops.attach_disk_to_vm(
vm_ref, instance,
adapter_type, disk_type,
path, size, False)
def _create_ephemeral(self, bdi, instance, vm_ref, dc_info,
datastore, folder, adapter_type):
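        """Create and attach the instance's ephemeral disks.
        Disks defined in the block device info take precedence; otherwise a
        single ephemeral disk is created when the flavor requests one.
        """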
ephemerals = None
if bdi is not None:
ephemerals = driver.block_device_info_get_ephemerals(bdi)
for idx, eph in enumerate(ephemerals):
size = eph['size'] * units.Mi
at = eph.get('disk_bus') or adapter_type
filename = vm_util.get_ephemeral_name(idx)
path = str(ds_obj.DatastorePath(datastore.name, folder,
filename))
self._create_and_attach_ephemeral_disk(instance, vm_ref,
dc_info, size,
at, path)
# There may be block devices defined but no ephemerals. In this case
# we need to allocate a ephemeral disk if required
if not ephemerals and instance.ephemeral_gb:
size = instance.ephemeral_gb * units.Mi
filename = vm_util.get_ephemeral_name(0)
path = str(ds_obj.DatastorePath(datastore.name, folder,
filename))
self._create_and_attach_ephemeral_disk(instance, vm_ref,
dc_info, size,
adapter_type, path)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
client_factory = self._session.vim.client.factory
image_info = images.VMwareImage.from_image(instance.image_ref,
image_meta)
extra_specs = self._get_extra_specs(instance.flavor)
vi = self._get_vm_config_info(instance, image_info,
extra_specs.storage_policy)
metadata = self._get_instance_metadata(context, instance)
# Creates the virtual machine. The virtual machine reference returned
# is unique within Virtual Center.
vm_ref = self.build_virtual_machine(instance,
image_info,
vi.dc_info,
vi.datastore,
network_info,
extra_specs,
metadata)
# Cache the vm_ref. This saves a remote call to the VC. This uses the
# instance uuid.
vm_util.vm_ref_cache_update(instance.uuid, vm_ref)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info,
vm_ref=vm_ref)
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc.enabled:
self._get_and_set_vnc_config(client_factory, instance, vm_ref)
block_device_mapping = []
if block_device_info is not None:
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if instance.image_ref:
self._imagecache.enlist_image(
image_info.image_id, vi.datastore, vi.dc_info.ref)
self._fetch_image_if_missing(context, vi)
if image_info.is_iso:
self._use_iso_image(vm_ref, vi)
elif image_info.linked_clone:
self._use_disk_image_as_linked_clone(vm_ref, vi)
else:
self._use_disk_image_as_full_clone(vm_ref, vi)
if len(block_device_mapping) > 0:
msg = "Block device information present: %s" % block_device_info
# NOTE(mriedem): block_device_info can contain an auth_password
# so we have to scrub the message before logging it.
LOG.debug(strutils.mask_password(msg), instance=instance)
# Before attempting to attach any volume, make sure the
# block_device_mapping (i.e. disk_bus) is valid
self._is_bdm_valid(block_device_mapping)
for disk in block_device_mapping:
connection_info = disk['connection_info']
adapter_type = disk.get('disk_bus') or vi.ii.adapter_type
# TODO(hartsocks): instance is unnecessary, remove it
# we still use instance in many locations for no other purpose
# than logging, can we simplify this?
if disk.get('boot_index') == 0:
self._volumeops.attach_root_volume(connection_info,
instance, vi.datastore.ref, adapter_type)
else:
self._volumeops.attach_volume(connection_info,
instance, adapter_type)
# Create ephemeral disks
self._create_ephemeral(block_device_info, instance, vm_ref,
vi.dc_info, vi.datastore, instance.uuid,
vi.ii.adapter_type)
if configdrive.required_by(instance):
self._configure_config_drive(
instance, vm_ref, vi.dc_info, vi.datastore,
injected_files, admin_password)
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def _is_bdm_valid(self, block_device_mapping):
"""Checks if the block device mapping is valid."""
valid_bus = (constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_IDE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL)
for disk in block_device_mapping:
adapter_type = disk.get('disk_bus')
if (adapter_type is not None and adapter_type not in valid_bus):
raise exception.UnsupportedHardware(model=adapter_type,
virt="vmware")
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_LI('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
try:
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host,
port=self._session._port,
data_center_name=dc_name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_iso_path)
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with error: %s'),
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
client_factory = self._session.vim.client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
client_factory,
devices,
constants.ADAPTER_TYPE_IDE)
cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
client_factory, datastore, file_path,
controller_key, unit_number)
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug("Reconfiguring VM instance to attach cdrom %s",
file_path, instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
LOG.debug("Reconfigured VM instance to attach cdrom %s",
file_path, instance=instance)
def _create_vm_snapshot(self, instance, vm_ref):
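        """Create a quiesced snapshot of the instance (without memory) and
        return the snapshot reference.
        """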
LOG.debug("Creating Snapshot of the VM instance", instance=instance)
snapshot_task = self._session._call_method(
self._session.vim,
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.uuid,
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug("Created Snapshot of the VM instance", instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
snapshot = task_info.result
return snapshot
@retry_if_task_in_progress
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
self._session.vim,
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug("Deleted Snapshot of the VM instance", instance=instance)
def _create_linked_clone_from_snapshot(self, instance,
vm_ref, snapshot_ref, dc_info):
"""Create linked clone VM to be deployed to same ds as source VM
"""
client_factory = self._session.vim.client.factory
rel_spec = vm_util.relocate_vm_spec(
client_factory,
datastore=None,
host=None,
disk_move_type="createNewChildDiskBacking")
clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec,
power_on=False, snapshot=snapshot_ref, template=True)
vm_name = "%s_%s" % (constants.SNAPSHOT_VM_PREFIX,
uuidutils.generate_uuid())
LOG.debug("Creating linked-clone VM from snapshot", instance=instance)
vm_clone_task = self._session._call_method(
self._session.vim,
"CloneVM_Task",
vm_ref,
folder=dc_info.vmFolder,
name=vm_name,
spec=clone_spec)
self._session._wait_for_task(vm_clone_task)
LOG.info(_LI("Created linked-clone VM from snapshot"),
instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
vm_clone_task, "Task", "info")
return task_info.result
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
3. Creates a linked clone VM from the snapshot
4. Exports the disk in the link clone VM as a streamOptimized disk.
5. Delete the linked clone VM
6. Deletes the snapshot in original instance.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
def _get_vm_and_vmdk_attribs():
# Get the vmdk info that the VM is pointing to
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
instance.uuid)
if not vmdk.path:
LOG.debug("No root disk defined. Unable to snapshot.",
instance=instance)
raise error_util.NoRootDiskDefined()
lst_properties = ["datastore", "summary.config.guestId"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
props)
os_type = query['summary.config.guestId']
datastores = query['datastore']
return (vmdk, datastores, os_type)
vmdk, datastores, os_type = _get_vm_and_vmdk_attribs()
ds_ref = datastores.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
# TODO(vui): convert to creating plain vm clone and uploading from it
# instead of using live vm snapshot.
snapshot_ref = self._create_vm_snapshot(instance, vm_ref)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_vm_ref = None
try:
# Create a temporary VM (linked clone from snapshot), then export
# the VM's root disk to glance via HttpNfc API
snapshot_vm_ref = self._create_linked_clone_from_snapshot(
instance, vm_ref, snapshot_ref, dc_info)
images.upload_image_stream_optimized(
context, image_id, instance, self._session, vm=snapshot_vm_ref,
vmdk_size=vmdk.capacity_in_bytes)
finally:
if snapshot_vm_ref:
vm_util.destroy_vm(self._session, instance, snapshot_vm_ref)
# Deleting the snapshot after destroying the temporary VM created
# based on it allows the instance vm's disks to be consolidated.
# TODO(vui) Add handling for when vmdk volume is attached.
self._delete_vm_snapshot(instance, vm_ref, snapshot_ref)
def reboot(self, instance, network_info, reboot_type="SOFT"):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session, props)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning" and
reboot_type == "SOFT"):
LOG.debug("Rebooting guest OS of VM", instance=instance)
self._session._call_method(self._session.vim, "RebootGuest",
vm_ref)
LOG.debug("Rebooted guest OS of VM", instance=instance)
else:
LOG.debug("Doing hard reboot of VM", instance=instance)
reset_task = self._session._call_method(self._session.vim,
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug("Did hard reboot of VM", instance=instance)
def _destroy_instance(self, instance, destroy_disks=True):
# Destroy a VM instance
try:
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, props)
pwr_state = query['runtime.powerState']
vm_config_pathname = query.get('config.files.vmPathName')
vm_ds_path = None
if vm_config_pathname is not None:
vm_ds_path = ds_obj.DatastorePath.parse(
vm_config_pathname)
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
vm_util.power_off_instance(self._session, instance, vm_ref)
# Un-register the VM
try:
LOG.debug("Unregistering the VM", instance=instance)
self._session._call_method(self._session.vim,
"UnregisterVM", vm_ref)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, got "
"this exception while un-registering the VM: "
"%s"), excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and vm_ds_path:
try:
dir_ds_compliant_path = vm_ds_path.parent
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug("Deleted contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
except Exception:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents "
"from the disk"), exc_info=True)
except exception.InstanceNotFound:
LOG.warning(_LW('Instance does not exist on backend'),
instance=instance)
except Exception:
LOG.exception(_LE('Destroy instance failed'),
instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance.uuid)
def destroy(self, instance, destroy_disks=True):
"""Destroy a VM instance.
Steps followed for each VM are:
1. Power off, if it is in poweredOn state.
2. Un-register.
3. Delete the contents of the folder holding the VM related data.
"""
LOG.debug("Destroying instance", instance=instance)
self._destroy_instance(instance, destroy_disks=destroy_disks)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug("Suspending the VM", instance=instance)
suspend_task = self._session._call_method(self._session.vim,
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Suspended the VM", instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session.vim,
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def _get_rescue_device(self, instance, vm_ref):
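        """Return the virtual disk device that was attached as the rescue
        disk.
        """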
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
return vm_util.find_rescue_device(hardware_devices,
instance)
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance.
Attach the image that the instance was created from and boot from it.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Get the root disk vmdk object
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
# Get the image details of the instance
image_info = images.VMwareImage.from_image(instance.image_ref,
image_meta)
vi = VirtualMachineInstanceConfigInfo(instance,
image_info,
datastore,
dc_info,
self._imagecache)
vm_util.power_off_instance(self._session, instance, vm_ref)
# Get the rescue disk path
rescue_disk_path = datastore.build_path(instance.uuid,
"%s-rescue.%s" % (image_info.image_id, image_info.file_type))
# Copy the cached image to the be the rescue disk. This will be used
# as the rescue disk for the instance.
ds_util.disk_copy(self._session, dc_info.ref,
vi.cache_image_path, rescue_disk_path)
# Attach the rescue disk to the instance
self._volumeops.attach_disk_to_vm(vm_ref, instance, vmdk.adapter_type,
vmdk.disk_type, rescue_disk_path)
# Get the rescue device and configure the boot order to
# boot from this device
rescue_device = self._get_rescue_device(instance, vm_ref)
factory = self._session.vim.client.factory
boot_spec = vm_util.get_vm_boot_spec(factory, rescue_device)
# Update the VM with the new boot order and power on
vm_util.reconfigure_vm(self._session, vm_ref, boot_spec)
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Get the rescue device and detach it from the instance.
try:
rescue_device = self._get_rescue_device(instance, vm_ref)
except exception.NotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to access the rescue disk'),
instance=instance)
vm_util.power_off_instance(self._session, instance, vm_ref)
self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device,
destroy_disk=True)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
vm_util.power_off_instance(self._session, instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
instance.save()
def _resize_vm(self, context, instance, vm_ref, flavor):
"""Resizes the VM according to the flavor."""
client_factory = self._session.vim.client.factory
extra_specs = self._get_extra_specs(flavor)
metadata = self._get_instance_metadata(context, instance)
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
int(flavor.vcpus),
int(flavor.memory_mb),
extra_specs,
metadata=metadata)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
def _resize_disk(self, instance, vm_ref, vmdk, flavor):
if (flavor.root_gb > instance.root_gb and
flavor.root_gb > vmdk.capacity_in_bytes / units.Gi):
root_disk_in_kb = flavor.root_gb * units.Mi
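            # Copy the root disk aside, grow the copy to the new flavor size,
            # then swap it in; the old disk is kept as 'original.vmdk' so the
            # resize can be reverted.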
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
resized_disk = str(ds_obj.DatastorePath(datastore, folder,
'resized.vmdk'))
ds_util.disk_copy(self._session, dc_info.ref, vmdk.path,
str(resized_disk))
self._extend_virtual_disk(instance, root_disk_in_kb, resized_disk,
dc_info.ref)
self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)
original_disk = str(ds_obj.DatastorePath(datastore, folder,
'original.vmdk'))
ds_util.disk_move(self._session, dc_info.ref, vmdk.path,
original_disk)
ds_util.disk_move(self._session, dc_info.ref, resized_disk,
vmdk.path)
self._volumeops.attach_disk_to_vm(vm_ref, instance,
vmdk.adapter_type,
vmdk.disk_type, vmdk.path)
def _remove_ephemerals(self, vm_ref):
devices = vm_util.get_ephemerals(self._session, vm_ref)
if devices:
vm_util.detach_devices_from_vm(self._session, vm_ref, devices)
def _resize_create_ephemerals(self, vm_ref, instance, block_device_info):
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
self._create_ephemeral(block_device_info, instance, vm_ref,
dc_info, datastore, folder, vmdk.adapter_type)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
# Checks if the migration needs a disk resize down.
if (flavor.root_gb < instance.root_gb or
(flavor.root_gb != 0 and
flavor.root_gb < vmdk.capacity_in_bytes / units.Gi)):
reason = _("Unable to shrink disk.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# TODO(garyk): treat dest parameter. Migration needs to be treated.
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
# 1. Power off the instance
vm_util.power_off_instance(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Reconfigure the VM properties
self._resize_vm(context, instance, vm_ref, flavor)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3.Reconfigure the disk properties
self._resize_disk(instance, vm_ref, vmdk, flavor)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Purge ephemeral disks
self._remove_ephemerals(vm_ref)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
original_disk = ds_obj.DatastorePath(datastore, folder,
'original.vmdk')
ds_browser = self._get_ds_browser(ds_ref)
if ds_util.file_exists(self._session, ds_browser,
original_disk.parent,
original_disk.basename):
ds_util.disk_delete(self._session, dc_info.ref,
str(original_disk))
def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that the VM is off
vm_util.power_off_instance(self._session, instance, vm_ref)
client_factory = self._session.vim.client.factory
# Reconfigure the VM properties
extra_specs = self._get_extra_specs(instance.flavor)
metadata = self._get_instance_metadata(context, instance)
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
int(instance.vcpus),
int(instance.memory_mb),
extra_specs,
metadata=metadata)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
# Reconfigure the disks if necessary
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
original_disk = ds_obj.DatastorePath(datastore, folder,
'original.vmdk')
ds_browser = self._get_ds_browser(ds_ref)
if ds_util.file_exists(self._session, ds_browser,
original_disk.parent,
original_disk.basename):
self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)
ds_util.disk_delete(self._session, dc_info.ref, vmdk.path)
ds_util.disk_move(self._session, dc_info.ref,
str(original_disk), vmdk.path)
self._volumeops.attach_disk_to_vm(vm_ref, instance,
vmdk.adapter_type,
vmdk.disk_type, vmdk.path)
# Reconfigure ephemerals
self._remove_ephemerals(vm_ref)
self._resize_create_ephemerals(vm_ref, instance, block_device_info)
if power_on:
vm_util.power_on_instance(self._session, instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# 5. Update ephemerals if necessary
self._resize_create_ephemerals(vm_ref, instance, block_device_info)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
# 6. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=6,
total_steps=RESIZE_TOTAL_STEPS)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug("Migrating VM to host %s", dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session.vim,
"MigrateVM_Task", vm_ref,
host=host_ref,
priority="defaultPriority")
self._session._wait_for_task(vm_migrate_task)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug("Migrated VM to host %s", dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_LI("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds"), instances_info)
for instance in instances:
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, vm_props)
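        # vSphere reports memory in MB; InstanceInfo expects KB, hence the
        # multiplication by 1024.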
max_mem = int(query.get('summary.config.memorySizeMB', 0)) * 1024
num_cpu = int(query.get('summary.config.numCpu', 0))
return hardware.InstanceInfo(
state=VMWARE_POWER_STATES[query['runtime.powerState']],
max_mem_kb=max_mem,
mem_kb=max_mem,
num_cpu=num_cpu)
def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
vm_props)
data = {}
# All of values received are objects. Convert them to dictionaries
for value in query.values():
prop_dict = vim_util.object_to_dict(value, list_depth=1)
data.update(prop_dict)
return data
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
        # Add a namespace to all of the diagnostics
return {'vmware:' + k: v for k, v in data.items()}
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
state = data.get('powerState')
if state:
state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]]
uptime = data.get('uptimeSeconds', 0)
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=state,
driver='vmwareapi',
config_drive=config_drive,
hypervisor_os='esxi',
uptime=uptime)
diags.memory_details.maximum = data.get('memorySizeMB', 0)
diags.memory_details.used = data.get('guestMemoryUsage', 0)
# TODO(garyk): add in cpu, nic and disk stats
return diags
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
@staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
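            # Encode this interface as "mac;ip;netmask;gateway;broadcast;dns";
            # the entries for all interfaces are joined with '#' below.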
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
def _set_machine_id(self, client_factory, instance, network_info,
vm_ref=None):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
if vm_ref is None:
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
@utils.synchronized('vmware.get_and_set_vnc_port')
def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
"""Set the vnc configuration of the VM."""
port = vm_util.get_vnc_port(self._session)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref.value)
if not ds_browser:
ds_browser = self._session._call_method(
vim_util, "get_dynamic_property", ds_ref, "Datastore",
"browser")
self._datastore_browser_mapping[ds_ref.value] = ds_browser
return ds_browser
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
host_objs = self._session._call_method(vim_util, "get_objects",
"HostSystem", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, host_objs)
for host in host_objs:
if hasattr(host, 'propSet'):
if host.propSet[0].val == host_name:
return host.obj
return None
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
        Currently there are two folders that are required on the datastore
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
This method is aimed to be used for the management of those
folders to ensure that they are created if they are missing.
The ds_util method mkdir will be used to check if the folder
        exists. If this throws an exception 'FileAlreadyExistsException'
then the folder already exists on the datastore.
"""
path = ds_obj.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug("Folder %s created.", path)
except vexc.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
client_factory = self._session.vim.client.factory
self._set_machine_id(client_factory, instance, network_info)
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug("Image aging disabled. Aging will not be done.")
return
datastores = ds_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
dc_info = self.get_datacenter_ref_and_name(ds.ref)
datastores_info.append((ds, dc_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
"""Returns list of valid vms from RetrieveResult object."""
lst_vm_names = []
while retrieve_result:
for vm in retrieve_result.objects:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
# Ignoring the orphaned or inaccessible VMs
if (conn_state not in ["orphaned", "inaccessible"] and
uuidutils.is_uuid_like(vm_name)):
lst_vm_names.append(vm_name)
retrieve_result = self._session._call_method(vutil,
'continue_retrieval',
retrieve_result)
return lst_vm_names
def instance_exists(self, instance):
try:
vm_util.get_vm_ref(self._session, instance)
return True
except exception.InstanceNotFound:
return False
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
vif_model = image_meta.properties.get('hw_vif_model',
constants.DEFAULT_VIF_MODEL)
vif_model = vm_util.convert_vif_model(vif_model)
vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
vif_model, utils.is_neutron(), vif)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_attach_port_index(self._session, vm_ref)
client_factory = self._session.vim.client.factory
attach_config_spec = vm_util.get_network_attach_config_spec(
client_factory, vif_info, port_index)
LOG.debug("Reconfiguring VM to attach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
attach_config_spec)
except Exception as e:
LOG.error(_LE('Attaching network adapter failed. Exception: '
' %s'),
e, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
LOG.debug("Reconfigured VM to attach interface", instance=instance)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_vm_detach_port_index(self._session,
vm_ref,
vif['id'])
if port_index is None:
msg = _("No device with interface-id %s exists on "
"VM") % vif['id']
raise exception.NotFound(msg)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
device = vmwarevif.get_network_device(hardware_devices,
vif['address'])
if device is None:
msg = _("No device with MAC address %s exists on the "
"VM") % vif['address']
raise exception.NotFound(msg)
client_factory = self._session.vim.client.factory
detach_config_spec = vm_util.get_network_detach_config_spec(
client_factory, device, port_index)
LOG.debug("Reconfiguring VM to detach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
detach_config_spec)
except Exception as e:
LOG.error(_LE('Detaching network adapter failed. Exception: '
'%s'),
e, instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
LOG.debug("Reconfigured VM to detach interface", instance=instance)
def _use_disk_image_as_full_clone(self, vm_ref, vi):
"""Uses cached image disk by copying it into the VM directory."""
instance_folder = vi.instance.uuid
root_disk_name = "%s.vmdk" % vi.instance.uuid
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(root_disk_ds_loc))
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(root_disk_ds_loc))
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, False)
def _sized_image_exists(self, sized_disk_ds_loc, ds_ref):
ds_browser = self._get_ds_browser(ds_ref)
return ds_util.file_exists(
self._session, ds_browser, sized_disk_ds_loc.parent,
sized_disk_ds_loc.basename)
def _use_disk_image_as_linked_clone(self, vm_ref, vi):
"""Uses cached image as parent of a COW child in the VM directory."""
sized_image_disk_name = "%s.vmdk" % vi.ii.image_id
if vi.root_gb > 0:
sized_image_disk_name = "%s.%s.vmdk" % (vi.ii.image_id, vi.root_gb)
sized_disk_ds_loc = vi.cache_image_folder.join(sized_image_disk_name)
# Ensure only a single thread extends the image at once.
# We do this by taking a lock on the name of the extended
# image. This allows multiple threads to create resized
# copies simultaneously, as long as they are different
# sizes. Threads attempting to create the same resized copy
# will be serialized, with only the first actually creating
# the copy.
#
# Note that the object is in a per-nova cache directory,
# so inter-nova locking is not a concern. Consequently we
# can safely use simple thread locks.
with lockutils.lock(str(sized_disk_ds_loc),
lock_file_prefix='nova-vmware-image'):
if not self._sized_image_exists(sized_disk_ds_loc,
vi.datastore.ref):
LOG.debug("Copying root disk of size %sGb", vi.root_gb,
instance=vi.instance)
try:
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(sized_disk_ds_loc))
except Exception as e:
LOG.warning(_LW("Root disk file creation "
"failed - %s"), e)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy cached '
'image %(source)s to '
'%(dest)s for resize: '
'%(error)s'),
{'source': vi.cache_image_path,
'dest': sized_disk_ds_loc,
'error': e})
try:
ds_util.file_delete(self._session,
sized_disk_ds_loc,
vi.dc_info.ref)
except vexc.FileNotFoundException:
# File was never created: cleanup not
# required
pass
# Resize the copy to the appropriate size. No need
# for cleanup up here, as _extend_virtual_disk
# already does it
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(sized_disk_ds_loc))
# Associate the sized image disk to the VM by attaching to the VM a
# COW child of said disk.
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(sized_disk_ds_loc),
vi.root_gb * units.Mi, vi.ii.linked_clone)
def _use_iso_image(self, vm_ref, vi):
"""Uses cached image as a bootable virtual cdrom."""
self._attach_cdrom_to_vm(
vm_ref, vi.instance, vi.datastore.ref,
str(vi.cache_image_path))
# Optionally create and attach blank disk
if vi.root_gb > 0:
instance_folder = vi.instance.uuid
root_disk_name = "%s.vmdk" % vi.instance.uuid
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
# It is pointless to COW a blank disk
linked_clone = False
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi)
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, linked_clone)
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
for dco in dcs.objects:
dc_ref = dco.obj
ds_refs = []
prop_dict = vm_util.propset_dict(dco.propSet)
name = prop_dict.get('name')
vmFolder = prop_dict.get('vmFolder')
datastore_refs = prop_dict.get('datastore')
if datastore_refs:
datastore_refs = datastore_refs.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
else:
LOG.debug("Datacenter %s doesn't have any datastore "
"associated with it, ignoring it", name)
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
dcs = self._session._call_method(vutil, 'continue_retrieval',
dcs)
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
if self._root_resource_pool:
vms = self._session._call_method(
vim_util, 'get_inner_objects', self._root_resource_pool, 'vm',
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using vCenter logic."""
# vCenter does not run virtual machines and does not run
# a VNC proxy. Instead, you need to tell OpenStack to talk
# directly to the ESX host running the VM you are attempting
# to connect to via VNC.
vnc_console = self._get_vnc_console_connection(instance)
host_name = vm_util.get_host_name_for_vm(
self._session,
instance)
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
{'uuid': instance.uuid, 'host_name': host_name},
instance=instance)
return ctype.ConsoleVNC(**vnc_console)
def get_mks_console(self, instance):
vm_ref = vm_util.get_vm_ref(self._session, instance)
ticket = self._session._call_method(self._session.vim,
'AcquireTicket',
vm_ref,
ticketType='mks')
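        # Strip the colons from the host's SSL thumbprint and lowercase it
        # before including it in the MKS auth info.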
thumbprint = ticket.sslThumbprint.replace(':', '').lower()
mks_auth = {'ticket': ticket.ticket,
'cfgFile': ticket.cfgFile,
'thumbprint': thumbprint}
internal_access_path = jsonutils.dumps(mks_auth)
return ctype.ConsoleMKS(ticket.host, ticket.port, internal_access_path)
| apache-2.0 | 4,759,720,369,780,155,000 | 46.017713 | 79 | 0.537571 | false |
albanatita/data-process | ishtarTools.py | 1 | 2664 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 10:49:02 2015
@author: admin
"""
import os, datetime, time
import ConvertFiles
import sqlite3
import readHdf5
def massConversion():
path=r"D:"+os.sep+"DATA"+os.sep+"Acquired_data"
# listFiles=['00823_Data']
iteration=range(10,46)
listeFiles=['01078_Data']
# for i in iteration:
# listeFiles.append('010'+str(i)+'_Data')
# for file in os.listdir(path):
# if file.endswith(".tdms"):
# listeFiles.append(file[0:-5])
#conn=sqlite3.connect('ishtar')
#curs=conn.cursor()
#tblcmd='create table shots (shotnbr int(6),file char(40))'
#curs.execute(tblcmd)
#conn.commit()
for x in listeFiles:
print x
ConvertFiles.convert_tdms(path,x,False)
#
# for x in listFiles2:
class Environment():
def __init__(self):
self.path=r"D:"+os.sep+"DATA"+os.sep+"Acquired_data"
def addDate():
path=r"D:"+os.sep+"DATA"+os.sep+"Acquired_data"
env=Environment()
listeFiles=[]
for file in os.listdir(path):
if file.endswith(".h5"):
listeFiles.append(file[0:-3])
for file in listeFiles:
try:
timei=time.ctime(os.path.getmtime(path+os.sep+file+'.tdms'))
readHdf5.saveAttr(file,'date',timei,env)
except:
print file+'.tdms not found'
def addMagneticField():
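    # FIXME: 'timei' is never assigned in this function (it looks like a copy
    # of addDate), so the value to store still needs to be computed here.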
path=r"D:"+os.sep+"DATA"+os.sep+"Acquired_data"
env=Environment()
listeFiles=[]
for file in os.listdir(path):
if file.endswith(".h5"):
listeFiles.append(file[0:-3])
for file in listeFiles:
try:
readHdf5.saveAttr(file,'date',timei,env)
except:
print file+'.tdms not found'
def addWincc():
path=r"D:"+os.sep+"DATA"+os.sep+"Acquired_data"
env=Environment()
listeFiles=[]
for file in os.listdir(path):
#print file[0:2]
if file.endswith(".csv") and file[0:2]=='Is':
#listeFiles.append(file[0:-3])
inputfile=open(path+os.sep+file)
#inputfile.next()
print inputfile.readline()
#timei=datetime.datetime.strptime(inputfile.readline()[13:-1],'%d.%m.%Y %H:%M:%S')
timei=inputfile.readline()[13:-1]
print timei
h5file='0'+file[7:-4]+'_Data'
print h5file
readHdf5.saveAttr(h5file,'date',timei,env)
if __name__=='__main__':
#massConversion()
#addMagneticField()
#addWincc() | gpl-2.0 | 3,389,679,518,884,044,300 | 26.666667 | 94 | 0.541291 | false |
vir-mir/aiovalidator | aiovalidator/middlewares/validator.py | 1 | 1514 | import asyncio
import itertools
import json
import sys
from functools import wraps
from aiovalidator.fields.base import BaseField
from aiovalidator.fields.manager import ManagerField
PY_35 = sys.version_info >= (3, 5)
if PY_35:
from json import JSONDecodeError
else:
JSONDecodeError = ValueError
__all__ = ['validator_factory']
def _loads(data):
try:
return json.loads(data)
except JSONDecodeError:
return {}
def validator_factory(loads=_loads):
@asyncio.coroutine
def validator(app, handler):
if getattr(handler, 'skip_validate', False):
return handler
cls_field = getattr(handler, 'Field', None)
if not cls_field:
return handler
else:
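            # Gather every BaseField attribute exposed by the handler's Field
            # class, keyed by attribute name.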
fields = (
(name, getattr(cls_field, name))
for name in dir(cls_field)
if isinstance(getattr(cls_field, name), BaseField)
)
load = getattr(handler, 'validator_loads', None) or loads
@wraps(handler)
@asyncio.coroutine
def wrapper(request):
data = dict(itertools.chain(
request.match_info.items(),
request.GET.items(),
load((yield from request.text())).items()))
manager = ManagerField(fields, request, data)
yield from manager.init()
request['fields'] = manager.manager_dict
return (yield from handler(request))
return wrapper
return validator
| apache-2.0 | -7,801,116,108,073,615,000 | 24.233333 | 66 | 0.597754 | false |
josircg/raizcidadanista | raizcidadanista/financeiro/migrations/0009_auto__chg_field_despesa_valor__chg_field_projeto_orcamento.py | 1 | 18530 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Despesa.valor'
db.alter_column('financeiro_despesa', 'valor', self.gf('utils.fields.BRDecimalField')(max_digits=14, decimal_places=2))
# Changing field 'Projeto.orcamento'
db.alter_column('financeiro_projeto', 'orcamento', self.gf('utils.fields.BRDecimalField')(max_digits=16, decimal_places=2))
def backwards(self, orm):
# Changing field 'Despesa.valor'
db.alter_column('financeiro_despesa', 'valor', self.gf('django.db.models.fields.DecimalField')(max_digits=14, decimal_places=2))
# Changing field 'Projeto.orcamento'
db.alter_column('financeiro_projeto', 'orcamento', self.gf('django.db.models.fields.DecimalField')(max_digits=16, decimal_places=2))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cadastro.membro': {
'Meta': {'ordering': "['nome']", 'object_name': 'Membro', '_ormbases': ['cadastro.Pessoa']},
'aprovador': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'membro_aprovador'", 'null': 'True', 'to': "orm['auth.User']"}),
'assinado': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'atividade_profissional': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'contrib_prox_pgto': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contrib_tipo': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'contrib_valor': ('utils.fields.BRDecimalField', [], {'default': '0', 'max_digits': '7', 'decimal_places': '2'}),
'cpf': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'dt_prefiliacao': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'dtnascimento': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'endereco': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'endereco_cep': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'endereco_complemento': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'endereco_num': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'estadocivil': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'facebook_access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'filiacao_partidaria': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'filiado': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fundador': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'municipio_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'municipio_naturalidade': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'nome_da_mae': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'pessoa_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cadastro.Pessoa']", 'unique': 'True', 'primary_key': 'True'}),
'rg': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'secao_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'titulo_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'twitter_id': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uf_eleitoral': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']", 'null': 'True', 'blank': 'True'}),
'uf_naturalidade': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'uf_naturalidade'", 'null': 'True', 'to': "orm['municipios.UF']"}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'membro'", 'null': 'True', 'to': "orm['auth.User']"}),
'zona_eleitoral': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'})
},
'cadastro.pessoa': {
'Meta': {'ordering': "['nome']", 'object_name': 'Pessoa'},
'apelido': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'celular': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'dtcadastro': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'residencial': ('django.db.models.fields.CharField', [], {'max_length': '14', 'null': 'True', 'blank': 'True'}),
'sexo': ('django.db.models.fields.CharField', [], {'default': "'O'", 'max_length': '1'}),
'status_email': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'uf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['municipios.UF']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'financeiro.conta': {
'Meta': {'ordering': "('conta',)", 'object_name': 'Conta'},
'ativa': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'conta': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nota': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '1'})
},
'financeiro.deposito': {
'Meta': {'ordering': "['dt']", 'object_name': 'Deposito', '_ormbases': ['financeiro.Operacao']},
'operacao_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['financeiro.Operacao']", 'unique': 'True', 'primary_key': 'True'}),
'receita': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Receita']", 'null': 'True', 'blank': 'True'})
},
'financeiro.despesa': {
'Meta': {'object_name': 'Despesa'},
'documento': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'dtemissao': ('django.db.models.fields.DateField', [], {}),
'dtvencimento': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fornecedor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Fornecedor']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'integral': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'observacoes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tipo_despesa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.TipoDespesa']", 'null': 'True', 'blank': 'True'}),
'valor': ('utils.fields.BRDecimalField', [], {'max_digits': '14', 'decimal_places': '2'})
},
'financeiro.fornecedor': {
'Meta': {'ordering': "('nome',)", 'object_name': 'Fornecedor'},
'ativo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dados_financeiros': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identificador': ('django.db.models.fields.CharField', [], {'max_length': '14'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'servico_padrao': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.TipoDespesa']", 'null': 'True', 'blank': 'True'})
},
'financeiro.metaarrecadacao': {
'Meta': {'object_name': 'MetaArrecadacao'},
'data_inicial': ('django.db.models.fields.DateField', [], {}),
'data_limite': ('django.db.models.fields.DateField', [], {}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valor': ('utils.fields.BRDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'financeiro.operacao': {
'Meta': {'ordering': "['dt']", 'object_name': 'Operacao'},
'conferido': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'conta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Conta']"}),
'dt': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'obs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'referencia': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'valor': ('django.db.models.fields.DecimalField', [], {'max_digits': '14', 'decimal_places': '2'})
},
'financeiro.pagamento': {
'Meta': {'ordering': "['dt']", 'object_name': 'Pagamento', '_ormbases': ['financeiro.Operacao']},
'despesa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Despesa']", 'null': 'True', 'blank': 'True'}),
'fornecedor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Fornecedor']"}),
'operacao_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['financeiro.Operacao']", 'unique': 'True', 'primary_key': 'True'}),
'projeto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Projeto']", 'null': 'True', 'blank': 'True'}),
'tipo_despesa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.TipoDespesa']", 'null': 'True', 'blank': 'True'})
},
'financeiro.periodocontabil': {
'Meta': {'ordering': "['ciclo']", 'object_name': 'PeriodoContabil'},
'ciclo': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publico': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'financeiro.projeto': {
'Meta': {'object_name': 'Projeto'},
'ativo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'descricao': ('django.db.models.fields.TextField', [], {}),
'dtfim': ('django.db.models.fields.DateField', [], {}),
'dtinicio': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'orcamento': ('utils.fields.BRDecimalField', [], {'max_digits': '16', 'decimal_places': '2'}),
'responsavel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'financeiro.receita': {
'Meta': {'ordering': "('conta__conta',)", 'object_name': 'Receita'},
'colaborador': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Membro']", 'null': 'True', 'blank': 'True'}),
'conta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Conta']"}),
'dtaviso': ('django.db.models.fields.DateField', [], {}),
'dtpgto': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nota': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valor': ('utils.fields.BRDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'financeiro.tipodespesa': {
'Meta': {'object_name': 'TipoDespesa'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'descricao_breve': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'financeiro.transferencia': {
'Meta': {'ordering': "['dt']", 'object_name': 'Transferencia', '_ormbases': ['financeiro.Operacao']},
'destino': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Conta']"}),
'operacao_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['financeiro.Operacao']", 'unique': 'True', 'primary_key': 'True'}),
'transf_associada': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['financeiro.Transferencia']", 'null': 'True', 'blank': 'True'})
},
'municipios.uf': {
'Meta': {'ordering': "(u'nome',)", 'object_name': 'UF'},
'id_ibge': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regiao': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'uf': ('django.db.models.fields.CharField', [], {'max_length': '2'})
}
}
complete_apps = ['financeiro'] | gpl-3.0 | 4,204,574,867,102,121,500 | 81.36 | 184 | 0.549973 | false |
OmeGak/indico-plugins | importer_invenio/indico_importer_invenio/zodbimport.py | 1 | 1541 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.console import cformat
from indico_zodbimport import Importer, convert_to_unicode
from indico_importer_invenio.plugin import ImporterInvenioPlugin
class InvenioImporter(Importer):
plugins = {'importer', 'importer_invenio'}
def migrate(self):
self.migrate_settings()
def migrate_settings(self):
print cformat('%{white!}migrating settings')
ImporterInvenioPlugin.settings.delete_all()
opts = self.zodb_root['plugins']['importer']._PluginType__plugins['invenio']._PluginBase__options
ImporterInvenioPlugin.settings.set('server_url',
convert_to_unicode(opts['location']._PluginOption__value).strip())
db.session.commit()
| gpl-3.0 | 3,441,702,454,702,315,000 | 39.552632 | 109 | 0.722258 | false |
mazvv/travelcrm | travelcrm/forms/vats.py | 1 | 3033 | # -*-coding: utf-8 -*-
import colander
from . import(
Date,
SelectInteger,
ResourceSchema,
BaseForm,
BaseSearchForm,
BaseAssignForm,
)
from ..resources.vats import VatsResource
from ..models import DBSession
from ..models.vat import Vat
from ..models.service import Service
from ..models.account import Account
from ..models.note import Note
from ..models.task import Task
from ..lib.qb.vats import VatsQueryBuilder
from ..lib.utils.security_utils import get_auth_employee
from ..lib.utils.common_utils import translate as _
@colander.deferred
def date_validator(node, kw):
request = kw.get('request')
def validator(node, value):
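        # Reject the date if another Vat already exists for the same account
        # and service on that date; the record being edited is excluded.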
vat = (
DBSession.query(Vat)
.filter(
Vat.date == value,
Vat.account_id == request.params.get('account_id'),
Vat.service_id == request.params.get('service_id'),
)
.first()
)
if (
vat
and str(vat.id) != request.params.get('id')
):
raise colander.Invalid(
node,
_(u'Vat for this date exists'),
)
return colander.All(validator,)
class _VatSchema(ResourceSchema):
date = colander.SchemaNode(
Date(),
validator=date_validator,
)
account_id = colander.SchemaNode(
SelectInteger(Account),
)
service_id = colander.SchemaNode(
SelectInteger(Service),
)
vat = colander.SchemaNode(
colander.Decimal('.01'),
validator=colander.Range(min=0, max=100),
)
calc_method = colander.SchemaNode(
colander.String()
)
descr = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=255),
missing=None
)
class VatForm(BaseForm):
_schema = _VatSchema
def submit(self, vat=None):
if not vat:
vat = Vat(
resource=VatsResource.create_resource(
get_auth_employee(self.request)
)
)
else:
vat.resource.notes = []
vat.resource.tasks = []
vat.date = self._controls.get('date')
vat.account_id = self._controls.get('account_id')
vat.service_id = self._controls.get('service_id')
vat.vat = self._controls.get('vat')
vat.calc_method = self._controls.get('calc_method')
vat.descr = self._controls.get('descr')
for id in self._controls.get('note_id'):
note = Note.get(id)
vat.resource.notes.append(note)
for id in self._controls.get('task_id'):
task = Task.get(id)
vat.resource.tasks.append(task)
return vat
class VatSearchForm(BaseSearchForm):
_qb = VatsQueryBuilder
class VatAssignForm(BaseAssignForm):
def submit(self, ids):
for id in ids:
vat = Vat.get(id)
vat.resource.maintainer_id = self._controls.get(
'maintainer_id'
)
| gpl-3.0 | -5,373,886,258,973,040,000 | 25.840708 | 68 | 0.572371 | false |
DaveMDS/epymc | epymc/sdb.py | 1 | 5661 | #!/usr/bin/env python
# This Python file uses the following encoding: utf-8
#
# Copyright (C) 2010-2018 Davide Andreoli <[email protected]>
#
# This file is part of EpyMC, an EFL based Media Center written in Python.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function
import sys
import os
import shelve
import glob
from queue import Queue
from efl import ecore
from epymc import utils
from epymc.gui import EmcDialog
def DBG(msg):
# print('SDB: %s' % msg)
pass
_queue: Queue
_queue_timer: ecore.Timer
_instances = []
class EmcDatabase(object):
""" TODO doc this """
def __init__(self, name, version=None):
self._name = name
self._vers = version
self._vkey = '__database__version__'
self._sync_timer = None
# build the db name (different db for py2 and py3)
dbname = os.path.join(utils.user_conf_dir,
'db_py%d_%s' % (sys.version_info[0], name))
DBG('Open db: ' + name + ' from file: ' + dbname)
# check if the db exist (or is the first time we use it)
first_run = False if glob.glob(dbname + '*') else True
# open the shelve
self._sh = shelve.open(dbname)
if (not first_run) and (version is not None) and (self.get_version() != version):
# the db is outdated
text = _(
'<b>The database %s is outdated!</b><br><br>'
'The old file has been renamed with a .backup extension and a new (empty) '
'one has been created.<br><br>'
            'Sorry for the inconvenience.') % name
EmcDialog(style='warning', title=_('EpyMC Database'), text=text)
# close the shelve
self._sh.close()
# rename db files to .backup
for fname in glob.glob(dbname + '*'):
os.rename(fname, fname + '.backup')
# reopen a new (empty) shelve
self._sh = shelve.open(dbname)
if version is not None:
# store the version inside the db
self._sh[self._vkey] = version
_instances.append(self)
def _close(self):
DBG('Closing database %s' % self._name)
if self._sync_timer is not None:
self._sync_timer.delete()
self._sync_timer = None
self._sh.close()
def __len__(self):
if self._vers:
return len(self._sh) - 1
else:
return len(self._sh)
def __contains__(self, key):
return key in self._sh
def __iter__(self):
return self.items()
def items(self):
for k, v in self._sh.items():
if k != self._vkey:
yield k, v
def keys(self):
if self._vers:
return [k for k in self._sh.keys() if k != self._vkey]
else:
return self._sh.keys()
def get_data(self, key):
DBG('Get Data for db: %s, key: %s' % (self._name, key))
return self._sh[key]
def set_data(self, key, data, thread_safe=False):
DBG('Set data for db: %s, id: %s' % (self._name, key))
if thread_safe:
# just put in the queue
_queue.put((self, key, data))
else:
# update the db now
self._sh[key] = data
self._delayed_sync()
def del_data(self, key):
if key in self._sh:
del self._sh[key]
self._delayed_sync()
def id_exists(self, key):
return key in self._sh
def get_version(self):
if self._vkey in self._sh:
return self._sh[self._vkey]
def dump(self):
import pprint
print('=' * 60)
print('DB NAME: "{}" - VERSION: {}'.format(self._name, self._vers))
print('=' * 60)
for key in self._sh.keys():
print('\nDB KEY: "{}"'.format(key))
pprint.pprint(self._sh[key])
print('=' * 60)
def _delayed_sync(self):
if self._sync_timer is None:
self._sync_timer = ecore.Timer(5.0, self._sync_timer_cb)
else:
self._sync_timer.reset()
def _sync_timer_cb(self):
DBG("Syncing database %s" % self._name)
self._sh.sync()
self._sync_timer = None
return ecore.ECORE_CALLBACK_CANCEL
##################
def init():
global _queue
global _queue_timer
_queue = Queue()
_queue_timer = ecore.Timer(0.2, _process_queue)
def shutdown():
global _queue
global _queue_timer
_queue_timer.delete()
del _queue
for db in _instances:
db._close()
def _process_queue():
global _queue
if _queue.empty():
return True
count = 10
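    # Handle at most 10 queued writes per timer tick so a large backlog does
    # not block the mainloop for too long.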
# DBG("Queue size: " + str(_queue.qsize()))
while not _queue.empty() and count > 0:
# DBG('Queue processing...count:%d len:%d' % (count, _queue.qsize()))
count -= 1
(db, key, data) = _queue.get_nowait()
db._sh[key] = data
db._delayed_sync()
return ecore.ECORE_CALLBACK_RENEW
| gpl-3.0 | -2,831,570,294,776,829,400 | 26.480583 | 91 | 0.561208 | false |
dstenb/pylaunchr-svtplay | format.py | 1 | 1106 | import datetime
def format_published_at(episode):
if not episode.published_at:
return ""
elif episode.published_today():
format = "Published today %H:%M"
return episode.published_at.strftime(format)
elif episode.published_yesterday():
format = "Published yesterday %H:%M"
return episode.published_at.strftime(format)
else:
format = "Published %d %B"
return episode.published_at.strftime(format)
def format_duration(episode):
duration = episode.duration
if not duration:
return ""
def seconds():
return "%d sec" % (duration.seconds % 60, )
def minutes():
return "%d min" % ((duration.seconds // 60) % 60, )
def minutes_and_seconds():
if (duration.seconds % 60) == 0:
return minutes()
else:
return minutes() + " " + seconds()
if duration < datetime.timedelta(minutes=1):
return seconds()
elif duration < datetime.timedelta(hours=1):
return minutes_and_seconds()
else:
return "%d h " + minutes_and_seconds()
| mit | 4,477,413,895,579,498,500 | 25.97561 | 59 | 0.597649 | false |
deltachat/deltachat-pages | tools/create-local-help.py | 1 | 4317 | #!/usr/bin/env python3
# the structure of the help files is:
# - ANY_DIR/help/LANG/help.html (files generated by deltachat-pages)
# - ANY_DIR/help/help.css (file should be provided by deltachat-UI, not generated by deltachat-pages)
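#
# e.g. (illustrative) for the OUTPUT_DIR passed on the command line:
# - OUTPUT_DIR/en/help.html, OUTPUT_DIR/de/help.html, ...
# - OUTPUT_DIR/delta-what-optim.png (linked files are copied next to the language dirs)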
from shutil import copyfile
import sys
import os
import re
# list all files that should go to the local help here.
# the path should be the path used eg. in the <img> tag.
linked_files = ["assets/home/delta-what-optim.png"]
def read_file(filename):
f = open(filename, 'r')
content = f.read()
f.close()
return content
def write_file(filename, content):
f = open(filename, 'w')
f.write(content)
f.close()
def generate_file(srcdir, destdir, lang, file, add_top_links):
print("generate local help in " + destdir + "/" + lang + "/" + file)
content = read_file(srcdir + "/" + lang + "/" + file)
content = re.sub(r"^.*<div id=\"content\">.*<h1>.*?</h1>.*?<ul.*?>",
"<!DOCTYPE html>\n"
+ "<html>"
+ "<head>"
+ "<meta charset=\"UTF-8\" />"
+ "<meta name=\"viewport\" content=\"initial-scale=1.0\" />"
+ "<link rel=\"stylesheet\" href=\"../help.css\" />"
+ "</head>"
+ "<body>"
+ "<ul id=\"top\">",
content,
flags=re.MULTILINE|re.DOTALL)
content = re.sub(r"</div>.*?</body>.*</html>.*$",
"</body>"
+ "</html>",
content,
flags=re.MULTILINE|re.DOTALL)
for linked_file in linked_files:
srcfile = "../" + linked_file
destfile = "../" + linked_file.split("/")[-1]
content = re.sub(srcfile, destfile, content)
if add_top_links:
top_link = "<p class=\"back\"><a href=\"#top\">^</a></p>"
content = re.sub(r"<h([234].*?)>",
top_link + "<h\\1>",
content,
flags=re.MULTILINE|re.DOTALL) + top_link
write_file(destdir + "/" + lang + "/" + file, content)
def generate_lang(srcdir, destdir, lang, add_top_links):
os.makedirs(destdir + "/" + lang, exist_ok=True)
generate_file(srcdir, destdir, lang, "help.html", add_top_links)
def generate_help(srcdir, destdir, add_top_links=False):
generate_lang(srcdir, destdir, "cs", add_top_links)
generate_lang(srcdir, destdir, "de", add_top_links)
generate_lang(srcdir, destdir, "en", add_top_links)
generate_lang(srcdir, destdir, "es", add_top_links)
generate_lang(srcdir, destdir, "fr", add_top_links)
generate_lang(srcdir, destdir, "id", add_top_links)
generate_lang(srcdir, destdir, "it", add_top_links)
generate_lang(srcdir, destdir, "pl", add_top_links)
generate_lang(srcdir, destdir, "nl", add_top_links)
generate_lang(srcdir, destdir, "ru", add_top_links)
generate_lang(srcdir, destdir, "sq", add_top_links)
generate_lang(srcdir, destdir, "uk", add_top_links)
generate_lang(srcdir, destdir, "zh_CN", add_top_links)
for linked_file in linked_files:
srcfile = srcdir + "/" + linked_file
destfile = destdir + "/" + linked_file.split("/")[-1]
print("copy " + srcfile + " to " + destfile)
copyfile(srcfile, destfile)
if __name__ == "__main__":
if len(sys.argv) < 3:
raise SystemExit("usage: create-local-help.py INPUT_DIR OUTPUT_DIR [--add-top-links]"
+"\n eg. create-local-help.py _site ../foobar")
srcdir = sys.argv[1]
print("using source directory: " + srcdir)
destdir = sys.argv[2]
print("using destination directory: " + destdir)
add_top_links = False
if len(sys.argv) == 4 and sys.argv[3] == "--add-top-links":
add_top_links = True
print("add links back to top of file: yes")
else:
print("add links back to top of file: no")
if not os.path.isdir(srcdir):
raise SystemExit("Error: " + srcdir + " is no existent directory.")
if not os.path.isdir(destdir):
raise SystemExit("Error: " + destdir + " is no existent directory.")
generate_help(srcdir, destdir, add_top_links=add_top_links) | gpl-3.0 | 219,344,217,322,062,800 | 34.105691 | 105 | 0.552235 | false |
mibofra/olifant | gui.py | 1 | 26152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Sat Mar 31 15:44:43 2012
import wx
import os
import sys
from olifant import Olifant
from olifantException import OlifantException
# begin wxGlade: extracode
# end wxGlade
def showError(label,text):
dial = wx.MessageDialog(None, text , label , wx.ICON_ERROR)
dial.ShowModal()
class DialogUSbSelection(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: DialogUSbSelection.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.UsbSelectionLabel = wx.StaticText(self, -1, "Select Usb Key", style=wx.ALIGN_CENTRE)
self.UsbSelectCombobox = wx.ComboBox(self, -1, choices=[], style=wx.CB_DROPDOWN | wx.CB_READONLY)
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: DialogUSbSelection.__set_properties
self.SetTitle("UsbSelect")
# end wxGlade
def __do_layout(self):
# begin wxGlade: DialogUSbSelection.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add(self.UsbSelectionLabel, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
sizer_1.Add(self.UsbSelectCombobox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 5)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
self.Layout()
# end wxGlade
# end of class DialogUSbSelection
class MyFrame(wx.Frame):
olifant = None #olifant
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.LogoMainFrame_1 = wx.StaticBitmap(self, -1, wx.Bitmap("images/icon.png", wx.BITMAP_TYPE_ANY), style=wx.SIMPLE_BORDER)
self.labelMainFrame_1 = wx.StaticText(self, -1, "Olifant 1.0", style=wx.ALIGN_CENTRE)
self.MonitorModSelectionLabel = wx.StaticText(self, -1, "Select monitoring mode:", style=wx.ALIGN_CENTRE)
self.MonitorModSelectionBox = wx.ComboBox(self, -1, choices=["Password mode", "USB mode", "Strong mode"], style=wx.CB_DROPDOWN | wx.CB_DROPDOWN | wx.CB_READONLY)
self.LockButton = wx.Button(self, -1, "Lock")
self.PowerSupplyCheckbox = wx.CheckBox(self, -1, "power supply")
self.PowerBCheckbox = wx.CheckBox(self, -1, "power button")
self.BatteryModCheckbox = wx.CheckBox(self, -1, "battery mode")
self.ClosedlidModCheckbox = wx.CheckBox(self, -1, "closed lid")
self.window_1 = wx.HyperlinkCtrl(self, -1, "About Olifant", "https://launchpad.net/olifant")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("Olifant")
self.SetSize((436, 316))
self.SetFocus()
self.LogoMainFrame_1.SetMinSize((64, 64))
self.labelMainFrame_1.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.MonitorModSelectionBox.SetSelection(0)
# end wxGlade
self.MonitorModSelectionBox.SetEditable(False)
self.PowerSupplyCheckbox.SetValue(True)
self.PowerBCheckbox.SetValue(True)
self.BatteryModCheckbox.SetValue(True)
self.ClosedlidModCheckbox.SetValue(True)
self.Bind(wx.EVT_BUTTON, self.OnLockClick, self.LockButton)
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizerMainFrame = wx.BoxSizer(wx.VERTICAL)
GridMainFrame = wx.GridSizer(1, 4, 0, 0)
sizerMainFrame.Add((400, 20), 0, 0, 0)
sizerMainFrame.Add(self.LogoMainFrame_1, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizerMainFrame.Add(self.labelMainFrame_1, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizerMainFrame.Add((400, 20), 0, 0, 0)
sizerMainFrame.Add(self.MonitorModSelectionLabel, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
sizerMainFrame.Add(self.MonitorModSelectionBox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 10)
sizerMainFrame.Add(self.LockButton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
GridMainFrame.Add(self.PowerSupplyCheckbox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
GridMainFrame.Add(self.PowerBCheckbox, 0, wx.ALL | wx.ALIGN_RIGHT | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
GridMainFrame.Add(self.BatteryModCheckbox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
GridMainFrame.Add(self.ClosedlidModCheckbox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
sizerMainFrame.Add(GridMainFrame, 1, wx.EXPAND, 0)
sizerMainFrame.Add(self.window_1, 0, wx.EXPAND | wx.ALIGN_RIGHT | wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizerMainFrame)
sizerMainFrame.SetSizeHints(self)
self.Layout()
self.Centre()
# end wxGlade
def OnLockClick(self, event):
alarms = []
if self.PowerSupplyCheckbox.IsChecked():
alarms.append(Olifant.AC_ALARM)
if self.PowerBCheckbox.IsChecked():
alarms.append(Olifant.POWER_BUTTON_ALARM)
if self.BatteryModCheckbox.IsChecked():
alarms.append(Olifant.BATTERY_ALARM)
if self.ClosedlidModCheckbox.IsChecked():
alarms.append(Olifant.LID_OPENED_ALARM)
if len(alarms) == 0:
            showError('Warning','You have all options disabled, are you sure? Olifant will just do nothing')
choice = self.MonitorModSelectionBox.GetCurrentSelection()
try:
if choice == 0:
self.olifant = Olifant(Olifant.PASSWD_MODE,alarms)
elif choice == 1:
self.olifant = Olifant(Olifant.USB_MODE,alarms)
elif choice == 2:
self.olifant = Olifant(Olifant.STRONG_MODE,alarms)
else:
showError('Wrong Selection','Olifant option unknown')
if choice == 0:
passdlg = MyDialog2(self,-1)
passdlg.clearAll() #TODO we need this because of a bug
passdlg.ShowModal()
pwd = passdlg.getPasswd()
if pwd == '':
showError('Error!','password cannot be empty')
else:
try:
self.olifant.lock(pwd)
"""
FlagList = ['FULLSCREEN_NOMENUBAR',
'FULLSCREEN_NOTOOLBAR',
'FULLSCREEN_NOSTATUSBAR',
'FULLSCREEN_NOBORDER',
'FULLSCREEN_NOCAPTION',
'FULLSCREEN_ALL']
self.ShowFullScreen(True,FlagList)
"""
activedlg = MyDialog(self,-1)
activedlg.setOlifant(self.olifant)
activedlg.setParentFrame(self)
activedlg.ShowModal()
except OlifantException as ex:
showError('Olifant exception',ex.getMessage())
else:
showError('Error','Not supported yet')
except OlifantException as ex:
showError('Olifant exception',ex.getMessage())
# end of class MyFrame
"""
useless one!!
"""
class MyDialog2(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog2.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.ChoosePswdLabel = wx.StaticText(self, -1, "Choose password", style=wx.ALIGN_CENTRE)
self.ChoosePswdBox = wx.TextCtrl(self, -1, "", style=wx.TE_PASSWORD)
self.ConfirmPswdLabel = wx.StaticText(self, -1, "Confirm password", style=wx.ALIGN_CENTRE)
self.ConfirmPswdBox = wx.TextCtrl(self, -1, "", style=wx.TE_PASSWORD)
self.KeypadButton_1 = wx.Button(self, -1, "1")
self.KeypadButton_2 = wx.Button(self, -1, "2")
self.KeypadButton_3 = wx.Button(self, -1, "3")
self.KeypadButton_4 = wx.Button(self, -1, "4")
self.KeypadButton_5 = wx.Button(self, -1, "5")
self.KeypadButton_6 = wx.Button(self, -1, "6")
self.KeypadButton_7 = wx.Button(self, -1, "7")
self.KeypadButton_8 = wx.Button(self, -1, "8")
self.KeypadButton_9 = wx.Button(self, -1, "9")
self.KeypadButton_DEL = wx.Button(self, -1, "DEL")
self.KeypadButton_0 = wx.Button(self, -1, "0")
self.KeypadButtonButton_Enable = wx.Button(self, -1, "ENABLE")
self.__set_properties()
self.__do_layout()
# end wxGlade
self.focus = self.ChoosePswdBox
def __set_properties(self):
# begin wxGlade: MyDialog2.__set_properties
self.SetTitle("dialog_3")
self.SetSize((300, 370))
self.ChoosePswdLabel.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Ubuntu"))
self.ChoosePswdBox.SetMinSize((150, 30))
self.ConfirmPswdLabel.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Ubuntu"))
self.ConfirmPswdBox.SetMinSize((150, 30))
self.KeypadButton_1.SetMinSize((50, 50))
self.KeypadButton_2.SetMinSize((50, 50))
self.KeypadButton_3.SetMinSize((50, 50))
self.KeypadButton_4.SetMinSize((50, 50))
self.KeypadButton_5.SetMinSize((50, 50))
self.KeypadButton_6.SetMinSize((50, 50))
self.KeypadButton_7.SetMinSize((50, 50))
self.KeypadButton_8.SetMinSize((50, 50))
self.KeypadButton_9.SetMinSize((50, 50))
self.KeypadButton_DEL.SetMinSize((50, 50))
self.KeypadButton_0.SetMinSize((50, 50))
self.KeypadButtonButton_Enable.SetMinSize((50, 50))
self.KeypadButtonButton_Enable.SetFont(wx.Font(7, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
# end wxGlade
self.ChoosePswdBox.SetEditable(False)
self.KeypadButton_1.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_2.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_3.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_4.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_5.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_6.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_7.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_8.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_9.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_DEL.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButton_0.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.KeypadButtonButton_Enable.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.ChoosePswdBox.Bind(wx.EVT_SET_FOCUS, self.__onPswdBoxFocused)
self.ConfirmPswdBox.Bind(wx.EVT_SET_FOCUS, self.__onPswdBoxFocused)
#self.choose_pswd.SetEditable(False)
def __do_layout(self):
# begin wxGlade: MyDialog2.__do_layout
sizerPswdDialog = wx.BoxSizer(wx.VERTICAL)
GridPswdDialog_4 = wx.GridSizer(2, 3, 0, 0)
GridPswdDialog_3 = wx.GridSizer(2, 3, 0, 0)
GridPswdDialog_2 = wx.GridSizer(1, 3, 0, 0)
GridPswdDialog_1 = wx.GridSizer(1, 3, 0, 0)
sizerPswdDialog.Add(self.ChoosePswdLabel, 0, wx.TOP | wx.ALIGN_CENTER_HORIZONTAL, 10)
sizerPswdDialog.Add(self.ChoosePswdBox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 6)
sizerPswdDialog.Add(self.ConfirmPswdLabel, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
sizerPswdDialog.Add(self.ConfirmPswdBox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 6)
GridPswdDialog_1.Add(self.KeypadButton_1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_1.Add(self.KeypadButton_2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_1.Add(self.KeypadButton_3, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
sizerPswdDialog.Add(GridPswdDialog_1, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
GridPswdDialog_2.Add(self.KeypadButton_4, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_2.Add(self.KeypadButton_5, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_2.Add(self.KeypadButton_6, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
sizerPswdDialog.Add(GridPswdDialog_2, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
GridPswdDialog_3.Add(self.KeypadButton_7, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_3.Add(self.KeypadButton_8, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_3.Add(self.KeypadButton_9, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
sizerPswdDialog.Add(GridPswdDialog_3, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
GridPswdDialog_4.Add(self.KeypadButton_DEL, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_4.Add(self.KeypadButton_0, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
GridPswdDialog_4.Add(self.KeypadButtonButton_Enable, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
sizerPswdDialog.Add(GridPswdDialog_4, 1, wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
self.SetSizer(sizerPswdDialog)
self.Layout()
# end wxGlade
def __onKeyClick(self,evt):
button = (evt.GetEventObject()).Label
passwd = self.focus.GetValue()
if button == 'DEL':
if len(passwd) > 0:
passwd = passwd[:-1]
self.focus.SetValue(passwd)
elif button == 'ENABLE':
self.Close()
else:
passwd += button
self.focus.SetValue(passwd)
def __onPswdBoxFocused(self, evt):
self.focus = evt.GetEventObject()
def getPasswd(self):
if self.ConfirmPswdBox.GetValue() != self.ChoosePswdBox.GetValue():
            showError('Error!','Password and confirmation do not match.')
            return ''
else:
return self.ChoosePswdBox.GetValue()
def clearAll(self):
self.ChoosePswdBox.SetValue("")
self.ConfirmPswdBox.SetValue("")
"""
def OnAlarmClick(self, event, ol):
passvalue_1 = self.pass1.GetValue()
passvalue_2 = self.pass2.GetValue()
if passvalue_1 == passvalue_2:
try:
olifant.lock(passvalue_1)
almdlg = MyDialog(self, -1)
almdlg.Lock_copy.Bind(EVT_BUTTON, almdlg.OnClick, olifant, passvalue1)
almdlg.Destroy()
except OlifantException as ex:
                print ex.getMessage() #TODO show an error dialog box
        else:
            print "The passwords do not match." #TODO show an error dialog box
"""
# end of class MyDialog2
class MyPanel(wx.Panel):
def __init__(self, *args, **kwds):
# begin wxGlade: MyPanel.__init__
kwds["style"] = wx.TAB_TRAVERSAL
wx.Panel.__init__(self, *args, **kwds)
self.AboutLogo = wx.StaticBitmap(self, -1, wx.Bitmap("images/icon.png", wx.BITMAP_TYPE_ANY))
self.AboutLabel_1 = wx.StaticText(self, -1, "Olifant 1.0", style=wx.ALIGN_CENTRE)
self.AboutLabel_2 = wx.StaticText(self, -1, "https://launchpad.net/olifant", style=wx.ALIGN_CENTRE)
self.AboutLabel_3 = wx.StaticText(self, -1, "author")
self.AboutLabel_4 = wx.StaticText(self, -1, "kokito\n(jumba@LP)", style=wx.ALIGN_CENTRE)
self.AboutLabel_5 = wx.StaticText(self, -1, "Actual developers")
self.AboutLabel_6 = wx.StaticText(self, -1, "Cristian_C\nSquall867")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyPanel.__set_properties
self.SetSize((312, 312))
self.AboutLogo.SetMinSize((16, 16))
self.AboutLabel_1.SetFont(wx.Font(14, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.AboutLabel_2.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.AboutLabel_3.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.AboutLabel_5.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyPanel.__do_layout
AboutSizer = wx.BoxSizer(wx.VERTICAL)
AboutSizer.Add(self.AboutLogo, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 8)
AboutSizer.Add(self.AboutLabel_1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
AboutSizer.Add(self.AboutLabel_2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
AboutSizer.Add(self.AboutLabel_3, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
AboutSizer.Add(self.AboutLabel_4, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
AboutSizer.Add(self.AboutLabel_5, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
AboutSizer.Add(self.AboutLabel_6, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
self.SetSizer(AboutSizer)
# end wxGlade
# end of class MyPanel
class MyDialog(wx.Dialog):
olifant = None
parentFrame = None
def __init__(self,*args, **kwds):
# begin wxGlade: MyDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.LogoActivated = wx.StaticBitmap(self, -1, wx.Bitmap("images/icon.png", wx.BITMAP_TYPE_ANY))
self.ActivatedLabel_1 = wx.StaticText(self, -1, "Olifant 1.0", style=wx.ALIGN_CENTRE)
self.ActivatedLabel_2 = wx.StaticText(self, -1, "ALARM ACTIVATED", style=wx.ALIGN_CENTRE)
self.AlarmActivatedUnlockButton = wx.Button(self, -1, "Unlock")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyDialog.__set_properties
self.SetTitle("dialog_1")
self.LogoActivated.SetMinSize((16, 16))
self.ActivatedLabel_1.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.ActivatedLabel_2.SetForegroundColour(wx.Colour(255, 0, 0))
self.ActivatedLabel_2.SetFont(wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
# end wxGlade
self.AlarmActivatedUnlockButton.Bind(wx.EVT_BUTTON,self.OnUnlockClick)
def __do_layout(self):
# begin wxGlade: MyDialog.__do_layout
SizerAlarmActivated = wx.BoxSizer(wx.VERTICAL)
SizerAlarmActivated.Add((400, 30), 0, 0, 0)
SizerAlarmActivated.Add(self.LogoActivated, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
SizerAlarmActivated.Add(self.ActivatedLabel_1, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
SizerAlarmActivated.Add(self.ActivatedLabel_2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 26)
SizerAlarmActivated.Add((400, 30), 0, 0, 0)
SizerAlarmActivated.Add(self.AlarmActivatedUnlockButton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
SizerAlarmActivated.Add((400, 30), 0, 0, 0)
self.SetSizer(SizerAlarmActivated)
SizerAlarmActivated.Fit(self)
self.Layout()
# end wxGlade
def setOlifant(self,olifant):
self.olifant = olifant
def setParentFrame(self,frame):
self.parentFrame = frame
def OnUnlockClick(self,evt):
passdlg = MyDialog1(self, -1)
passdlg.clearAll() #TODO we need this because of a bug
passdlg.ShowModal()
try:
self.olifant.unlock(passdlg.getPasswd())
self.parentFrame.ShowFullScreen(False)
self.Close()
except OlifantException as ex:
showError('Olifant Exception',ex.getMessage())
# end of class MyDialog
class MyDialog1(wx.Dialog):
passwd = []
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialog1.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.passwd = []  # per-instance buffer; avoids reusing the shared class-level list
self.UnlockPswdLabel = wx.StaticText(self, -1, "Password", style=wx.ALIGN_CENTRE)
self.UnlockPswdTextbox = wx.TextCtrl(self, -1, "", style=wx.TE_PASSWORD)
self.UnlockKeypadButton_1 = wx.Button(self, -1, "1")
self.UnlockKeypadButton_2 = wx.Button(self, -1, "2")
self.UnlockKeypadButton_3 = wx.Button(self, -1, "3")
self.UnlockKeypadButton_4 = wx.Button(self, -1, "4")
self.UnlockKeypadButton_5 = wx.Button(self, -1, "5")
self.UnlockKeypadButton_6 = wx.Button(self, -1, "6")
self.UnlockKeypadButton_7 = wx.Button(self, -1, "7")
self.UnlockKeypadButton_8 = wx.Button(self, -1, "8")
self.UnlockKeypadButton_9 = wx.Button(self, -1, "9")
self.UnlockKeypadButton_DEL = wx.Button(self, -1, "DEL")
self.UnlockKeypadButton_0 = wx.Button(self, -1, "0")
self.UnlockKeypadButton_Disable = wx.Button(self, -1, "DISABLE")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyDialog1.__set_properties
self.SetTitle("dialog_2")
self.SetSize((300, 321))
self.UnlockPswdLabel.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Ubuntu"))
self.UnlockPswdTextbox.SetMinSize((150, 30))
self.UnlockKeypadButton_1.SetMinSize((50, 50))
self.UnlockKeypadButton_2.SetMinSize((50, 50))
self.UnlockKeypadButton_3.SetMinSize((50, 50))
self.UnlockKeypadButton_4.SetMinSize((50, 50))
self.UnlockKeypadButton_5.SetMinSize((50, 50))
self.UnlockKeypadButton_6.SetMinSize((50, 50))
self.UnlockKeypadButton_7.SetMinSize((50, 50))
self.UnlockKeypadButton_8.SetMinSize((50, 50))
self.UnlockKeypadButton_9.SetMinSize((50, 50))
self.UnlockKeypadButton_DEL.SetMinSize((50, 50))
self.UnlockKeypadButton_0.SetMinSize((50, 50))
self.UnlockKeypadButton_Disable.SetMinSize((50, 50))
self.UnlockKeypadButton_Disable.SetFont(wx.Font(7, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "Ubuntu"))
# end wxGlade
self.UnlockPswdTextbox.SetEditable(False)
self.UnlockKeypadButton_1.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_2.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_3.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_4.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_5.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_6.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_7.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_8.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_9.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_DEL.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_0.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
self.UnlockKeypadButton_Disable.Bind(wx.EVT_BUTTON,self.__onKeyClick)#,self.button_1_u)
def __do_layout(self):
# begin wxGlade: MyDialog1.__do_layout
UnlockSizer = wx.BoxSizer(wx.VERTICAL)
UnlockGrid_4 = wx.GridSizer(2, 3, 0, 0)
UnlockGrid_3 = wx.GridSizer(2, 3, 0, 0)
UnlockGrid_2 = wx.GridSizer(1, 3, 0, 0)
UnlockGrid_1 = wx.GridSizer(1, 3, 0, 0)
UnlockSizer.Add(self.UnlockPswdLabel, 0, wx.TOP | wx.ALIGN_CENTER_HORIZONTAL, 10)
UnlockSizer.Add(self.UnlockPswdTextbox, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 6)
UnlockGrid_1.Add(self.UnlockKeypadButton_1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_1.Add(self.UnlockKeypadButton_2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_1.Add(self.UnlockKeypadButton_3, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockSizer.Add(UnlockGrid_1, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
UnlockGrid_2.Add(self.UnlockKeypadButton_4, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_2.Add(self.UnlockKeypadButton_5, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_2.Add(self.UnlockKeypadButton_6, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockSizer.Add(UnlockGrid_2, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
UnlockGrid_3.Add(self.UnlockKeypadButton_7, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_3.Add(self.UnlockKeypadButton_8, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_3.Add(self.UnlockKeypadButton_9, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockSizer.Add(UnlockGrid_3, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
UnlockGrid_4.Add(self.UnlockKeypadButton_DEL, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_4.Add(self.UnlockKeypadButton_0, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockGrid_4.Add(self.UnlockKeypadButton_Disable, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
UnlockSizer.Add(UnlockGrid_4, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 10)
self.SetSizer(UnlockSizer)
self.Layout()
# end wxGlade
def __onKeyClick(self,evt):
button = (evt.GetEventObject()).Label
        if button == 'DISABLE':
            self.Close()
        elif button == 'DEL':
            if len(self.passwd) > 0:
                self.passwd.pop()
                self.UnlockPswdTextbox.SetValue(''.join(self.passwd))
        else:
            self.passwd.append(button)
            self.UnlockPswdTextbox.SetValue(''.join(self.passwd))
def getPasswd(self):
return ''.join(self.passwd)
def clearAll(self):
self.passwd = []
"""
def OnAlarmClick(self, event, ol, password):
passvalue = self.pswd.GetValue()
if passvalue == password:
try:
self.olifant.unlock(passvalue)
except OlifantException as ex:
            print ex.getMessage() #TODO show an error dialog box
        else:
            print "The passwords do not match." #TODO show an error dialog box
"""
# end of class MyDialog1
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
Olifant_main = MyFrame(None, -1, "")
app.SetTopWindow(Olifant_main)
Olifant_main.Show()
app.MainLoop()
| gpl-3.0 | 8,529,343,343,583,915,000 | 45.951526 | 169 | 0.66645 | false |
boldprogressives/django-opendebates | opendebates/opendebates/utils.py | 1 | 2781 | import json
import random
from .models import Voter
def get_voter(request):
if request.user.is_authenticated():
try:
voter = request.user.voter
except Voter.DoesNotExist:
return {}
return {'email': request.user.email,
'zip': voter.zip,
}
elif 'voter' in request.session:
return request.session['voter']
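# Illustrative return values (a sketch): {'email': 'user@example.org', 'zip': '12345'}
# for a logged-in voter, or the dict stored in request.session['voter'] for guests.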
def get_headers_from_request(request):
try:
headers = {}
for key in request.META:
if key.startswith("HTTP_"):
headers[key] = request.META[key]
return json.dumps(headers)
except Exception:
return None
def get_ip_address_from_request(request):
PRIVATE_IPS_PREFIX = ('10.', '172.', '192.', '127.')
ip_address = ''
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', '')
if x_forwarded_for and ',' not in x_forwarded_for:
if not x_forwarded_for.startswith(PRIVATE_IPS_PREFIX):
ip_address = x_forwarded_for.strip()
else:
ips = [ip.strip() for ip in x_forwarded_for.split(',')]
for ip in ips:
if ip.startswith(PRIVATE_IPS_PREFIX):
continue
else:
ip_address = ip
break
if not ip_address:
x_real_ip = request.META.get('HTTP_X_REAL_IP', '')
if x_real_ip:
if not x_real_ip.startswith(PRIVATE_IPS_PREFIX):
ip_address = x_real_ip.strip()
if not ip_address:
remote_addr = request.META.get('REMOTE_ADDR', '')
if remote_addr:
if not remote_addr.startswith(PRIVATE_IPS_PREFIX):
ip_address = remote_addr.strip()
if remote_addr.startswith(PRIVATE_IPS_PREFIX):
ip_address = remote_addr.strip()
if not ip_address:
ip_address = '127.0.0.1'
return ip_address
def choose_sort(sort):
sort = sort or random.choice(["trending", "trending", "random"])
return sort
def sort_list(citations_only, sort, ideas):
ideas = ideas.filter(
approved=True,
duplicate_of__isnull=True
).select_related("voter", "category", "voter__user")
if citations_only:
ideas = ideas.filter(citation_verified=True)
if sort == "editors":
ideas = ideas.order_by("-editors_pick")
elif sort == "trending":
ideas = ideas.order_by("-score")
elif sort == "random":
ideas = ideas.order_by("-random_id")
elif sort == "-date":
ideas = ideas.order_by("-created_at")
elif sort == "+date":
ideas = ideas.order_by("created_at")
elif sort == "-votes":
ideas = ideas.order_by("-votes")
elif sort == "+votes":
ideas = ideas.order_by("votes")
return ideas
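# Illustrative call (a sketch; `Idea` stands in for the real submissions model,
# which is not shown in this module):
#   ideas = sort_list(citations_only=False, sort=choose_sort(None), ideas=Idea.objects.all())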
| apache-2.0 | 4,563,523,322,794,240,000 | 30.247191 | 68 | 0.563107 | false |
timgrossmann/InstaPy | instapy/time_util.py | 1 | 1588 | """Helper module to handle time related stuff"""
from time import sleep as original_sleep
from datetime import datetime
from random import gauss
from random import uniform
# Amount of variance to be introduced
# i.e. random time will be in the range: TIME +/- STDEV %
STDEV = 0.5
sleep_percentage = 1
sleep_percentage = sleep_percentage * uniform(0.9, 1.1)
def randomize_time(mean):
allowed_range = mean * STDEV
stdev = allowed_range / 3 # 99.73% chance to be in the allowed range
t = 0
while abs(mean - t) > allowed_range:
t = gauss(mean, stdev)
return t
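# e.g. (illustrative): randomize_time(10.0) draws from a Gaussian and retries until the
# value falls within 10.0 +/- 50% (i.e. between 5.0 and 15.0), per STDEV above.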
def set_sleep_percentage(percentage):
global sleep_percentage
sleep_percentage = percentage / 100
sleep_percentage = sleep_percentage * uniform(0.9, 1.1)
def sleep(t, custom_percentage=None):
if custom_percentage is None:
custom_percentage = sleep_percentage
time = randomize_time(t) * custom_percentage
original_sleep(time)
def sleep_actual(t):
original_sleep(t)
def get_time(labels):
"""To get a use out of this helpful function
catch in the same order of passed parameters"""
if not isinstance(labels, list):
labels = [labels]
results = []
for label in labels:
if label == "this_minute":
results.append(datetime.now().strftime("%M"))
if label == "this_hour":
results.append(datetime.now().strftime("%H"))
elif label == "today":
results.append(datetime.now().strftime("%Y-%m-%d"))
results = results if len(results) > 1 else results[0]
return results
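# Illustrative usage (a sketch): results come back in the order the labels were passed:
#   this_minute, today = get_time(["this_minute", "today"])
#   hour = get_time("this_hour")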
| gpl-3.0 | -4,373,231,225,892,659,700 | 24.612903 | 73 | 0.654912 | false |
PeridotYouClod/gRPC-Makerboards | DaoServer.py | 1 | 2008 | import concurrent.futures as futures
import grpc
import time
import ProtoConfig
import generated.proto_out.dao_pb2 as dao_pb2
import generated.proto_out.dao_pb2_grpc as dao_grpc
from pylibs.Database import Mongo
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Dao(dao_grpc.DaoServicer):
def __init__(self, sensor_db):
super().__init__()
self.sensor_db = sensor_db
def Select(self, request, context):
table = request.table
limit = request.limit
cols = request.cols
print('Got request {\n%s}\n' % (request))
colNames = [col.name for col in cols]
findResult = self.sensor_db.Find(table=table, columns=colNames, limit=limit)
allColValues = {col.name: [] for col in cols} # Col name to list of vals
for doc in findResult:
for col in cols:
# print('%s added to %s' % (doc[col.name], col.name))
allColValues[col.name].append(doc[col.name])
dataColumns = [self._NewDataColumn(colName, vals) for (colName, vals)
in allColValues.items()]
return dao_pb2.SelectReply(columns=dataColumns)
def _NewDataColumn(self, columnName, values):
datacolumn = dao_pb2.DataColumn(name=columnName)
if not values:
print("Warning: No values found.")
elif type(values[0]) is int:
datacolumn.intValues.extend(values)
elif type(values[0]) is str:
datacolumn.stringValues.extend(values)
else:
print("ERROR: Unknown Type!")
return datacolumn
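        # e.g. (illustrative):
        #   _NewDataColumn('temp', [20, 21]) -> DataColumn(name='temp', intValues=[20, 21])
        #   _NewDataColumn('unit', ['C'])    -> DataColumn(name='unit', stringValues=['C'])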
def serve():
protoConfig = ProtoConfig.getConfig()
sensor_db = Mongo()
    sensor_db.GetClient() # initialize the Db
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
dao_grpc.add_DaoServicer_to_server(Dao(sensor_db), server)
port = protoConfig.ports.daoPort
server.add_insecure_port('[::]:%s' % port)
server.start()
print('Started Dao Server on Port %s ' % port)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
| mit | 6,045,501,900,179,157,000 | 30.375 | 80 | 0.667829 | false |
tind/invenio-communities | tests/test_utils.py | 1 | 2203 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Utility functions tests."""
from __future__ import absolute_import, print_function
from invenio_records.api import Record
from invenio_communities.models import InclusionRequest
from invenio_communities.utils import render_template_to_string
def test_template_formatting_from_string(app):
"""Test formatting of string-based template to string."""
with app.app_context():
out = render_template_to_string("foobar: {{ baz }}", _from_string=True,
**{'baz': 'spam'})
assert out == 'foobar: spam'
def test_email_formatting(app, db, communities, user):
"""Test formatting of the email message with the default template."""
with app.extensions['mail'].record_messages() as outbox:
(comm1, comm2, comm3) = communities
rec1 = Record.create({
'title': 'Foobar and Bazbar',
'description': 'On Foobar, Bazbar and <b>more</b>.'
})
# Request
InclusionRequest.create(community=comm1, record=rec1, user=user)
# Check emails being sent
assert len(outbox) == 1
sent_msg = outbox[0]
assert sent_msg.recipients == [user.email]
assert comm1.title in sent_msg.body
| gpl-2.0 | -8,326,078,723,156,442,000 | 35.716667 | 79 | 0.687245 | false |
viaregio/django-newsletter | newsletter/admin_utils.py | 1 | 1463 | from django.http import Http404
from django.utils.functional import update_wrapper
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin.util import unquote
from django.utils.encoding import force_unicode
class ExtendibleModelAdminMixin(object):
def _getobj(self, request, object_id):
opts = self.model._meta
try:
obj = self.queryset(request).get(pk=unquote(object_id))
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to
# be able to determine whether a given object exists.
obj = None
if obj is None:
raise Http404(
_(
'%(name)s object with primary key '
'%(key)r does not exist.'
) % {
'name': force_unicode(opts.verbose_name),
'key': unicode(object_id)
}
)
return obj
def _wrap(self, view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
def _view_name(self, name):
info = self.model._meta.app_label, self.model._meta.module_name, name
return '%s_%s_%s' % info
| agpl-3.0 | -6,176,417,406,292,895,000 | 33.023256 | 77 | 0.549556 | false |
datakid/tvet | tafe/migrations/0061_auto__chg_field_course_course_code__add_unique_staffattendance_session.py | 1 | 26055 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Course.course_code'
db.alter_column('tafe_course', 'course_code', self.gf('django.db.models.fields.CharField')(max_length=20))
# Adding unique constraint on 'StaffAttendance', fields ['session', 'staff_member']
db.create_unique('tafe_staffattendance', ['session_id', 'staff_member_id'])
# Adding unique constraint on 'StudentAttendance', fields ['session', 'student']
db.create_unique('tafe_studentattendance', ['session_id', 'student_id'])
def backwards(self, orm):
# Removing unique constraint on 'StudentAttendance', fields ['session', 'student']
db.delete_unique('tafe_studentattendance', ['session_id', 'student_id'])
# Removing unique constraint on 'StaffAttendance', fields ['session', 'staff_member']
db.delete_unique('tafe_staffattendance', ['session_id', 'staff_member_id'])
# Changing field 'Course.course_code'
db.alter_column('tafe_course', 'course_code', self.gf('django.db.models.fields.CharField')(max_length=8))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tafe.applicant': {
'Meta': {'ordering': "['first_name', 'surname']", 'object_name': 'Applicant'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'applied_for': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applicants'", 'to': "orm['tafe.Course']"}),
'date_of_application': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_offer_accepted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_offer_sent': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'disability': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'disability_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'education_level': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'eligibility': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'experience': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'default': "'Tarawa'", 'max_length': "'10'", 'null': 'True', 'blank': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'applicant_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'other_courses': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'applicant_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'phone2': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'ranking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'short_listed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '62', 'blank': 'True'}),
'student_details': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tafe.Student']", 'null': 'True', 'blank': 'True'}),
'successful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'test_ap': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'test_eng': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'test_ma': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'tafe.assessment': {
'Meta': {'object_name': 'Assessment'},
'date_due': ('django.db.models.fields.DateField', [], {}),
'date_given': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assessments'", 'to': "orm['tafe.Subject']"})
},
'tafe.course': {
'Meta': {'object_name': 'Course'},
'aqf_level': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'course_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['tafe.Student']", 'null': 'True', 'through': "orm['tafe.Enrolment']", 'blank': 'True'}),
'subjects': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['tafe.Subject']"}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'tafe.credential': {
'Meta': {'object_name': 'Credential'},
'aqf_level': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'credential_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credential_last_change_by'", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'credential_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'tafe.enrolment': {
'Meta': {'object_name': 'Enrolment'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enrolments'", 'to': "orm['tafe.Course']"}),
'date_ended': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 2, 14, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enrolment_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'mark': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enrolment_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'semester_1_payment': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'semester_1_payment_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'semester_1_payment_receipt': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'semester_2_payment': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'semester_2_payment_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'semester_2_payment_receipt': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '110', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enrolments'", 'to': "orm['tafe.Student']"}),
'withdrawal_reason': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'})
},
'tafe.grade': {
'Meta': {'object_name': 'Grade'},
'date_started': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'grade_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'grade_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grades'", 'to': "orm['tafe.Student']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grades'", 'to': "orm['tafe.Subject']"})
},
'tafe.result': {
'Meta': {'object_name': 'Result'},
'assessment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['tafe.Assessment']"}),
'date_submitted': ('django.db.models.fields.DateField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': "orm['tafe.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'result_last_change_by'", 'to': "orm['auth.User']"}),
'mark': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'result_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"})
},
'tafe.session': {
'Meta': {'object_name': 'Session'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'room_number': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'session_number': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['tafe.Student']", 'null': 'True', 'through': "orm['tafe.StudentAttendance']", 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['tafe.Subject']"}),
'timetable': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': "orm['tafe.Timetable']"})
},
'tafe.staff': {
'Meta': {'ordering': "['first_name', 'surname']", 'object_name': 'Staff'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classification': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'credential': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'credentials'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['tafe.Credential']"}),
'disability': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'disability_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'default': "'Tarawa'", 'max_length': "'10'", 'null': 'True', 'blank': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'staff_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'staff_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'phone2': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '62', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'tafe.staffattendance': {
'Meta': {'unique_together': "(('staff_member', 'session'),)", 'object_name': 'StaffAttendance'},
'absent': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'staffattendance_last_change_by'", 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'staffattendance_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '1', 'blank': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'staffattendance_attendance_records'", 'to': "orm['tafe.Session']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'blank': 'True'}),
'staff_member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attendance_records'", 'to': "orm['tafe.Staff']"})
},
'tafe.staffislpr': {
'Meta': {'object_name': 'StaffISLPR'},
'date_tested': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'islpr_listening': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_overall': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_reading': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_speaking': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_writing': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'staff_member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'islpr_record'", 'to': "orm['tafe.Staff']"})
},
'tafe.student': {
'Meta': {'ordering': "['first_name', 'surname']", 'object_name': 'Student'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'disability': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'disability_description': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'education_level': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'F'", 'max_length': "'1'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.CharField', [], {'default': "'Tarawa'", 'max_length': "'10'", 'null': 'True', 'blank': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'student_last_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'student_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'phone2': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '62', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'tafe.studentattendance': {
'Meta': {'unique_together': "(('student', 'session'),)", 'object_name': 'StudentAttendance'},
'absent': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_change_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studentattendance_last_change_by'", 'to': "orm['auth.User']"}),
'penultimate_change_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studentattendance_penultimate_change_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '1', 'blank': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'studentattendance_attendance_records'", 'to': "orm['tafe.Session']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attendance_records'", 'to': "orm['tafe.Student']"})
},
'tafe.studentislpr': {
'Meta': {'object_name': 'StudentISLPR'},
'date_tested': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'islpr_listening': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_overall': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_reading': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_speaking': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'islpr_writing': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'islpr_record'", 'to': "orm['tafe.Student']"})
},
'tafe.subject': {
'Meta': {'ordering': "['name', 'year']", 'object_name': 'Subject'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'semester': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '135'}),
'staff_member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tafe.Staff']", 'null': 'True', 'blank': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['tafe.Student']", 'null': 'True', 'through': "orm['tafe.Grade']", 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'tafe.timetable': {
'Meta': {'unique_together': "(('year', 'term'),)", 'object_name': 'Timetable'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '12'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'term': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
}
}
complete_apps = ['tafe'] | gpl-3.0 | 6,782,769,871,154,029,000 | 87.928328 | 212 | 0.554097 | false |
dferens/django-classsettings | classsettings/settings.py | 1 | 2315 | import inspect
import sys
from operator import itemgetter
from django.utils import six, importlib
def inspect_class(cls):
cls._instance = instance = cls()
module = importlib.import_module(cls.__module__)
public_attributes = []
for attr_name in dir(instance):
if not attr_name.startswith('_'):
value = getattr(instance, attr_name)
value = value() if inspect.ismethod(value) else value
public_attributes.append((attr_name, value))
return public_attributes, module
class SettingsMeta(type):
def __init__(cls, name, bases, attrs):
public_attrs, module = inspect_class(cls)
for attr_name, value in public_attrs:
setattr(module, attr_name, value)
class ConfigMeta(type):
def __new__(cls, name, bases, attrs):
Class = super(ConfigMeta, cls).__new__(cls, name, bases, attrs)
if name == 'NewBase':
return Class
public_attributes, module = inspect_class(Class)
result = ConfigResult(public_attributes, Class)
setattr(module, name, result)
return result
class ConfigResult(dict):
"""
Dict-like object which adds inheritance support.
"""
def __new__(cls, *args, **kwargs):
if len(args) == 2:
# Used to create dict object
return super(ConfigResult, cls).__new__(cls, *args, **kwargs)
else:
# Used as superclass
name, bases, attrs = args
bases = tuple(b.ConfigClass for b in bases if isinstance(b, ConfigResult))
return ConfigMeta(name, bases, attrs)
def __init__(self, *args, **kwargs):
if len(args) == 2:
# Is used as dict instance
dict_arg, self._ConfigClass = args
super(ConfigResult, self).__init__(dict_arg, **kwargs)
else:
# Is used as class
pass
@property
def ConfigClass(self):
return self._ConfigClass
class Settings(six.with_metaclass(SettingsMeta)):
"""
    Calls each public method of the class and injects its value into its
    module's scope.
"""
class Config(six.with_metaclass(ConfigMeta)):
"""
    Calls each public method of the class, constructs a dictionary of
    `name: result` pairs and replaces the class with it.
"""
| mit | 4,555,942,589,989,331,500 | 27.231707 | 86 | 0.606479 | false |
zhinaonet/sqlmap-z | extra/mssqlsig/update.py | 1 | 5109 | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import os
import re
import urllib2
import urlparse
from xml.dom.minidom import Document
# Path to the XML file with signatures
MSSQL_XML = os.path.abspath("../../xml/banner/mssql.xml")
# Url to update Microsoft SQL Server XML versions file from
MSSQL_VERSIONS_URL = "http://www.sqlsecurity.com/FAQs/SQLServerVersionDatabase/tabid/63/Default.aspx"
def updateMSSQLXML():
if not os.path.exists(MSSQL_XML):
errMsg = "[ERROR] file '%s' does not exist. Please run the script from its parent directory" % MSSQL_XML
print errMsg
return
infoMsg = "[INFO] retrieving data from '%s'" % MSSQL_VERSIONS_URL
print infoMsg
try:
req = urllib2.Request(MSSQL_VERSIONS_URL)
f = urllib2.urlopen(req)
mssqlVersionsHtmlString = f.read()
f.close()
except urllib2.URLError:
__mssqlPath = urlparse.urlsplit(MSSQL_VERSIONS_URL)
__mssqlHostname = __mssqlPath[1]
warnMsg = "[WARNING] sqlmap was unable to connect to %s," % __mssqlHostname
warnMsg += " check your Internet connection and retry"
print warnMsg
return
releases = re.findall("class=\"BCC_DV_01DarkBlueTitle\">SQL Server\s(.+?)\sBuilds", mssqlVersionsHtmlString, re.I)
releasesCount = len(releases)
# Create the minidom document
doc = Document()
# Create the <root> base element
root = doc.createElement("root")
doc.appendChild(root)
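    # Shape of the document being built below (illustrative sketch only; the
    # actual release, version and service pack values come from the scraped
    # page):
    #
    #   <root>
    #     <signatures release="2005">
    #       <signature>
    #         <version>9.00.5000.00</version>
    #         <servicepack>4</servicepack>
    #       </signature>
    #     </signatures>
    #   </root>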
for index in xrange(0, releasesCount):
release = releases[index]
# Skip Microsoft SQL Server 6.5 because the HTML
# table is in another format
if release == "6.5":
continue
# Create the <signatures> base element
signatures = doc.createElement("signatures")
signatures.setAttribute("release", release)
root.appendChild(signatures)
startIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index])
if index == releasesCount - 1:
stopIdx = len(mssqlVersionsHtmlString)
else:
stopIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index + 1])
mssqlVersionsReleaseString = mssqlVersionsHtmlString[startIdx:stopIdx]
servicepackVersion = re.findall("</td><td>(7\.0|2000|2005|2008|2008 R2)*(.*?)</td><td.*?([\d\.]+)</td>[\r]*\n", mssqlVersionsReleaseString, re.I)
for servicePack, version in servicepackVersion:
if servicePack.startswith(" "):
servicePack = servicePack[1:]
if "/" in servicePack:
servicePack = servicePack[:servicePack.index("/")]
if "(" in servicePack:
servicePack = servicePack[:servicePack.index("(")]
if "-" in servicePack:
servicePack = servicePack[:servicePack.index("-")]
if "*" in servicePack:
servicePack = servicePack[:servicePack.index("*")]
if servicePack.startswith("+"):
servicePack = "0%s" % servicePack
servicePack = servicePack.replace("\t", " ")
servicePack = servicePack.replace("No SP", "0")
servicePack = servicePack.replace("RTM", "0")
servicePack = servicePack.replace("TM", "0")
servicePack = servicePack.replace("SP", "")
servicePack = servicePack.replace("Service Pack", "")
servicePack = servicePack.replace("<a href=\"http:", "")
servicePack = servicePack.replace(" ", " ")
servicePack = servicePack.replace("+ ", "+")
servicePack = servicePack.replace(" +", "+")
if servicePack.endswith(" "):
servicePack = servicePack[:-1]
if servicePack and version:
                # Create the main <signature> element
signature = doc.createElement("signature")
signatures.appendChild(signature)
# Create a <version> element
versionElement = doc.createElement("version")
signature.appendChild(versionElement)
                # Give the <version> element some text
versionText = doc.createTextNode(version)
versionElement.appendChild(versionText)
# Create a <servicepack> element
servicepackElement = doc.createElement("servicepack")
signature.appendChild(servicepackElement)
                # Give the <servicepack> element some text
servicepackText = doc.createTextNode(servicePack)
servicepackElement.appendChild(servicepackText)
# Save our newly created XML to the signatures file
mssqlXml = codecs.open(MSSQL_XML, "w", "utf8")
doc.writexml(writer=mssqlXml, addindent=" ", newl="\n")
mssqlXml.close()
infoMsg = "[INFO] done. retrieved data parsed and saved into '%s'" % MSSQL_XML
print infoMsg
if __name__ == "__main__":
updateMSSQLXML()
| gpl-3.0 | -4,861,813,990,019,743,000 | 36.291971 | 153 | 0.614014 | false |
yaybu/callsign | callsign/tests/test_restapi.py | 1 | 6051 | #Copyright 2013 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.trial import unittest
from mock import MagicMock
from callsign.restapi import (
RootResource,
DomainResource,
RecordResource,
MissingDomainResource,
ForbiddenDomainResource,
)
import socket
class TestRootResource(unittest.TestCase):
def setUp(self):
self.config = MagicMock()
self.dnsserver = MagicMock()
self.resource = RootResource(self.config, self.dnsserver)
def test_get(self):
self.dnsserver.zones = MagicMock(return_value=["foo", "bar"])
rv = self.resource.render_GET(None)
self.assertEqual(rv, "\n".join(["foo", "bar"]))
def test_getChild_exists(self):
self.config.get = MagicMock(return_value="")
zone = MagicMock()
def get_zone(x):
if x == "foo":
return zone
raise KeyError
self.dnsserver.get_zone.side_effect = get_zone
rv = self.resource.getChild("foo", None)
self.assert_(isinstance(rv, DomainResource))
self.assertEqual(rv.zone, zone)
rv = self.resource.getChild("bar", None)
self.assert_(isinstance(rv, MissingDomainResource))
self.assertEqual(rv.name, "bar")
def test_getChild_exists_with_lockdown(self):
self.config.get = MagicMock(return_value="foo bar")
zone = MagicMock()
def get_zone(x):
if x == "foo":
return zone
raise KeyError
self.dnsserver.get_zone.side_effect = get_zone
rv = self.resource.getChild("foo", None)
self.assert_(isinstance(rv, DomainResource))
self.assertEqual(rv.zone, zone)
rv = self.resource.getChild("bar", None)
self.assert_(isinstance(rv, MissingDomainResource))
self.assertEqual(rv.name, "bar")
rv = self.resource.getChild("baz", None)
self.assert_(isinstance(rv, ForbiddenDomainResource))
class TestDomainResource(unittest.TestCase):
def setUp(self):
self.zone = MagicMock()
self.dnsserver = MagicMock()
self.resource = DomainResource(self.zone, self.dnsserver)
def test_GET(self):
data = [
("A", "www", "192.168.0.1"),
("A", "x", "192.168.0.2"),
]
self.zone.a_records = MagicMock(return_value=data)
rv = self.resource.render_GET(None)
self.assertEqual(rv, "\n".join(["%s %s %s" % (x, y, z) for (x, y, z) in data]))
class TestMissingDomainResource(unittest.TestCase):
def setUp(self):
self.name = "foo"
self.dnsserver = MagicMock()
self.resource = MissingDomainResource(self.name, self.dnsserver)
def test_GET(self):
request = MagicMock()
self.resource.render_GET(request)
request.setResponseCode.assert_called_once_with(404)
def test_PUT(self):
request = MagicMock()
self.resource.render_PUT(request)
self.dnsserver.add_zone.assert_called_once_with(self.name)
request.setResponseCode.assert_called_once_with(201)
def test_HEAD(self):
request = MagicMock()
self.resource.render_GET(request)
request.setResponseCode.assert_called_once_with(404)
def test_DELETE(self):
request = MagicMock()
self.resource.render_GET(request)
request.setResponseCode.assert_called_once_with(404)
class TestRecordResource(unittest.TestCase):
def setUp(self):
self.name = "foo"
self.zone = MagicMock()
self.resource = RecordResource(self.name, self.zone)
def test_PUT(self):
request = MagicMock()
request.content.read.return_value = "A 192.168.0.1"
self.resource.render_PUT(request)
self.zone.set_record.assert_called_once_with(self.name, "192.168.0.1")
request.setResponseCode.assert_called_once_with(201)
def test_PUT_invalid_body(self):
request = MagicMock()
request.content.read.return_value = "wrong"
self.resource.render_PUT(request)
request.setResponseCode.assert_called_once_with(400, message=self.resource.err_invalid_body)
def test_PUT_wrong_record_type(self):
request = MagicMock()
request.content.read.return_value = "MX 192.168.0.1"
self.zone.set_record.return_value = (False, "foo")
self.resource.render_PUT(request)
request.setResponseCode.assert_called_once_with(400, message=self.resource.err_wrong_record_type)
def test_PUT_malformed(self):
request = MagicMock()
request.content.read.return_value = "A foo"
self.zone.set_record.side_effect = socket.error()
self.resource.render_PUT(request)
request.setResponseCode.assert_called_once_with(400, message=self.resource.err_malformed)
def test_DELETE(self):
request = MagicMock()
self.resource.render_DELETE(request)
self.zone.delete_record.assert_called_once_with(self.name)
request.setResponseCode.assert_called_once_with(204)
def test_DELETE_missing(self):
request = MagicMock()
self.zone.delete_record.side_effect = KeyError()
self.resource.render_DELETE(request)
self.zone.delete_record.assert_called_once_with(self.name)
request.setResponseCode.assert_called_once_with(404)
def test_GET(self):
self.zone.get_record.return_value = ("A", "192.168.0.1")
rv = self.resource.render_GET(None)
self.assertEqual(rv, "A 192.168.0.1")
| apache-2.0 | -606,286,042,955,971,500 | 33.976879 | 105 | 0.648323 | false |
ekansa/open-context-py | opencontext_py/libs/binaryfiles.py | 1 | 9846 | import os, sys, shutil
import codecs
import requests
from io import BytesIO
from time import sleep
from internetarchive import get_session, get_item
from django.conf import settings
from django.utils.http import urlquote, quote_plus, urlquote_plus
from opencontext_py.libs.generalapi import GeneralAPI
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile, ManageMediafiles
from opencontext_py.apps.ocitems.manifest.models import Manifest
class BinaryFiles():
"""
This class has useful methods for managing binary
media files. It is mainly for copying and moving such files form the file system,
the localhost, or a remote host over HTTP.
For archiving purposes, it is often needed to stage such files locally.
"""
def __init__(self):
self.root_export_dir = settings.STATIC_EXPORTS_ROOT
self.cache_file_dir = 'binary-cache'
self.full_path_cache_dir = None
self.do_http_request_for_cache = True # use an HTTP request to get a file for local caching and saving with a new filename
self.delay_before_request = .5 # delay a request by .5 seconds so as not to overwhelm a remote server
self.remote_uri_sub = None # substitution for a remote uri
self.local_uri_sub = None # local substitution uri prefix, so no retrieval from remote
self.local_filesystem_uri_sub = None # substitution to get a path to the local file in the file system
self.pref_tiff_archive = False # Prefer to archive a TIFF archive file
self.errors = []
def get_cache_full_file(self, json_ld, man_obj):
""" gets and caches the fill file, saving temporarily to a local directory """
file_name = None
slug = man_obj.slug
file_uri = self.get_archive_fileuri(json_ld)
if not file_uri:
print('Cannot find a file_uri in {} [{}]'.format(man_obj.label, man_obj.uuid))
return None
# We found a file uri.
if isinstance(self.local_uri_sub, str) and isinstance(self.remote_uri_sub, str):
# get a local copy of the file, not a remote copy
file_uri = file_uri.replace(self.remote_uri_sub, self.local_uri_sub)
if 'https://' in self.remote_uri_sub:
# so we also replace the https or http version of the remote with a local
alt_remote_sub = self.remote_uri_sub.replace('https://', 'http://')
file_uri = file_uri.replace(alt_remote_sub, self.local_uri_sub)
if '.' in file_uri:
file_ex = file_uri.split('.')
file_name = slug + '.' + file_ex[-1]
else:
file_name = slug
file_ok = self.get_cache_remote_file_content(file_name, file_uri)
if not file_ok:
file_name = False
error_msg = 'UUID: ' + man_obj.uuid + ' file_uri: ' + file_uri
error_msg += ' file caching error.'
self.errors.append(error_msg)
return file_name
def get_full_fileuri(self, json_ld):
""" gets the full file uri """
if not 'oc-gen:has-files' in json_ld:
return None
for f_obj in json_ld['oc-gen:has-files']:
if f_obj['type'] == 'oc-gen:fullfile':
return f_obj['id']
return None
def get_archive_fileuri(self, json_ld):
""" gets the full file uri """
if not 'oc-gen:has-files' in json_ld:
return None
if self.pref_tiff_archive:
for f_obj in json_ld['oc-gen:has-files']:
if f_obj['type'] == 'oc-gen:archive':
return f_obj['id']
# no TIFF archive file found, so use the full-file
return self.get_full_fileuri(json_ld)
def get_cache_remote_file_content(self, file_name, file_uri, act_dir=None):
""" either uses an HTTP request to get a remote file
or looks for the file in the file system and copies it within
the file system
"""
if not act_dir:
act_dir = self.cache_file_dir
if self.do_http_request_for_cache:
ok = self.get_cache_remote_file_content_http(file_name,
file_uri,
act_dir)
else:
ok = self.get_cache_remote_file_content_filesystem(file_name,
file_uri,
act_dir)
return ok
def get_cache_remote_file_content_filesystem(self, file_name, file_uri, act_dir=None):
""" use the file system to get the file for caching
and saving with a new filename
"""
ok = False
dir_file = self.join_dir_filename(file_name, act_dir)
if os.path.exists(dir_file):
# the file already exists, no need to download it again
print('Already cached: ' + dir_file)
ok = True
else:
print('Cannot find: ' + dir_file)
print('Need to copy with file-system: ' + file_uri)
if isinstance(self.remote_uri_sub, str) and isinstance(self.local_filesystem_uri_sub, str):
original_path = file_uri.replace(self.remote_uri_sub,
self.local_filesystem_uri_sub)
if os.path.exists(original_path):
try:
shutil.copy2(original_path, dir_file)
ok = True
except:
print('Problem copying to: ' + dir_file)
ok = False
else:
print('CANNOT FIND ORIGINAL AT: ' + original_path)
return ok
def get_cache_remote_file_content_http(self, file_name, file_uri, act_dir=None):
""" uses HTTP requests to get the content of a remote file,
saves it to cache with the filename 'file_name'
"""
ok = False
dir_file = self.join_dir_filename(file_name, act_dir)
if os.path.exists(dir_file):
# the file already exists, no need to download it again
print('Already cached: ' + dir_file)
ok = True
else:
print('Cannot find: ' + dir_file)
print('Need to download: ' + file_uri)
if not isinstance(self.local_uri_sub, str):
# only delay if we're not looking locally for the file
sleep(self.delay_before_request)
r = requests.get(file_uri, stream=True)
if r.status_code == 200:
with open(dir_file, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
f.close()
ok = True
else:
# try with different capitalization
if '.JPG' in file_uri:
new_file_uri = file_uri.replace('.JPG', '.jpg')
elif '.jpg' in file_uri:
new_file_uri = file_uri.replace('.jpg', '.JPG')
else:
new_file_uri = None
if new_file_uri is not None:
print('Now trying with different capitalization: ' + new_file_uri)
if not isinstance(self.local_uri_sub, str):
# only delay if we're not looking locally for the file
sleep(self.delay_before_request)
r = requests.get(new_file_uri, stream=True)
if r.status_code == 200:
with open(dir_file, 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
f.close()
ok = True
return ok
def join_dir_filename(self, file_name, act_dir):
""" outputs a full path WITH filename """
if isinstance(act_dir, str):
path = self.set_check_directory(act_dir)
elif isinstance(self.full_path_cache_dir, str):
path = self.full_path_cache_dir
else:
path = self.root_export_dir
dir_file = os.path.join(path, file_name)
return dir_file
def check_exists(self, file_name, act_dir):
""" checks to see if a file exists """
dir_file = self.join_dir_filename(file_name, act_dir)
if os.path.exists(dir_file):
output = True
else:
output = False
return output
def set_check_directory(self, act_dir):
""" Prepares a directory to find import GeoJSON files """
output = False
if isinstance(self.full_path_cache_dir, str):
full_dir = self.full_path_cache_dir
if not os.path.exists(full_dir):
os.makedirs(full_dir)
else:
full_dir = self.root_export_dir
if isinstance(act_dir, str):
if len(act_dir) > 0:
full_dir = self.root_export_dir + '/' + act_dir
full_dir = full_dir.replace('//', '/')
if not os.path.exists(full_dir):
os.makedirs(full_dir)
if os.path.exists(full_dir):
output = full_dir
return output
def get_directory_files(self, act_dir):
""" Gets a list of files from a directory """
files = False
path = self.set_check_directory(act_dir)
if os.path.exists(path):
for dirpath, dirnames, filenames in os.walk(path):
files = sorted(filenames)
else:
print('Cannot find: ' + path)
return files | gpl-3.0 | -2,155,670,723,415,455,500 | 43.355856 | 131 | 0.538899 | false |
joerick/pyinstrument | pyinstrument/renderers/jsonrenderer.py | 1 | 2646 | import json
from pyinstrument.renderers.base import Renderer
from pyinstrument import processors
# note: this file is called jsonrenderer to avoid hiding built-in module 'json'.
encode_str = json.encoder.encode_basestring
def encode_bool(a_bool):
return 'true' if a_bool else 'false'
class JSONRenderer(Renderer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def render_frame(self, frame):
if frame is None:
return u'null'
# we don't use the json module because it uses 2x stack frames, so
# crashes on deep but valid call stacks
property_decls = []
property_decls.append(u'"function": %s' % encode_str(frame.function))
property_decls.append(u'"file_path_short": %s' % encode_str(frame.file_path_short))
property_decls.append(u'"file_path": %s' % encode_str(frame.file_path))
property_decls.append(u'"line_no": %d' % frame.line_no)
property_decls.append(u'"time": %f' % frame.time())
property_decls.append(u'"is_application_code": %s' % encode_bool(frame.is_application_code))
# can't use list comprehension here because it uses two stack frames each time.
children_jsons = []
for child in frame.children:
children_jsons.append(self.render_frame(child))
property_decls.append(u'"children": [%s]' % u','.join(children_jsons))
if frame.group:
property_decls.append(u'"group_id": %s' % encode_str(frame.group.id))
return u'{%s}' % u','.join(property_decls)
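    # Illustrative sketch of what render_frame() emits for a single frame
    # (field values are invented; real ones come from the profiled frame):
    #
    #   {"function": "main", "file_path_short": "app.py",
    #    "file_path": "/src/app.py", "line_no": 12, "time": 0.524000,
    #    "is_application_code": true, "children": []}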
def render(self, session):
frame = self.preprocess(session.root_frame())
property_decls = []
property_decls.append(u'"start_time": %f' % session.start_time)
property_decls.append(u'"duration": %f' % session.duration)
property_decls.append(u'"sample_count": %d' % session.sample_count)
property_decls.append(u'"program": %s' % encode_str(session.program))
if session.cpu_time is None:
property_decls.append(u'"cpu_time": null')
else:
property_decls.append(u'"cpu_time": %f' % session.cpu_time)
property_decls.append(u'"root_frame": %s' % self.render_frame(frame))
return u'{%s}\n' % u','.join(property_decls)
def default_processors(self):
return [
processors.remove_importlib,
processors.merge_consecutive_self_time,
processors.aggregate_repeated_calls,
processors.group_library_frames_processor,
processors.remove_unnecessary_self_time_nodes,
processors.remove_irrelevant_nodes,
]
| bsd-3-clause | 8,942,332,713,662,596,000 | 38.492537 | 100 | 0.62963 | false |
B3AU/micropython | PDM.py | 1 | 1141 | __author__ = 'beau'
import pyb
class PDM():
def __init__(self,pout='X11',tim=4,freq=50):
"""
:param pout: output pin nr
:param tim: timer number
:param freq: frequency of the bitstream
"""
        self.max = 2**24 - 1  # 2**31-1 crashes with larger ints? 24-bit resolution is fine enough ;)
self.pout = pyb.Pin(pout, pyb.Pin.OUT_PP)
self.err = 0 # error accumulator
self.output = 0
self.freq = freq
self.tim = pyb.Timer(tim)
self.tim.init(freq=freq)
self.tim.callback(lambda t: self.call_me())
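    # Illustrative usage sketch (added for clarity; assumes a pyboard with
    # pin X11 free and timer 4 available):
    #
    #   pdm = PDM(pout='X11', tim=4, freq=50)
    #   pdm.set_output(0.25)  # ~25% of timer ticks drive the pin high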
def set_output(self,out):
"""
:param out: desired output as a value between 0 and 1
"""
print ('setting output to '+str(out))
self.tim.deinit()
self.output = int(self.max*out)
self.tim.init(freq=self.freq)
self.tim.callback(lambda t: self.call_me())
def call_me(self):
if self.err >= 0:
self.pout.low()
self.err -= self.output
else:
self.pout.high()
self.err += self.max
self.err -= self.output | lgpl-3.0 | 8,587,209,012,637,136,000 | 24.954545 | 95 | 0.533742 | false |
oliviertilmans/ipmininet | ipmininet/router/config/openrd.py | 1 | 14773 | from .base import RouterDaemon
from .utils import ConfigDict
class OpenrDaemon(RouterDaemon):
"""The base class for the OpenR daemon"""
NAME = 'openr'
@property
def STARTUP_LINE_EXTRA(self):
# Add options to the standard startup line
return ''
@property
def startup_line(self):
return '{name} {cfg} {extra}'\
.format(name=self.NAME,
cfg=self._cfg_options(),
extra=self.STARTUP_LINE_EXTRA)
def build(self):
cfg = ConfigDict()
return cfg
def _defaults(self, **kwargs):
"""
Default parameters of the OpenR daemon. The template file openr.mako
sets the default parameters listed here. See:
https://github.com/facebook/openr/blob/master/openr/docs/Runbook.md.
:param alloc_prefix_len: Block size of allocated prefix in terms of
            its prefix length. In this case '/80' prefix will be elected for a
node. e.g. 'face:b00c:0:0:1234::/80'. Default: 128.
:param assume_drained: Default: False.
:param config_store_filepath: Default:
/tmp/aq_persistent_config_store.bin
:param decision_debounce_max_ms: Knobs to control how often to run
            Decision. On receipt of the first event, debounce is created with MIN
time which grows exponentially up to max if there are more events
before debounce is executed. This helps us to react to single
            network failures quickly enough (with min duration) while avoiding
high CPU utilization under heavy network churn. Default: 250.
:param decision_debounce_min_ms: Knobs to control how often to run
            Decision. On receipt of the first event, debounce is created with MIN time
which grows exponentially up to max if there are more events before
debounce is executed. This helps us to react to single network
            failures quickly enough (with min duration) while avoiding high CPU
utilization under heavy network churn. Default: 10.
:param decision_rep_port: Default: 60004.
:param domain: Name of domain this node is part of. OpenR will 'only'
            form adjacencies to OpenR instances within its own domain. This
option becomes very useful if you want to run OpenR on two nodes
adjacent to each other but belonging to different domains, e.g.
Data Center and Wide Area Network. Usually it should depict the
Network. Default: openr.
        :param dryrun: OpenR will not try to program routes in its default
configuration. You should explicitly set this option to false to
proceed with route programming. Default: False.
:param enable_subnet_validation: OpenR supports subnet validation to
avoid mis-cabling of v4 addresses on different subnets on each end
of the link. Need to enable v4 and this flag at the same time to
turn on validation. Default: True.
:param enable_fib_sync: Default: False.
:param enable_health_checker: OpenR can measure network health
internally by pinging other nodes in the network and exports this
information as counters or via breeze APIs. By default health
checker is disabled. The expectation is that each node must have at
least one v6 loopback addressed announced into the network for the
reachability check. Default: False.
:param enable_legacy_flooding: Default: True.
:param enable_lfa: With this option, additional Loop-Free Alternate
(LFA) routes can be computed, per RFC 5286, for fast failure
recovery. Under the failure of all primary nexthops for a prefix,
because of link failure, next best precomputed LFA will be used
without need of an SPF run. Default: False.
:param enable_netlink_fib_handler: Knob to enable/disable default
implementation of 'FibService' that comes along with OpenR for
Linux platform. If you want to run your own FIB service then
disable this option. Default: True.
:param enable_netlink_system_handler: Knob to enable/disable default
implementation of 'SystemService' and 'PlatformPublisher' that
comes along with OpenR for Linux platform. If you want to run your
own SystemService then disable this option. Default: True.
:param enable_perf_measurement: Experimental feature to measure
convergence performance. Performance information can be viewed via
breeze API 'breeze perf fib'. Default: True.
:param enable_prefix_alloc: Enable prefix allocator to elect and assign
a unique prefix for the node. You will need to specify other
configuration parameters below. Default: False.
:param enable_rtt_metric: Default mechanism for cost of a link is '1'
and hence cost of path is hop count. With this option you can ask
OpenR to compute and use RTT of a link as a metric value. You
should only use this for networks where links have significant
delay, on the order of a couple of milliseconds. Using this for
point-to-point links will cause lot of churn in metric updates as
measured RTT will fluctuate a lot because of packet processing
overhead. RTT is measured at application level and hence the
fluctuation for point-to-point links. Default: True.
:param enable_secure_thrift_server: Flag to enable TLS for our thrift
server. Disable this for plaintext thrift. Default: False.
:param enable_segment_routing: Experimental and partially implemented
segment routing feature. As of now it only elects node/adjacency
labels. In future we will extend it to compute and program FIB
routes. Default: False.
:param enable_spark: Default: True.
:param enable_v4: OpenR supports v4 as well but it needs to be turned
on explicitly. It is expected that each interface will have v4
address configured for link local transport and v4/v6 topologies
are congruent. Default: False.
:param enable_watchdog: Default: True.
:param fib_handler_port: TCP port on which 'FibService' will be
listening. Default: 60100.
:param fib_rep_port: Default: 60009.
:param health_checker_ping_interval_s: Configure ping interval of the
health checker. The below option configures it to ping all other
nodes every 3 seconds. Default: 3.
:param health_checker_rep_port: Default: 60012.
:param ifname_prefix: Interface prefixes to perform neighbor discovery
on. All interfaces whose names start with these are used for
neighbor discovery. Default: ""
:param iface_regex_exclude: Default:"".
:param iface_regex_include: Default: "".
:param ip_tos: Set type of service (TOS) value with which every control
plane packet from Open/R will be marked with. This marking can be
used to prioritize control plane traffic (as compared to data
plane) so that congestion in network doesn't affect operations of
Open/R. Default: 192
:param key_prefix_filters: This comma separated string is used to set
the key prefixes when key prefix filter is enabled (See
SET_LEAF_NODE). It is also set when requesting KEY_DUMP from peer
to request keys that match one of these prefixes. Default: "".
:param kvstore_flood_msg_per_sec: Default: 0.
:param kvstore_flood_msg_burst_size: Default: 0.
:param kvstore_flood_msg_per_sec: Default: 0.
:param kvstore_ttl_decrement_ms: Default: 1.
:param kvstore_zmq_hwm: Set buffering size for KvStore socket
communication. Updates to neighbor node during flooding can be
            buffered up to this number. For larger networks where burst of
updates can be high having high value makes sense. For smaller
networks where burst of updates are low, having low value makes
more sense. Default: 65536.
:param link_flap_initial_backoff_ms: Default: 1000.
:param link_flap_max_backoff_ms: Default: 60000.
:param link_monitor_cmd_port: Default: 60006.
:param loopback_iface: Indicates loopback address to which auto elected
prefix will be assigned if enabled. Default: "lo".
:param memory_limit_mb: Enforce upper limit on amount of memory in
mega-bytes that open/r process can use. Above this limit watchdog
thread will trigger crash. Service can be auto-restarted via system
or some kind of service manager. This is very useful to guarantee
protocol doesn't cause trouble to other services on device where it
runs and takes care of slow memory leak kind of issues. Default:
300.
:param minloglevel: Log messages at or above this level. Again, the
numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0,
1, 2, and 3, respectively. Default: 0.
:param node_name: Name of the OpenR node. Crucial setting if you run
multiple nodes. Default: "".
:param override_loopback_addr: Whenever new address is elected for a
node, before assigning it to interface all previously allocated
prefixes or other global prefixes will be overridden with the new
one. Use it with care! Default: False.
:param prefix_manager_cmd_port: Default: 60011.
:param prefixes: Static list of comma separate prefixes to announce
from the current node. Can't be changed while running. Default: "".
:param redistribute_ifaces: Comma separated list of interface names
whose '/32' (for v4) and '/128' (for v6) should be announced. OpenR
will monitor address add/remove activity on this interface and
announce it to rest of the network. Default: "lo1".
:param seed_prefix: In order to elect a prefix for the node a super
prefix to elect from is required. This is only applicable when
'ENABLE_PREFIX_ALLOC' is set to true. Default: "".
:param set_leaf_node: Sometimes a node maybe a leaf node and have only
one path in to network. This node does not require to keep track of
the entire topology. In this case, it may be useful to optimize
memory by reducing the amount of key/vals tracked by the node.
Setting this flag enables key prefix filters defined by
KEY_PREFIX_FILTERS. A node only tracks keys in kvstore that matches
one of the prefixes in KEY_PREFIX_FILTERS. Default: False.
:param set_loopback_address: If set to true along with
'ENABLE_PREFIX_ALLOC' then second valid IP address of the block
will be assigned onto 'LOOPBACK_IFACE' interface. e.g. in this case
'face:b00c:0:0:1234::1/80' will be assigned on 'lo' interface.
Default: False.
:param spark_fastinit_keepalive_time_ms: When interface is detected UP,
OpenR can perform fast initial neighbor discovery as opposed to
slower keep alive packets. Default value is 100 which means
neighbor will be discovered within 200ms on a link. Default: 100.
:param spark_hold_time_s: Hold time indicating time in seconds from
            its last hello after which neighbor will be declared as down.
Default: 30.
:param spark_keepalive_time_s: How often to send spark hello messages
to neighbors. Default: 3.
:param static_prefix_alloc: Default: False.
:param tls_acceptable_peers: A comma separated list of strings. Strings
are x509 common names to accept SSL connections from. Default: ""
:param tls_ecc_curve_name: If we are running an SSL thrift server, this
option specifies the eccCurveName for the associated
wangle::SSLContextConfig. Default: "prime256v1".
:param tls_ticket_seed_path: If we are running an SSL thrift server,
this option specifies the TLS ticket seed file path to use for
client session resumption. Default: "".
:param x509_ca_path: If we are running an SSL thrift server, this
option specifies the certificate authority path for verifying
peers. Default: "".
:param x509_cert_path: If we are running an SSL thrift server, this
option specifies the certificate path for the associated
wangle::SSLContextConfig. Default: "".
:param x509_key_path: If we are running an SSL thrift server, this
option specifies the key path for the associated
wangle::SSLContextConfig. Default: "".
:param logbufsecs: Default: 0
:param log_dir: Directory to store log files at. The folder must exist.
Default: /var/log.
:param max_log_size: Default: 1.
:param v: Show all verbose 'VLOG(m)' messages for m less or equal the
value of this flag. Use higher value for more verbose logging.
Default: 1.
"""
defaults = ConfigDict()
# Apply daemon-specific defaults
self.set_defaults(defaults)
# Use user-supplied defaults if present
defaults.update(**kwargs)
return defaults
def set_defaults(self, defaults):
super().set_defaults(defaults)
def _cfg_options(self):
"""The OpenR daemon has currently no option to read config from
configuration file itself. The run_openr.sh script can be used to read
options from environment files. However, we want to run the daemon
directly. The default options from the shell script are implemented in
the openr.mako template and passed to the daemon as argument."""
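        # The rendered template yields the command-line option string that
        # startup_line appends to the `openr` binary, roughly of the form
        # `--some_option=value --other_option=value ...` (illustrative shape
        # only; the concrete flags are defined by openr.mako).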
cfg = ConfigDict()
cfg[self.NAME] = self.build()
return self.template_lookup.get_template(self.template_filenames[0])\
.render(node=cfg)
@property
def dry_run(self):
"""The OpenR dryrun runs the daemon and does not shutdown the daemon.
As a workaround we only show the version of the openr daemon"""
# TODO: Replace with a config parser or shutdown the daemon after few
# seconds
return '{name} --version'\
.format(name=self.NAME)
| gpl-2.0 | 7,937,481,067,980,952,000 | 58.329317 | 80 | 0.658363 | false |
initbrain/intelwiz | intelwiz/core/flowchart/Node.py | 1 | 26899 | # -*- coding: utf-8 -*-
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph.graphicsItems.GraphicsObject import GraphicsObject
import pyqtgraph.functions as fn
from .Terminal import *
from pyqtgraph.pgcollections import OrderedDict
from pyqtgraph.debug import *
import numpy as np
from .eq import *
def strDict(d):
return dict([(str(k), v) for k, v in d.items()])
class Node(QtCore.QObject):
"""
Node represents the basic processing unit of a flowchart.
A Node subclass implements at least:
    1) A list of input / output terminals and their properties
2) a process() function which takes the names of input terminals as keyword arguments and returns a dict with the names of output terminals as keys.
A flowchart thus consists of multiple instances of Node subclasses, each of which is connected
to other by wires between their terminals. A flowchart is, itself, also a special subclass of Node.
This allows Nodes within the flowchart to connect to the input/output nodes of the flowchart itself.
Optionally, a node class can implement the ctrlWidget() method, which must return a QWidget (usually containing other widgets) that will be displayed in the flowchart control panel. Some nodes implement fairly complex control widgets, but most nodes follow a simple form-like pattern: a list of parameter names and a single value (represented as spin box, check box, etc..) for each parameter. To make this easier, the CtrlNode subclass allows you to instead define a simple data structure that CtrlNode will use to automatically generate the control widget. """
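    # Illustrative subclass sketch (added for clarity, not part of the
    # original file): a minimal Node with one input and one output whose
    # process() doubles the incoming value.
    #
    #   class DoubleNode(Node):
    #       def __init__(self, name):
    #           Node.__init__(self, name, terminals={
    #               'dataIn': {'io': 'in'},
    #               'dataOut': {'io': 'out'},
    #           })
    #
    #       def process(self, dataIn, display=True):
    #           return {'dataOut': dataIn * 2}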
sigOutputChanged = QtCore.Signal(object) # self
sigClosed = QtCore.Signal(object)
sigRenamed = QtCore.Signal(object, object)
sigTerminalRenamed = QtCore.Signal(object, object) # term, oldName
sigTerminalAdded = QtCore.Signal(object, object) # self, term
sigTerminalRemoved = QtCore.Signal(object, object) # self, term
def __init__(self, name, terminals=None, allowAddInput=False, allowAddOutput=False, allowRemove=True):
"""
============== ============================================================
Arguments
name The name of this specific node instance. It can be any
string, but must be unique within a flowchart. Usually,
we simply let the flowchart decide on a name when calling
Flowchart.addNode(...)
terminals Dict-of-dicts specifying the terminals present on this Node.
Terminal specifications look like::
'inputTerminalName': {'io': 'in'}
'outputTerminalName': {'io': 'out'}
There are a number of optional parameters for terminals:
multi, pos, renamable, removable, multiable, bypass. See
the Terminal class for more information.
allowAddInput bool; whether the user is allowed to add inputs by the
context menu.
allowAddOutput bool; whether the user is allowed to add outputs by the
context menu.
allowRemove bool; whether the user is allowed to remove this node by the
context menu.
============== ============================================================
"""
QtCore.QObject.__init__(self)
self._name = name
self._bypass = False
self.bypassButton = None ## this will be set by the flowchart ctrl widget..
self._freeze = False #TODO added
self.freezeButton = None ## this will be set by the flowchart ctrl widget..
self._graphicsItem = None
self.terminals = OrderedDict()
self._inputs = OrderedDict()
self._outputs = OrderedDict()
self._allowAddInput = allowAddInput ## flags to allow the user to add/remove terminals
self._allowAddOutput = allowAddOutput
self._allowRemove = allowRemove
self.exception = None
if terminals is None:
return
for name, opts in terminals.items():
self.addTerminal(name, **opts)
def nextTerminalName(self, name):
"""Return an unused terminal name"""
name2 = name
i = 1
while name2 in self.terminals:
name2 = "%s.%d" % (name, i)
i += 1
return name2
def addInput(self, name="Input", **args):
"""Add a new input terminal to this Node with the given name. Extra
keyword arguments are passed to Terminal.__init__.
This is a convenience function that just calls addTerminal(io='in', ...)"""
#print "Node.addInput called."
return self.addTerminal(name, io='in', **args)
def addOutput(self, name="Output", **args):
"""Add a new output terminal to this Node with the given name. Extra
keyword arguments are passed to Terminal.__init__.
This is a convenience function that just calls addTerminal(io='out', ...)"""
return self.addTerminal(name, io='out', **args)
def removeTerminal(self, term):
"""Remove the specified terminal from this Node. May specify either the
terminal's name or the terminal itself.
Causes sigTerminalRemoved to be emitted."""
if isinstance(term, Terminal):
name = term.name()
else:
name = term
term = self.terminals[name]
#print "remove", name
#term.disconnectAll()
term.close()
del self.terminals[name]
if name in self._inputs:
del self._inputs[name]
if name in self._outputs:
del self._outputs[name]
self.graphicsItem().updateTerminals()
self.sigTerminalRemoved.emit(self, term)
def terminalRenamed(self, term, oldName):
"""Called after a terminal has been renamed
Causes sigTerminalRenamed to be emitted."""
newName = term.name()
for d in [self.terminals, self._inputs, self._outputs]:
if oldName not in d:
continue
d[newName] = d[oldName]
del d[oldName]
self.graphicsItem().updateTerminals()
self.sigTerminalRenamed.emit(term, oldName)
def addTerminal(self, name, **opts):
"""Add a new terminal to this Node with the given name. Extra
keyword arguments are passed to Terminal.__init__.
Causes sigTerminalAdded to be emitted."""
name = self.nextTerminalName(name)
term = Terminal(self, name, **opts)
self.terminals[name] = term
if term.isInput():
self._inputs[name] = term
elif term.isOutput():
self._outputs[name] = term
self.graphicsItem().updateTerminals()
self.sigTerminalAdded.emit(self, term)
return term
def inputs(self):
"""Return dict of all input terminals.
Warning: do not modify."""
return self._inputs
def outputs(self):
"""Return dict of all output terminals.
Warning: do not modify."""
return self._outputs
def process(self, **kargs):
"""Process data through this node. This method is called any time the flowchart
wants the node to process data. It will be called with one keyword argument
corresponding to each input terminal, and must return a dict mapping the name
of each output terminal to its new value.
This method is also called with a 'display' keyword argument, which indicates
whether the node should update its display (if it implements any) while processing
this data. This is primarily used to disable expensive display operations
during batch processing.
"""
return {}
def graphicsItem(self):
"""Return the GraphicsItem for this node. Subclasses may re-implement
this method to customize their appearance in the flowchart."""
if self._graphicsItem is None:
self._graphicsItem = NodeGraphicsItem(self)
return self._graphicsItem
## this is just bad planning. Causes too many bugs.
def __getattr__(self, attr):
"""Return the terminal with the given name"""
if attr not in self.terminals:
raise AttributeError(attr)
else:
import traceback
traceback.print_stack()
print("Warning: use of node.terminalName is deprecated; use node['terminalName'] instead.")
return self.terminals[attr]
def __getitem__(self, item):
#return getattr(self, item)
"""Return the terminal with the given name"""
if item not in self.terminals:
raise KeyError(item)
else:
return self.terminals[item]
def name(self):
"""Return the name of this node."""
return self._name
def rename(self, name):
"""Rename this node. This will cause sigRenamed to be emitted."""
oldName = self._name
self._name = name
#self.emit(QtCore.SIGNAL('renamed'), self, oldName)
self.sigRenamed.emit(self, oldName)
def dependentNodes(self):
"""Return the list of nodes which provide direct input to this node"""
nodes = set()
for t in self.inputs().values():
nodes |= set([i.node() for i in t.inputTerminals()])
return nodes
#return set([t.inputTerminals().node() for t in self.listInputs().itervalues()])
def __repr__(self):
return "<Node %s @%x>" % (self.name(), id(self))
def ctrlWidget(self):
"""Return this Node's control widget.
By default, Nodes have no control widget. Subclasses may reimplement this
method to provide a custom widget. This method is called by Flowcharts
when they are constructing their Node list."""
return None
def bypass(self, byp):
"""Set whether this node should be bypassed.
When bypassed, a Node's process() method is never called. In some cases,
data is automatically copied directly from specific input nodes to
output nodes instead (see the bypass argument to Terminal.__init__).
This is usually called when the user disables a node from the flowchart
control panel.
"""
self._bypass = byp
if self.bypassButton is not None:
self.bypassButton.setChecked(byp)
self.update()
def freeze(self, freeze):
"""Set whether this node should be freezed.
When freezed, a Node's process() method is never called.
This is usually called when the user freeze a node from the flowchart
control panel.
"""
self._freeze = self.processFreezed() if freeze else False #TODO Added
if self.freezeButton is not None:
self.freezeButton.setChecked(freeze)
self.update()
self.recolor()
def isBypassed(self):
"""Return True if this Node is currently bypassed."""
return self._bypass
def isFreezed(self): #TODO added
"""Return True if this Node is currently freezed."""
return True if self._freeze else False
def setInput(self, **args):
"""Set the values on input terminals. For most nodes, this will happen automatically through Terminal.inputChanged.
This is normally only used for nodes with no connected inputs."""
changed = False
for k, v in args.items():
term = self._inputs[k]
oldVal = term.value()
if not eq(oldVal, v):
changed = True
term.setValue(v, process=False)
if changed and '_updatesHandled_' not in args:
self.update()
def inputValues(self):
"""Return a dict of all input values currently assigned to this node."""
vals = {}
for n, t in self.inputs().items():
vals[n] = t.value()
return vals
def outputValues(self):
"""Return a dict of all output values currently generated by this node."""
vals = {}
for n, t in self.outputs().items():
vals[n] = t.value()
return vals
def connected(self, localTerm, remoteTerm):
"""Called whenever one of this node's terminals is connected elsewhere."""
pass
def disconnected(self, localTerm, remoteTerm):
"""Called whenever one of this node's terminals is disconnected from another."""
pass
def update(self, signal=True):
"""Collect all input values, attempt to process new output values, and propagate downstream.
        Subclasses should call update() whenever their internal state has changed
(such as when the user interacts with the Node's control widget). Update
is automatically called when the inputs to the node are changed.
"""
vals = self.inputValues()
#print " inputs:", vals
try:
if self.isBypassed():
out = self.processBypassed(vals)
elif self.isFreezed(): #TODO added
out = self.processFreezed()
else:
out = self.process(**strDict(vals))
#print " output:", out
if out is not None:
if signal:
self.setOutput(**out)
else:
self.setOutputNoSignal(**out)
for n,t in self.inputs().items():
t.setValueAcceptable(True)
self.clearException()
except:
#printExc( "Exception while processing %s:" % self.name())
for n,t in self.outputs().items():
t.setValue(None)
self.setException(sys.exc_info())
if signal:
#self.emit(QtCore.SIGNAL('outputChanged'), self) ## triggers flowchart to propagate new data
self.sigOutputChanged.emit(self) ## triggers flowchart to propagate new data
def processBypassed(self, args):
"""Called when the flowchart would normally call Node.process, but this node is currently bypassed.
The default implementation looks for output terminals with a bypass connection and returns the
corresponding values. Most Node subclasses will _not_ need to reimplement this method."""
result = {}
for term in list(self.outputs().values()):
byp = term.bypassValue()
if byp is None:
result[term.name()] = None
else:
result[term.name()] = args.get(byp, None)
return result
def processFreezed(self): #TODO added
"""Called when the flowchart would normally call Node.process, but this node is currently freezed."""
result = {}
for term in list(self.outputs().values()):
result[term.name()] = term.value()
return result
def setOutput(self, **vals):
self.setOutputNoSignal(**vals)
#self.emit(QtCore.SIGNAL('outputChanged'), self) ## triggers flowchart to propagate new data
self.sigOutputChanged.emit(self) ## triggers flowchart to propagate new data
def setOutputNoSignal(self, **vals):
for k, v in vals.items():
term = self.outputs()[k]
term.setValue(v)
#targets = term.connections()
#for t in targets: ## propagate downstream
#if t is term:
#continue
#t.inputChanged(term)
term.setValueAcceptable(True)
def setException(self, exc):
self.exception = exc
self.recolor()
def clearException(self):
self.setException(None)
def recolor(self):
if self.exception is None:
if self.isFreezed(): #TODO Added
self.graphicsItem().setPen(QtGui.QPen(QtGui.QColor(0, 128, 255), 3))
else:
self.graphicsItem().setPen(QtGui.QPen(QtGui.QColor(0, 0, 0)))
else:
self.graphicsItem().setPen(QtGui.QPen(QtGui.QColor(150, 0, 0), 3))
def saveState(self):
"""Return a dictionary representing the current state of this node
(excluding input / output values). This is used for saving/reloading
flowcharts. The default implementation returns this Node's position,
bypass state, and information about each of its terminals.
Subclasses may want to extend this method, adding extra keys to the returned
dict."""
pos = self.graphicsItem().pos()
state = {'pos': (pos.x(), pos.y()), 'bypass': self.isBypassed(), 'freeze': self._freeze} #TODO Added
termsEditable = self._allowAddInput | self._allowAddOutput
        for term in list(self._inputs.values()) + list(self._outputs.values()):
termsEditable |= term._renamable | term._removable | term._multiable
if termsEditable:
state['terminals'] = self.saveTerminals()
return state
def restoreState(self, state):
"""Restore the state of this node from a structure previously generated
by saveState(). """
pos = state.get('pos', (0,0))
freeze = state.get('freeze', False)
self._freeze = freeze
self.graphicsItem().setPos(*pos)
self.bypass(state.get('bypass', False))
self.freeze(True if freeze else False) #TODO Added
self._freeze = freeze
if freeze:
self.setOutput(**freeze)
if 'terminals' in state:
self.restoreTerminals(state['terminals'])
def saveTerminals(self):
terms = OrderedDict()
for n, t in self.terminals.items():
terms[n] = (t.saveState())
return terms
def restoreTerminals(self, state):
for name in list(self.terminals.keys()):
if name not in state:
self.removeTerminal(name)
for name, opts in state.items():
if name in self.terminals:
term = self[name]
term.setOpts(**opts)
continue
try:
opts = strDict(opts)
self.addTerminal(name, **opts)
except:
printExc("Error restoring terminal %s (%s):" % (str(name), str(opts)))
def clearTerminals(self):
for t in self.terminals.values():
t.close()
self.terminals = OrderedDict()
self._inputs = OrderedDict()
self._outputs = OrderedDict()
def close(self):
"""Cleans up after the node--removes terminals, graphicsItem, widget"""
self.disconnectAll()
self.clearTerminals()
item = self.graphicsItem()
if item.scene() is not None:
item.scene().removeItem(item)
self._graphicsItem = None
w = self.ctrlWidget()
if w is not None:
w.setParent(None)
#self.emit(QtCore.SIGNAL('closed'), self)
self.sigClosed.emit(self)
def disconnectAll(self):
for t in self.terminals.values():
t.disconnectAll()
#class NodeGraphicsItem(QtGui.QGraphicsItem):
class NodeGraphicsItem(GraphicsObject):
def __init__(self, node):
#QtGui.QGraphicsItem.__init__(self)
GraphicsObject.__init__(self)
#QObjectWorkaround.__init__(self)
#self.shadow = QtGui.QGraphicsDropShadowEffect()
#self.shadow.setOffset(5,5)
#self.shadow.setBlurRadius(10)
#self.setGraphicsEffect(self.shadow)
self.pen = fn.mkPen(0,0,0)
self.selectPen = fn.mkPen(200,200,200,width=2)
self.brush = fn.mkBrush(200, 200, 200, 150)
self.hoverBrush = fn.mkBrush(200, 200, 200, 200)
self.selectBrush = fn.mkBrush(200, 200, 255, 200)
self.hovered = False
self.node = node
flags = self.ItemIsMovable | self.ItemIsSelectable | self.ItemIsFocusable |self.ItemSendsGeometryChanges
#flags = self.ItemIsFocusable |self.ItemSendsGeometryChanges
self.setFlags(flags)
self.bounds = QtCore.QRectF(0, 0, 100, 100)
self.nameItem = QtGui.QGraphicsTextItem(self.node.name(), self)
self.nameItem.setDefaultTextColor(QtGui.QColor(50, 50, 50))
self.nameItem.moveBy(self.bounds.width()/2. - self.nameItem.boundingRect().width()/2., 0)
self.nameItem.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.updateTerminals()
#self.setZValue(10)
self.nameItem.focusOutEvent = self.labelFocusOut
self.nameItem.keyPressEvent = self.labelKeyPress
self.menu = None
self.buildMenu()
#self.node.sigTerminalRenamed.connect(self.updateActionMenu)
#def setZValue(self, z):
#for t, item in self.terminals.itervalues():
#item.setZValue(z+1)
#GraphicsObject.setZValue(self, z)
def labelFocusOut(self, ev):
QtGui.QGraphicsTextItem.focusOutEvent(self.nameItem, ev)
self.labelChanged()
def labelKeyPress(self, ev):
if ev.key() == QtCore.Qt.Key_Enter or ev.key() == QtCore.Qt.Key_Return:
self.labelChanged()
else:
QtGui.QGraphicsTextItem.keyPressEvent(self.nameItem, ev)
def labelChanged(self):
newName = str(self.nameItem.toPlainText())
if newName != self.node.name():
self.node.rename(newName)
### re-center the label
bounds = self.boundingRect()
self.nameItem.setPos(bounds.width()/2. - self.nameItem.boundingRect().width()/2., 0)
def setPen(self, pen):
self.pen = pen
self.update()
def setBrush(self, brush):
self.brush = brush
self.update()
def updateTerminals(self):
bounds = self.bounds
self.terminals = {}
inp = self.node.inputs()
dy = bounds.height() / (len(inp)+1)
y = dy
for i, t in inp.items():
item = t.graphicsItem()
item.setParentItem(self)
#item.setZValue(self.zValue()+1)
br = self.bounds
item.setAnchor(0, y)
self.terminals[i] = (t, item)
y += dy
out = self.node.outputs()
dy = bounds.height() / (len(out)+1)
y = dy
for i, t in out.items():
item = t.graphicsItem()
item.setParentItem(self)
item.setZValue(self.zValue())
br = self.bounds
item.setAnchor(bounds.width(), y)
self.terminals[i] = (t, item)
y += dy
#self.buildMenu()
def boundingRect(self):
return self.bounds.adjusted(-5, -5, 5, 5)
def paint(self, p, *args):
p.setPen(self.pen)
if self.isSelected():
p.setPen(self.selectPen)
p.setBrush(self.selectBrush)
else:
p.setPen(self.pen)
if self.hovered:
p.setBrush(self.hoverBrush)
else:
p.setBrush(self.brush)
p.drawRect(self.bounds)
def mousePressEvent(self, ev):
ev.ignore()
def mouseClickEvent(self, ev):
#print "Node.mouseClickEvent called."
if int(ev.button()) == int(QtCore.Qt.LeftButton):
ev.accept()
#print " ev.button: left"
sel = self.isSelected()
#ret = QtGui.QGraphicsItem.mousePressEvent(self, ev)
self.setSelected(True)
if not sel and self.isSelected():
#self.setBrush(QtGui.QBrush(QtGui.QColor(200, 200, 255)))
#self.emit(QtCore.SIGNAL('selected'))
#self.scene().selectionChanged.emit() ## for some reason this doesn't seem to be happening automatically
self.update()
#return ret
elif int(ev.button()) == int(QtCore.Qt.RightButton):
#print " ev.button: right"
ev.accept()
#pos = ev.screenPos()
self.raiseContextMenu(ev)
#self.menu.popup(QtCore.QPoint(pos.x(), pos.y()))
def mouseDragEvent(self, ev):
#print "Node.mouseDrag"
if ev.button() == QtCore.Qt.LeftButton:
ev.accept()
self.setPos(self.pos()+self.mapToParent(ev.pos())-self.mapToParent(ev.lastPos()))
def hoverEvent(self, ev):
if not ev.isExit() and ev.acceptClicks(QtCore.Qt.LeftButton):
ev.acceptDrags(QtCore.Qt.LeftButton)
self.hovered = True
else:
self.hovered = False
self.update()
def keyPressEvent(self, ev):
if ev.key() == QtCore.Qt.Key_Delete or ev.key() == QtCore.Qt.Key_Backspace:
ev.accept()
if not self.node._allowRemove:
return
self.node.close()
else:
ev.ignore()
def itemChange(self, change, val):
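        # notify each terminal when the node has moved so its connections can follow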
if change == self.ItemPositionHasChanged:
for k, t in self.terminals.items():
t[1].nodeMoved()
return GraphicsObject.itemChange(self, change, val)
def getMenu(self):
return self.menu
def getContextMenus(self, event):
return [self.menu]
def raiseContextMenu(self, ev):
menu = self.scene().addParentContextMenus(self, self.getMenu(), ev)
pos = ev.screenPos()
menu.popup(QtCore.QPoint(pos.x(), pos.y()))
def buildMenu(self):
self.menu = QtGui.QMenu()
self.menu.setTitle("Node")
a = self.menu.addAction("Add input", self.addInputFromMenu)
if not self.node._allowAddInput:
a.setEnabled(False)
a = self.menu.addAction("Add output", self.addOutputFromMenu)
if not self.node._allowAddOutput:
a.setEnabled(False)
a = self.menu.addAction("Remove node", self.node.close)
if not self.node._allowRemove:
a.setEnabled(False)
def addInputFromMenu(self): ## called when add input is clicked in context menu
self.node.addInput(renamable=True, removable=True, multiable=True)
def addOutputFromMenu(self): ## called when add output is clicked in context menu
self.node.addOutput(renamable=True, removable=True, multiable=False)
| mit | 7,973,248,571,719,320,000 | 38.326023 | 570 | 0.583479 | false |
rodo/pyrede | pyrede/provider/utils/distro.py | 1 | 1219 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Rodolphe Quiédeville <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Distributions tools
"""
import logging
import requests
from celery.task import task
logger = logging.getLogger(__name__)
@task
def check_dispack_link(dispack):
"""
Check if an url exists
"""
user_agent = 'Pyrede bot, contact http://pyrede.quiedeville.org/about/'
headers = {'User-agent': user_agent}
logger.debug('check {}'.format(dispack.link))
req = requests.get(dispack.link, headers=headers)
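    # requests marks `ok` as True for any status code below 400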
dispack.valid_link = req.ok
dispack.save()
| gpl-3.0 | 6,025,037,577,612,015,000 | 31.918919 | 75 | 0.698686 | false |
scotws/tinkasm | common/test_common.py | 1 | 1174 | # Test routines for tinkasm common routines
# Scot W. Stevenson <[email protected]>
# First version: 07. Feb 2019
# This version: 07. Feb 2019
# From this directory, run "python3 -m unittest"
import unittest
from common import convert_number
class TestHelpers(unittest.TestCase):
def test_convert_number(self):
self.assertEqual(convert_number('0'), (True, 0))
self.assertEqual(convert_number('100'), (True, 100))
self.assertEqual(convert_number('0x0'), (True, 0))
self.assertEqual(convert_number('0x100'), (True, 256))
self.assertEqual(convert_number('$0'), (True, 0))
self.assertEqual(convert_number('$100'), (True, 256))
self.assertEqual(convert_number('%0'), (True, 0))
self.assertEqual(convert_number('%100'), (True, 4))
self.assertEqual(convert_number('%0000100'), (True, 4))
self.assertEqual(convert_number('&100'), (False, '&100'))
self.assertEqual(convert_number('$'), (False, '$'))
self.assertEqual(convert_number('%'), (False, '%'))
self.assertEqual(convert_number('0x'), (False, '0x'))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,169,242,694,813,010,000 | 30.72973 | 65 | 0.631175 | false |
dhis2/dhis2-python | dhis2_core/src/dhis2/e2b/common.py | 1 | 2145 | from datetime import datetime
from typing import Union
from .models.e2b import AttributeValue, Enrollment, Event, EventDataValue, TrackedEntity
def date_format_102(dt: datetime) -> str:
return dt.strftime("%Y%m%d")
def date_format_204(dt: datetime) -> str:
return dt.strftime("%Y%m%d%H%M%S")
def get_attribute_value(at: str, te: TrackedEntity, defaultValue = None) -> Union[str, None]:
av = te.attributes.get(at, defaultValue)
if not av:
return defaultValue
if "value" in av:
        return av.value
    return defaultValue
def get_data_value(de: str, te: TrackedEntity, idx: int = 0, defaultValue = None) -> Union[str, None]:
en: Enrollment = te.enrollments[idx]
ev: Event = en.events["so8YZ9J3MeO"] # AEFI stage
if de not in ev.dataValues:
return defaultValue
dv: EventDataValue = ev.dataValues[de]
if dv:
return dv.value
return defaultValue
def get_patient_age(te: TrackedEntity):
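    # returns a (unit, value) pair; 801/802/804 are assumed to be the E2B age-unit
    # codes for year, month and day respectively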
value = get_attribute_value("BiTsLcJQ95V", te)
dt = datetime.fromisoformat(value)
now = datetime.now()
year = now.year - dt.year
if year > 0:
return ("801", str(year))
months = now.month - dt.month
if months > 0:
return ("802", str(months))
return ("804", str(now.day - dt.day))
def get_yes_no(de: str, te: TrackedEntity, idx: int = 0):
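    # the yes/no convention used here is "1" for a true data value and "2" for everything else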
dv: EventDataValue = get_data_value(de, te, idx)
if "true" == dv:
return "1"
return "2"
def get_patient_sex(te: TrackedEntity) -> str:
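    # falls back to "9" when the sex attribute is missing or unrecognised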
value = get_attribute_value("CklPZdOd6H1", te)
if "MALE" == value:
return "1"
elif "FEMALE" == value:
return "0"
return "9"
def get_reaction_outcome(te: TrackedEntity):
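    # maps the DHIS2 outcome option text onto the numeric reaction-outcome codes 1-6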
value = get_data_value("yRrSDiR5v1M", te)
if "Recovered/resolved" == value:
return "1"
elif "Recovering/resolving" == value:
return "2"
elif "Not recovered/not resolved" == value:
return "3"
elif "Recovered/resolved with sequelae" == value:
return "4"
elif "Died" == value or "Autopsy done" == value:
return "5"
elif "Unknown" == value:
return "6"
return value
| bsd-3-clause | 2,216,280,784,871,009,300 | 21.819149 | 102 | 0.617249 | false |
phbono/openfisca-web | simulation/views_old.py | 1 | 3992 | # -*-coding:Utf-8 -*
from django.http import HttpResponse
from django.shortcuts import render
from simulation.models import IndividualForm
from django.forms.formsets import formset_factory, BaseFormSet
from datetime import datetime
from core.utils import Scenario
from simulation.lanceur import Simu
class BaseScenarioFormSet(BaseFormSet):
def clean(self):
"""Checks consistency of a formset"""
if any(self.errors):
# Don't bother validating the formset unless each form is valid on its own
return
def index(request):
return HttpResponse("Hello, world. You're at the poll index.")
# form = IndividualForm()
# return render_to_response('simulation/menage.html', {'formset': form})
def menage(request):
scenario = request.session.get('scenario',default=None)
if scenario == None:
print 'scenario is None'
scenario = Scenario()
if request.method == 'POST':
if 'reset' in request.POST:
del request.session['scenario']
scenario = Scenario()
formset = scenario2formset(scenario)
request.session['scenario'] = scenario
else:
ScenarioFormSet = formset_factory(IndividualForm, formset = BaseScenarioFormSet, extra=0)
formset = ScenarioFormSet(request.POST)
# for form in formset.cleaned_data:
# print form
if formset.is_valid():
scenario = formset2scenario(formset)
if 'add' in request.POST:
scenario.addIndiv(scenario.nbIndiv(), datetime(1975,1,1).date(), 'vous', 'chef')
if 'remove' in request.POST:
scenario.rmvIndiv(scenario.nbIndiv()-1)
# print scenario
formset = scenario2formset(scenario)
request.session['scenario'] = scenario
if 'submit' in request.POST:
scenario.genNbEnf()
ok = True
ok = build_simu(scenario)
print 'is it ok ? :', ok
#return (request, 'simulation/menage.html', {'formset' : formset})
else:
formset = scenario2formset(scenario)
request.session['scenario'] = scenario
return render(request, 'simulation/menage.html', {'formset' : formset})
def build_simu(scenario):
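    # runs the simulation for the given scenario and prints the computed values for inspection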
simu = Simu(scenario=scenario)
simu.set_openfica_root_dir()
simu.set_date()
msg = simu.scenario.check_consistency()
if msg:
print 'inconsistent scenario'
simu.set_param()
x = simu.compute()
for child in x.children:
for child2 in child.children:
print child2.code
print child2._vals
return True
def formset2scenario(formset):
scenario = Scenario()
for form in formset.cleaned_data:
noi, birth, quifoy, quifam = form['noi']-1, form['birth'], form['quifoy'], form['quifam']
scenario.addIndiv(noi, birth, quifoy, quifam)
return scenario
def scenario2formset(scenario):
var_list = ['noi', 'birth', 'idfoy', 'quifoy', 'idfam', 'quifam']
convert = dict(idfoy = "noidec", idfam ="noichef")
zero_start = [ "idfoy", "idfam", "noi"]
initial = []
for noi, indiv in scenario.indiv.iteritems():
new_form = {}
for var in var_list:
if var == "noi":
new_form[var] = noi
elif var in convert.keys():
new_form[var] = indiv[convert[var]]
else:
new_form[var] = indiv[var]
if var in zero_start:
new_form[var] += 1
initial.append(new_form)
ScenarioFormSet = formset_factory(IndividualForm, formset = BaseScenarioFormSet, extra=0)
return ScenarioFormSet(initial=initial)
# for indinv in formset['noiindiv'] | gpl-3.0 | 588,075,695,828,365,300 | 31.463415 | 101 | 0.576403 | false |
ryad-eldajani/dbs_project_pub | scraper_heise.py | 1 | 1689 | import bs4
import requests
import csv
import re
import operator
def get_page(url):
"""
    Fetches a URL and returns the parsed BeautifulSoup object
:param url: URL
:return: BeautifulSoup object
"""
r = requests.get(url)
data = r.text
return bs4.BeautifulSoup(data, "lxml")
def main():
"""
Web-Scraper for heise.de HTTPS topics.
"""
file_obj = open('heise-data.csv', 'w')
csv_writer = csv.writer(file_obj, delimiter=';')
words = {}
heise_url = "https://www.heise.de/thema/https"
    link_pages = get_page(heise_url).find_all("span", {"class": "pagination"}) \
[0].find_all("a")
# scrape all sub-pages of topic HTTPS
for link in link_pages:
page = get_page("https://www.heise.de" + link["href"])
headlines = page.find_all("div", {"class": "keywordliste"})[0] \
.find_all("nav")[0].find_all("header")
for headline in headlines:
# split words in headline, filter some chars like ";"
headline_words = re.findall(r'[^\"()\-,;:\s]+', headline.string)
# set/update counter in words dictionary
for word in headline_words:
if word in words:
words[word] += 1
else:
words[word] = 1
# sort words dictionary by count value
sorted_words = sorted(words.items(), key=operator.itemgetter(1),
reverse=True)
# write result in CSV file
for element in sorted_words:
csv_writer.writerow(element)
file_obj.close()
print("Scraping complete, top 3 words: {}".format(sorted_words[:3]))
if __name__ == '__main__':
main() | mit | -5,539,494,937,456,852,000 | 26.704918 | 80 | 0.569568 | false |
crimsonknave/juniperncprompt | elementtidy-1.0-20050212/selftest.py | 1 | 2054 | # $Id: selftest.py 1758 2004-03-28 17:36:59Z fredrik $
# -*- coding: iso-8859-1 -*-
# elementtidy selftest program (in progress)
from elementtree import ElementTree
def sanity():
"""
Make sure everything can be imported.
>>> import _elementtidy
>>> from elementtidy.TidyHTMLTreeBuilder import *
"""
HTML1 = "<title>Foo</title><ul><li>Foo!<li>åäö"
XML1 = """\
<html:html xmlns:html="http://www.w3.org/1999/xhtml">
<html:head>
<html:meta content="TIDY" name="generator" />
<html:title>Foo</html:title>
</html:head>
<html:body>
<html:ul>
<html:li>Foo!</html:li>
<html:li>åäö</html:li>
</html:ul>
</html:body>
</html:html>"""
def check(a, b):
import re
a = ElementTree.tostring(ElementTree.XML(a))
a = re.sub("HTML Tidy[^\"]+", "TIDY", a)
a = re.sub("\r\n", "\n", a)
if a != b:
print a
print "Expected:"
print b
def testdriver():
"""
Check basic driver interface.
>>> import _elementtidy
>>> xml, errors = _elementtidy.fixup(HTML1)
>>> check(xml, XML1)
"""
def testencoding():
"""
Check basic driver interface.
>>> import _elementtidy
>>> xml, errors = _elementtidy.fixup(HTML1, 'ascii')
>>> check(xml, XML1)
>>> xml, errors = _elementtidy.fixup(HTML1, 'latin1')
>>> check(xml, XML1)
"""
def xmltoolkit35():
"""
@XMLTOOLKIT35
elementtidy crashes on really broken pages.
>>> import _elementtidy
>>> xml, errors = _elementtidy.fixup("<crash>")
>>> tree = ElementTree.XML(xml)
"""
def xmltoolkit48():
"""
@XMLTOOLKIT48
elementtidy gives up on some pages.
>>> import _elementtidy
>>> html = "<table><form><tr><td>test</td></tr></form></table>"
>>> xml, errors = _elementtidy.fixup(html)
>>> tree = ElementTree.XML(xml)
"""
if __name__ == "__main__":
import doctest, selftest
failed, tested = doctest.testmod(selftest)
print tested - failed, "tests ok."
| gpl-3.0 | -2,044,121,684,195,228,200 | 21.883721 | 67 | 0.568647 | false |
pulse-project/mss | mss/www/settings.py | 1 | 4685 | # -*- coding: UTF-8 -*-
#
# (c) 2012 Mandriva, http://www.mandriva.com/
#
# This file is part of Management Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import sys
import logging
import logging.handlers
logger = logging.getLogger()
ADMINS = (('root', 'root@localhost'),)
MANAGERS = ADMINS
PROJECT_DIR = os.path.dirname(__file__)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
EMAIL_SUBJECT_PREFIX = "[MSS]"
SERVER_EMAIL = "[email protected]"
LOG_FILENAME = '/var/log/mss/mss-www.log'
os.chmod(LOG_FILENAME, 0600)
ALLOWED_HOSTS = ['*']
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': LOG_FILENAME
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
}
},
'loggers': {
'mss': {
'handlers': ['file', 'console'],
'level': 'DEBUG',
'propagate': True
}
}
}
else:
LOGGING = {
'version': 1,
'handlers': {
'file': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': LOG_FILENAME
},
'console': {
'level': 'ERROR',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
}
},
'loggers': {
'mss': {
'handlers': ['file', 'console'],
'level': 'ERROR',
'propagate': True
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/var/lib/mss/mss-www.db'
}
}
TIME_ZONE = 'Europe/Paris'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
MEDIA_URL = '/site_media/'
LOGIN_URL = "/mss/account/login/"
LANGUAGES = (
('en-us', 'English'),
('fr-fr', 'Français'),
('pt-br', 'Português do Brasil'),
('de-de', 'Deutsch'),
('zh-cn', 'Chinese'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.i18n",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.media"
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'mss.www.errors.middleware.CatchExceptions',
)
ROOT_URLCONF = 'mss.www.urls'
TEMPLATE_DIRS = [
os.path.join(PROJECT_DIR, 'wizard', 'templates'),
]
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'mss.www.wizard',
'mss.www.cpserver',
'mss.www.errors',
]
AUTHENTICATION_BACKENDS = (
'mss.www.backends.MSSBackend',
)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# add local python libs in path
if not os.path.abspath(os.path.join(PROJECT_DIR, 'lib')) in sys.path:
sys.path.append(os.path.abspath(os.path.join(PROJECT_DIR, 'lib')))
TRACEBACK_API_URL = "https://mbs-reports.mandriva.com/api/mss/traceback/"
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
try:
from local_settings import *
except ImportError:
pass
| gpl-3.0 | -7,318,028,767,869,020,000 | 25.76 | 99 | 0.580824 | false |
tunetosuraj/spectrum | recommenders/document_api.py | 1 | 1706 | import nltk, string
from sklearn.feature_extraction.text import TfidfVectorizer
from items.models import BookProfile
from recommenders.models import DocToDocLink
class DocumentSimilarity:
def __init__(self, doc1=None, doc2=None):
self.b1 = doc1
self.b2 = doc2
def calculate_cosine(self):
stemmer = nltk.stem.porter.PorterStemmer()
clean_punc = dict((ord(char), None) for char in string.punctuation)
def stem_tokens(tokens):
return [stemmer.stem(item) for item in tokens]
#remove punctuation, lowercase, stem
def normalize(text):
return stem_tokens(nltk.word_tokenize(text.lower().translate(clean_punc)))
documents = [self.b1.book.description, self.b2.book.description]
tfidf = TfidfVectorizer(tokenizer=normalize, stop_words='english').fit_transform(documents)
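        # TfidfVectorizer L2-normalises rows by default, so the dot-product matrix
        # below already contains pairwise cosine similarities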
pairwise_cosine_similarity = (tfidf * tfidf.T).A
score = pairwise_cosine_similarity[0][1]
return score
def _get(self, weight=None):
DocToDocLink.objects.create(item1=self.b1, item2=self.b2, raw_weight=weight, calculated_weight=weight, origin='TFIDF Document Similarity')
def analyse(self):
score = self.calculate_cosine()
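        # only persist a link when the two descriptions are at least moderately similar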
if score >= 0.5:
self._get(weight=score)
def migrate_d2d():
books = BookProfile.objects.all()
for i in books:
for j in books:
if i.book.volume_id != j.book.volume_id:
if not i.stop_docsim:
print('Initiating TF-IDF Document Similarity..')
d = DocumentSimilarity(doc1=i,doc2=j)
d.analyse()
i.stop_docsim = True
i.save()
| agpl-3.0 | -348,521,159,580,769,150 | 32.45098 | 146 | 0.632474 | false |